/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if 0 /* JG */
#include "opt_atpic.h"
#include "opt_compat.h"
#endif

#include <machine/asmacros.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>

#include "assym.s"

	.text

	.globl	lwkt_switch_return

/*****************************************************************************/
/* Trap handling                                                             */
/*****************************************************************************/
/*
 * Trap and fault vector routines.
 *
 * All traps are 'interrupt gates', SDT_SYSIGT.  An interrupt gate pushes
 * state on the stack but also disables interrupts.  This is important for
 * our use of the swapgs instruction: we cannot be interrupted until the
 * GS.base value is correct.  For most traps, we then automatically
 * re-enable interrupts if the interrupted context had them enabled.
 *
 * The cpu will push a certain amount of state onto the kernel stack for
 * the current process.  See x86_64/include/frame.h.
 * The current RFLAGS (status register, which includes the interrupt
 * disable state prior to the trap), the code segment register, and the
 * return instruction pointer are pushed by the cpu.  The cpu will also
 * push an 'error' code for certain traps.  For those traps where the cpu
 * doesn't push one, we push a dummy error code in order to maintain a
 * consistent frame.  We also push a contrived 'trap number'.
 *
 * The cpu does not push the general registers; we must do that, and we
 * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
 * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
 * must load them with appropriate values for supervisor mode operation.
 */
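
/*
 * Illustrative sketch only (the authoritative layout is struct trapframe
 * in x86_64/include/frame.h; the field order shown here is approximate):
 * once a TRAP() stub and alltraps below have run, %rsp points at a frame
 * of roughly this shape, from low to high addresses:
 *
 *	TF_RDI .. TF_R15	general registers, saved by software
 *	TF_TRAPNO		contrived trap number (T_*)
 *	TF_ADDR			fault address (%cr2 for page faults, else 0)
 *	TF_XFLAGS		DragonFly-specific flags
 *	TF_ERR			error code (hardware-supplied or dummy 0)
 *	TF_RIP			return %rip		(pushed by hardware)
 *	TF_CS			code segment		(pushed by hardware)
 *	TF_RFLAGS		saved RFLAGS		(pushed by hardware)
 *	TF_RSP			interrupted %rsp	(pushed by hardware)
 *	TF_SS			stack segment		(pushed by hardware)
 */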

MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)

/*
 * Interrupts are enabled for all traps, otherwise horrible livelocks
 * can occur with the smp_invltlb and cpusync code.
 */
#if 0
#define	TRAP_NOEN(a)	\
	subq $TF_RIP,%rsp; \
	movq $0,TF_XFLAGS(%rsp) ; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps_noen
#endif

/* Regular traps; the cpu does not supply tf_err for these. */
#define	TRAP(a)	\
	subq $TF_RIP,%rsp; \
	movq $0,TF_XFLAGS(%rsp) ; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps
IDTVEC(dbg)
	TRAP(T_TRCTRAP)
IDTVEC(bpt)
	TRAP(T_BPTFLT)
IDTVEC(div)
	TRAP(T_DIVIDE)
IDTVEC(ofl)
	TRAP(T_OFLOW)
IDTVEC(bnd)
	TRAP(T_BOUND)
IDTVEC(ill)
	TRAP(T_PRIVINFLT)
IDTVEC(dna)
	TRAP(T_DNA)
IDTVEC(fpusegm)
	TRAP(T_FPOPFLT)
IDTVEC(mchk)
	TRAP(T_MCHK)
IDTVEC(rsvd)
	TRAP(T_RESERVED)
IDTVEC(fpu)
	TRAP(T_ARITHTRAP)
IDTVEC(xmm)
	TRAP(T_XMMFLT)

/* This group of traps have tf_err already pushed by the cpu */
#define	TRAP_ERR(a)	\
	subq $TF_ERR,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_XFLAGS(%rsp) ; \
	jmp alltraps
IDTVEC(tss)
	TRAP_ERR(T_TSSFLT)
IDTVEC(missing)
	TRAP_ERR(T_SEGNPFLT)
IDTVEC(stk)
	TRAP_ERR(T_STKFLT)
IDTVEC(align)
	TRAP_ERR(T_ALIGNFLT)

	/*
	 * alltraps entry point.  Use swapgs if this is the first time in the
	 * kernel from userland.  Re-enable interrupts if they were enabled
	 * before the trap.
	 */
	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:
	/* Fixup %gs if coming from userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	alltraps_testi
	swapgs
alltraps_testi:
	testq	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs
	sti
alltraps_pushregs:
	movq	%rdi,TF_RDI(%rsp)
alltraps_pushregs_no_rdi:
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	.globl	calltrap
	.type	calltrap,@function
calltrap:
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT
	jmp	doreti			/* Handle any pending ASTs */
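
/*
 * For illustration only: a hypothetical vector 'widget' whose trap pushes
 * no hardware error code would be wired up as
 *
 *	IDTVEC(widget)
 *		TRAP(T_WIDGET)
 *
 * funneling into alltraps above with a dummy tf_err, while a trap that
 * does push an error code would use TRAP_ERR() instead.
 */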

	/*
	 * alltraps_noen entry point.  Unlike alltraps above, we want to
	 * leave the interrupts disabled.
	 */
	SUPERALIGN_TEXT
	.globl	alltraps_noen
	.type	alltraps_noen,@function
alltraps_noen:
	/* Fixup %gs if coming from userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	alltraps_pushregs
	swapgs
	jmp	alltraps_pushregs

IDTVEC(dblfault)
	subq	$TF_ERR,%rsp
	movq	$T_DOUBLEFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	1f
	swapgs
1:	movq	%rsp, %rdi
	cld
	call	dblfault_handler
2:	hlt
	jmp	2b

IDTVEC(page)
	subq	$TF_ERR,%rsp
	movq	$T_PAGEFLT,TF_TRAPNO(%rsp)
	/* Fixup %gs if coming from userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	1f
	swapgs
1:
	movq	%rdi,TF_RDI(%rsp)	/* free up a GP register */
	movq	%cr2,%rdi		/* preserve %cr2 before .. */
	movq	%rdi,TF_ADDR(%rsp)	/* enabling interrupts. */
	movq	$0,TF_XFLAGS(%rsp)
	testq	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rdi
	sti
	jmp	alltraps_pushregs_no_rdi

	/*
	 * We have to special-case this one.  If we get a trap in doreti() at
	 * the iretq stage, we'll reenter with the wrong gs state.  We'll have
	 * to do a special swapgs in this case even when coming from the
	 * kernel.  XXX linux has a trap handler for their equivalent of
	 * load_gs().
	 */
IDTVEC(prot)
	subq	$TF_ERR,%rsp
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	movq	%rdi,TF_RDI(%rsp)	/* free up a GP register */

	/*
	 * Fixup %gs if coming from userland.  Handle the special case where
	 * %fs faults in doreti at the iretq instruction itself.
	 */
	leaq	doreti_iret(%rip),%rdi
	cmpq	%rdi,TF_RIP(%rsp)		/* special iretq fault case */
	je	2f
	testb	$SEL_RPL_MASK,TF_CS(%rsp)	/* check if from userland */
	jz	1f
2:
	swapgs
1:
	testq	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rdi
	sti
	jmp	alltraps_pushregs_no_rdi
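
/*
 * Background for the fast_syscall path below: the 'syscall' instruction
 * architecturally clobbers %rcx (loaded with the return %rip) and %r11
 * (loaded with the saved RFLAGS).  That is why userland passes the
 * fourth syscall argument in %r10, and why the entry code recovers
 * TF_RIP and TF_RFLAGS from %rcx/%r11 and stores %r10 into TF_RCX.
 */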

/*
 * Fast syscall entry point.  We enter here with just our new %cs/%ss set,
 * and the new privilege level.  We are still running on the old user stack
 * pointer.  We have to juggle a few things around to find our stack etc.
 * swapgs gives us access to our PCPU space only.
 */
IDTVEC(fast_syscall)
	swapgs
	movq	%rsp,PCPU(scratch_rsp)
	movq	PCPU(common_tss) + TSS_RSP0, %rsp
	/* Now emulate a trapframe.  Make the 8 byte alignment odd for call. */
	subq	$TF_SIZE,%rsp
	/* defer TF_RSP till we have a spare register */
	movq	%r11,TF_RFLAGS(%rsp)
	movq	%rcx,TF_RIP(%rsp)	/* %rcx original value is in %r10 */
	movq	PCPU(scratch_rsp),%r11	/* %r11 already saved */
	movq	%r11,TF_RSP(%rsp)	/* user stack pointer */
	orl	$RQF_QUICKRET,PCPU(reqflags)
	sti
	movq	$KUDSEL,TF_SS(%rsp)
	movq	$KUCSEL,TF_CS(%rsp)
	movq	$2,TF_ERR(%rsp)
	movq	$T_FAST_SYSCALL,TF_TRAPNO(%rsp)	/* for the vkernel */
	movq	$0,TF_XFLAGS(%rsp)	/* note: used in signal frame */
	movq	%rdi,TF_RDI(%rsp)	/* arg 1 */
	movq	%rsi,TF_RSI(%rsp)	/* arg 2 */
	movq	%rdx,TF_RDX(%rsp)	/* arg 3 */
	movq	%r10,TF_RCX(%rsp)	/* arg 4 */
	movq	%r8,TF_R8(%rsp)		/* arg 5 */
	movq	%r9,TF_R9(%rsp)		/* arg 6 */
	movq	%rax,TF_RAX(%rsp)	/* syscall number */
	movq	%rbx,TF_RBX(%rsp)	/* C preserved */
	movq	%rbp,TF_RBP(%rsp)	/* C preserved */
	movq	%r12,TF_R12(%rsp)	/* C preserved */
	movq	%r13,TF_R13(%rsp)	/* C preserved */
	movq	%r14,TF_R14(%rsp)	/* C preserved */
	movq	%r15,TF_R15(%rsp)	/* C preserved */
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp, %rdi
	call	syscall2

	/*
	 * Fast return from system call
	 */
	cli
	testl	$RQF_IPIQ|RQF_TIMER|RQF_INTPEND|RQF_AST_MASK,PCPU(reqflags)
	jnz	1f
	testl	$RQF_QUICKRET,PCPU(reqflags)
	jz	1f
	MEXITCOUNT
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RAX(%rsp),%rax
	movq	TF_RFLAGS(%rsp),%r11
	movq	TF_RIP(%rsp),%rcx
	movq	TF_RSP(%rsp),%rsp
	swapgs
	sysretq
	/*
	 * Normal slow / full iret
	 */
1:
	MEXITCOUNT
	jmp	doreti

/*
 * Here for CYA insurance, in case a "syscall" instruction gets
 * issued from 32 bit compatibility mode.  MSR_CSTAR has to point
 * to *something* if EFER_SCE is enabled.
 */
IDTVEC(fast_syscall32)
	sysret

/*
 * NMI handling is special.
 *
 * First, NMIs do not respect the state of the processor's RFLAGS.IF
 * bit and the NMI handler may be invoked at any time, including when
 * the processor is in a critical section with RFLAGS.IF == 0.  In
 * particular, this means that the processor's GS.base values could be
 * inconsistent on entry to the handler, and so we need to read
 * MSR_GSBASE to determine if a 'swapgs' is needed.  We use '%ebx', a
 * C-preserved register, to remember whether to swap GS back on the
 * exit path.
 *
 * Second, the processor treats NMIs specially, blocking further NMIs
 * until an 'iretq' instruction is executed.  We therefore need to
 * execute the NMI handler with interrupts disabled to prevent a
 * nested interrupt from executing an 'iretq' instruction and
 * inadvertently taking the processor out of NMI mode.
 *
 * Third, the NMI handler runs on its own stack (tss_ist1), shared
 * with the double fault handler.
 */
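
/*
 * Worked example of the GS.base test below: rdmsr returns MSR_GSBASE
 * with the low 32 bits in %eax and the high 32 bits in %edx.  Kernel
 * addresses live in the upper canonical half of the address space, so
 * if %edx compares >= the high dword of VM_MAX_USER_ADDRESS the base
 * already holds a kernel VA and no swapgs is needed.
 */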

IDTVEC(nmi)
	subq	$TF_RIP,%rsp
	movq	$(T_NMI),TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	xorl	%ebx,%ebx
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	nmi_needswapgs		/* we came from userland */
	movl	$MSR_GSBASE,%ecx
	rdmsr
	cmpl	$VM_MAX_USER_ADDRESS >> 32,%edx
	jae	nmi_calltrap		/* GS.base holds a kernel VA */
nmi_needswapgs:
	incl	%ebx
	swapgs
/* Note: this label is also used by ddb and gdb: */
nmi_calltrap:
	FAKE_MCOUNT(TF_RIP(%rsp))
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT
	testl	%ebx,%ebx
	jz	nmi_restoreregs
	swapgs
nmi_restoreregs:
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RCX(%rsp),%rcx
	movq	TF_R8(%rsp),%r8
	movq	TF_R9(%rsp),%r9
	movq	TF_RAX(%rsp),%rax
	movq	TF_RBX(%rsp),%rbx
	movq	TF_RBP(%rsp),%rbp
	movq	TF_R10(%rsp),%r10
	movq	TF_R11(%rsp),%r11
	movq	TF_R12(%rsp),%r12
	movq	TF_R13(%rsp),%r13
	movq	TF_R14(%rsp),%r14
	movq	TF_R15(%rsp),%r15
	addq	$TF_RIP,%rsp
	iretq

/*
 * This function is what cpu_heavy_restore jumps to after a new process
 * is created.  The LWKT subsystem switches while holding a critical
 * section and we maintain that abstraction here (e.g. because
 * cpu_heavy_restore needs it due to PCB_*() manipulation), then get out of
 * it before calling the initial function (typically fork_return()) and/or
 * returning to user mode.
 *
 * The MP lock is not held at any point but the critcount is bumped
 * on entry to prevent interruption of the trampoline at a bad point.
 *
 * This is effectively what td->td_switch() returns to.  It 'returns' the
 * old thread in %rax and since this is not returning to a td->td_switch()
 * call from lwkt_switch() we must handle the cleanup for the old thread
 * by calling lwkt_switch_return().
 *
 * fork_trampoline(%rax:otd, %rbx:func, %r12:arg)
 */
ENTRY(fork_trampoline)
	movq	%rax,%rdi
	call	lwkt_switch_return
	movq	PCPU(curthread),%rax
	decl	TD_CRITCOUNT(%rax)

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 *
	 * initproc has its own fork handler, start_init(), which DOES
	 * return.
	 *
	 * %rbx - chaining function (typically fork_return)
	 * %r12 -> %rdi (argument)
	 * frame -> %rsi (trap frame)
	 *
	 *	void (func:rbx)(arg:rdi, trapframe:rsi)
	 */
	movq	%rsp, %rsi		/* pass trapframe by reference */
	movq	%r12, %rdi		/* arg1 */
	call	*%rbx			/* function */

	/* cut from syscall */

	sti
	call	splz

	/*
	 * Return via doreti to handle ASTs.
	 *
	 * trapframe is at the top of the stack.
	 */
	MEXITCOUNT
	jmp	doreti
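
/*
 * In C terms the register protocol above corresponds to a chained
 * handler of roughly this shape (a sketch, not the actual prototype;
 * see the C sources for fork_return()):
 *
 *	void func(void *arg, struct trapframe *frame);
 *
 * with 'arg' arriving from %r12 and 'frame' pointing at the trapframe
 * on this thread's kernel stack.
 */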

/*
 * To efficiently implement classification of trap and interrupt handlers
 * for profiling, there must be only trap handlers between the labels btrap
 * and bintr, and only interrupt handlers between the labels bintr and
 * eintr.  This is implemented (partly) by including files that contain
 * some of the handlers.  Before including the files, set up a normal asm
 * environment so that the included files don't need to know that they are
 * included.
 */

#if 0 /* COMPAT_IA32 */
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <x86_64/ia32/ia32_exception.S>
#endif

	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
MCOUNT_LABEL(bintr)

#if 0 /* JG */
#include <x86_64/x86_64/apic_vector.S>
#endif

#ifdef DEV_ATPIC
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <x86_64/isa/atpic_vector.S>
#endif

	.text
MCOUNT_LABEL(eintr)