/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if 0 /* JG */
#include "opt_atpic.h"
#include "opt_compat.h"
#endif

#include <machine/asmacros.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>

#include "assym.s"

	.text

	.globl	lwkt_switch_return

/*****************************************************************************/
/* Trap handling */
/*****************************************************************************/
/*
 * Trap and fault vector routines.
 *
 * All traps are 'interrupt gates', SDT_SYSIGT.  An interrupt gate pushes
 * state on the stack but also disables interrupts.  This is important for
 * us for the use of the swapgs instruction.  We cannot be interrupted
 * until the GS.base value is correct.  For most traps, we automatically
 * then enable interrupts if the interrupted context had them enabled.
 *
 * The cpu will push a certain amount of state onto the kernel stack for
 * the current process; see x86_64/include/frame.h.  The current RFLAGS
 * (the status register, which includes the interrupt disable state prior
 * to the trap), the code segment register, and the return instruction
 * pointer are pushed by the cpu.  The cpu will also push an 'error' code
 * for certain traps; we push a dummy error code for those traps where the
 * cpu doesn't, in order to maintain a consistent frame.  We also push a
 * contrived 'trap number'.
 *
 * The cpu does not push the general registers; we must do that, and we
 * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
 * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
 * must load them with appropriate values for supervisor mode operation.
 */
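/*
 * Illustrative sketch (comment only, not assembled): the approximate
 * stack image after an interrupt gate fires from userland and one of
 * the TRAP()/TRAP_ERR() vectors below completes its pushes.  The
 * authoritative layout is struct trapframe in x86_64/include/frame.h;
 * this rendering is an assumption based on the TF_* offsets used here.
 *
 *	tf_ss		<- pushed by cpu
 *	tf_rsp		<- pushed by cpu
 *	tf_rflags	<- pushed by cpu
 *	tf_cs		<- pushed by cpu
 *	tf_rip		<- pushed by cpu
 *	tf_err		<- pushed by cpu, or a dummy pushed by us
 *	tf_trapno	<- pushed by us
 *	tf_addr/xflags	<- pushed by us
 *	general regs	<- pushed by us (PUSH_FRAME*); %rsp ends up here
 */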
MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)

/*
 * Interrupts must be disabled for all traps, otherwise horrible %gs
 * issues will occur.
 */

/* Regular traps; the cpu does not supply tf_err for these. */
#define TRAP(a) \
	PUSH_FRAME_TFRIP ; \
	movq $0,TF_XFLAGS(%rsp) ; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps

/* This group of traps has tf_err already pushed by the cpu */
#define TRAP_ERR(a) \
	PUSH_FRAME_TFERR ; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_XFLAGS(%rsp) ; \
	jmp alltraps

IDTVEC(dbg)
	TRAP(T_TRCTRAP)
IDTVEC(bpt)
	TRAP(T_BPTFLT)
IDTVEC(div)
	TRAP(T_DIVIDE)
IDTVEC(ofl)
	TRAP(T_OFLOW)
IDTVEC(bnd)
	TRAP(T_BOUND)
IDTVEC(ill)
	TRAP(T_PRIVINFLT)
IDTVEC(dna)
	TRAP(T_DNA)
IDTVEC(fpusegm)
	TRAP(T_FPOPFLT)
IDTVEC(mchk)
	TRAP(T_MCHK)
IDTVEC(rsvd)
	TRAP(T_RESERVED)
IDTVEC(fpu)
	TRAP(T_ARITHTRAP)
IDTVEC(xmm)
	TRAP(T_XMMFLT)

IDTVEC(tss)
	TRAP_ERR(T_TSSFLT)
IDTVEC(missing)
	TRAP_ERR(T_SEGNPFLT)
IDTVEC(stk)
	TRAP_ERR(T_STKFLT)
IDTVEC(align)
	TRAP_ERR(T_ALIGNFLT)

	/*
	 * alltraps entry point.  Use swapgs if this is the first time in the
	 * kernel from userland.  Reenable interrupts if they were enabled
	 * before the trap.
	 *
	 * WARNING! %gs is not available until after our swapgs code.
	 */
	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:

#if 0
alltraps_pushregs:
	movq	%rdi,TF_RDI(%rsp)
alltraps_pushregs_no_rdi:
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
#endif
	sti
	FAKE_MCOUNT(TF_RIP(%rsp))
	.globl	calltrap
	.type	calltrap,@function
calltrap:
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT
	jmp	doreti			/* Handle any pending ASTs */

IDTVEC(dblfault)
	PUSH_FRAME_TFERR
	movq	$T_DOUBLEFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)

	cld
	movq	%rsp, %rdi
	call	dblfault_handler
2:	hlt
	jmp	2b

	/*
	 * We need to save the contents of %cr2 before PUSH_FRAME* messes
	 * with %cr3.
	 */
IDTVEC(page)
	PUSH_FRAME_TFERR_SAVECR2
	movq	$T_PAGEFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	jmp	alltraps
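/*
 * NOTE: PUSH_FRAME_TFERR_SAVECR2 is presumably what deposits the
 * faulting address from %cr2 into TF_ADDR, which would explain why
 * this vector, unlike the others, does not zero TF_ADDR: trap() can
 * then recover the fault address from the frame.  (Inferred from the
 * macro name and the missing TF_ADDR store; see asmacros.h for the
 * actual definition.)
 */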
	/*
	 * We have to special-case this one.  If we get a trap in doreti() at
	 * the iretq stage, we'll reenter as a kernel exception with the
	 * wrong gs and isolation state.  We have to act as though we came
	 * in from userland.
	 */
IDTVEC(prot)
	pushq	%r10
	leaq	doreti_iret(%rip),%r10
	cmpq	%r10,TF_RIP-TF_ERR+8(%rsp)	   /* +8 due to pushq */
	jne	prot_normal
	testb	$SEL_RPL_MASK,TF_CS-TF_ERR+8(%rsp) /* +8 due to pushq */
	jnz	prot_normal

	/*
	 * Special fault during iretq
	 */
	popq	%r10
	swapgs
	KMMUENTER_TFERR
	subq	$TF_ERR,%rsp
	PUSH_FRAME_REGS
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	jmp	alltraps

prot_normal:
	popq	%r10
	PUSH_FRAME_TFERR
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	jmp	alltraps

/*
 * Fast syscall entry point.  We enter here with just our new %cs/%ss set,
 * and the new privilege level.  We are still running on the old user stack
 * pointer.  We have to juggle a few things around to find our stack etc.
 * swapgs gives us access to our PCPU space only.
 *
 * We use GD_TRAMPOLINE+TR_CR2 to save the user stack pointer temporarily.
 */
IDTVEC(fast_syscall)
	swapgs					/* get kernel %gs */
	movq	%rsp,PCPU(trampoline)+TR_CR2	/* save user %rsp */
	movq	PCPU(common_tss)+TSS_RSP0,%rsp

	/*
	 * NOTE: KMMUENTER_SYSCALL does not actually use the stack, but we
	 *	 adjust the stack pointer anyway for correctness in case it
	 *	 does in the future.
	 */
	subq	$TR_PCB_RSP,%rsp
	KMMUENTER_SYSCALL
	movq	PCPU(trampoline)+TR_PCB_RSP,%rsp

	/* Now emulate a trapframe.  Make the 8-byte alignment odd for call. */
	subq	$TF_SIZE,%rsp
	/* defer TF_RSP till we have a spare register */
	movq	%r11,TF_RFLAGS(%rsp)
	movq	%rcx,TF_RIP(%rsp)	/* %rcx original value is in %r10 */
	movq	PCPU(trampoline)+TR_CR2,%r11	/* %r11 already saved */
	movq	%r11,TF_RSP(%rsp)	/* user stack pointer */
	orl	$RQF_QUICKRET,PCPU(reqflags)
	movq	$KUDSEL,TF_SS(%rsp)
	movq	$KUCSEL,TF_CS(%rsp)
	movq	$2,TF_ERR(%rsp)
	movq	$T_FAST_SYSCALL,TF_TRAPNO(%rsp)	/* for the vkernel */
	movq	$0,TF_XFLAGS(%rsp)	/* note: used in signal frame */
	movq	%rdi,TF_RDI(%rsp)	/* arg 1 */
	movq	%rsi,TF_RSI(%rsp)	/* arg 2 */
	movq	%rdx,TF_RDX(%rsp)	/* arg 3 */
	movq	%r10,TF_RCX(%rsp)	/* arg 4 */
	movq	%r8,TF_R8(%rsp)		/* arg 5 */
	movq	%r9,TF_R9(%rsp)		/* arg 6 */
	movq	%rax,TF_RAX(%rsp)	/* syscall number */
	movq	%rbx,TF_RBX(%rsp)	/* C preserved */
	movq	%rbp,TF_RBP(%rsp)	/* C preserved */
	movq	%r12,TF_R12(%rsp)	/* C preserved */
	movq	%r13,TF_R13(%rsp)	/* C preserved */
	movq	%r14,TF_R14(%rsp)	/* C preserved */
	movq	%r15,TF_R15(%rsp)	/* C preserved */
	sti
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp, %rdi
	call	syscall2

	/*
	 * Fast return from system call
	 */
	cli
	testl	$RQF_IPIQ|RQF_TIMER|RQF_INTPEND|RQF_AST_MASK,PCPU(reqflags)
	jnz	1f
	testl	$RQF_QUICKRET,PCPU(reqflags)
	jz	1f
	MEXITCOUNT
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RAX(%rsp),%rax
	movq	TF_RFLAGS(%rsp),%r11
	movq	TF_RIP(%rsp),%rcx
	movq	TF_RSP(%rsp),%rsp
	KMMUEXIT_SYSCALL
	swapgs
	sysretq

	/*
	 * Normal slow / full iret
	 */
1:
	MEXITCOUNT
	jmp	doreti
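/*
 * The quick path above relies on the sysretq contract: the cpu reloads
 * %rip from %rcx and %rflags from %r11, with the %cs/%ss selectors
 * derived from MSR_STAR.  That is why those two registers are loaded
 * from the frame last, and why the user %rsp must be restored
 * explicitly before the KMMUEXIT_SYSCALL / swapgs / sysretq sequence.
 */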
/*
 * Here for CYA insurance, in case a "syscall" instruction gets
 * issued from 32 bit compatibility mode.  MSR_CSTAR has to point
 * to *something* if EFER_SCE is enabled.
 */
IDTVEC(fast_syscall32)
	sysret

/*
 * NMI handling is special.
 *
 * First, NMIs do not respect the state of the processor's RFLAGS.IF
 * bit and the NMI handler may be invoked at any time, including when
 * the processor is in a critical section with RFLAGS.IF == 0.  In
 * particular, this means that the processor's GS.base values could be
 * inconsistent on entry to the handler, and so we need to read
 * MSR_GSBASE to determine if a 'swapgs' is needed.  We use '%ebx', a
 * C-preserved register, to remember whether to swap GS back on the
 * exit path.
 *
 * Second, the processor treats NMIs specially, blocking further NMIs
 * until an 'iretq' instruction is executed.  We therefore need to
 * execute the NMI handler with interrupts disabled to prevent a
 * nested interrupt from executing an 'iretq' instruction and
 * inadvertently taking the processor out of NMI mode.
 *
 * Third, the NMI handler runs on its own stack (tss_ist1), shared
 * with the double fault handler.
 */

IDTVEC(nmi)
	PUSH_FRAME_TFRIP
	movq	$0,TF_XFLAGS(%rsp)
	movq	$T_NMI,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)

	FAKE_MCOUNT(TF_RIP(%rsp))
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT

	POP_FRAME(jmp doreti_iret)

/*
 * This function is what cpu_heavy_restore jumps to after a new process
 * is created.  The LWKT subsystem switches while holding a critical
 * section, and we maintain that abstraction here (e.g. because
 * cpu_heavy_restore needs it due to PCB_*() manipulation), then get out
 * of it before calling the initial function (typically fork_return())
 * and/or returning to user mode.
 *
 * The MP lock is not held at any point, but the critical section count
 * is bumped on entry to prevent interruption of the trampoline at a
 * bad point.
 *
 * This is effectively what td->td_switch() returns to.  It 'returns'
 * the old thread in %rax, and since this is not returning to a
 * td->td_switch() call from lwkt_switch(), we must handle the cleanup
 * for the old thread by calling lwkt_switch_return().
 *
 * fork_trampoline(%rax:otd, %rbx:func, %r12:arg)
 */
ENTRY(fork_trampoline)
	movq	%rax,%rdi
	call	lwkt_switch_return
	movq	PCPU(curthread),%rax
	decl	TD_CRITCOUNT(%rax)

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 *
	 * initproc has its own fork handler, start_init(), which DOES
	 * return.
	 *
	 * %rbx - chaining function (typically fork_return)
	 * %r12 -> %rdi (argument)
	 * frame -> %rsi (trap frame)
	 *
	 *	void (func:rbx)(arg:rdi, trapframe:rsi)
	 */
	movq	%rsp, %rsi		/* pass trapframe by reference */
	movq	%r12, %rdi		/* arg1 */
	call	*%rbx			/* function */

	/* cut from syscall */

	sti
	call	splz

	/*
	 * Return via doreti to handle ASTs.
	 *
	 * The trapframe is at the top of the stack.
	 */
	MEXITCOUNT
	jmp	doreti
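/*
 * NOTE: the sti/splz pair above is assumed to flush any interrupt and
 * ipi work that was deferred while the trampoline's critical section
 * was held; doreti then handles ASTs and performs the actual return
 * to user mode.
 */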
/*
 * To efficiently implement classification of trap and interrupt handlers
 * for profiling, there must be only trap handlers between the labels btrap
 * and bintr, and only interrupt handlers between the labels bintr and
 * eintr.  This is implemented (partly) by including files that contain
 * some of the handlers.  Before including the files, set up a normal asm
 * environment so that the included files don't need to know that they
 * are included.
 */

#if 0 /* COMPAT_IA32 */
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <x86_64/ia32/ia32_exception.S>
#endif

	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
MCOUNT_LABEL(bintr)

#if 0 /* JG */
#include <x86_64/x86_64/apic_vector.S>
#endif

#ifdef DEV_ATPIC
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <x86_64/isa/atpic_vector.S>
#endif

	.text
MCOUNT_LABEL(eintr)
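/*
 * With these labels in place, a profiling sample can be classified by
 * comparing the sampled %rip against them: btrap <= %rip < bintr means
 * a trap handler, and bintr <= %rip < eintr means an interrupt handler.
 * (Assumed from the comment above; the comparisons themselves live in
 * the mcount/profiling code, not in this file.)
 */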