/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008-2018 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */

#include "use_isa.h"

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/clock.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/isa_intr.h>
#include <machine_base/apic/lapic.h>

#include <ddb/ddb.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * These %rip's are used to detect a historical CPU artifact on syscall or
 * int $3 entry, if not shortcutted in exception.S via
 * DIRECT_DISALLOW_SS_CPUBUG.
 */
extern void Xbpt(void);
extern void Xfast_syscall(void);
#define IDTVEC(vec)	X##vec

extern void trap(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

#define MAX_TRAP_MSG	30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
	&ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
__read_mostly static int freeze_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW,
	&freeze_on_seg_fault, 0, "Freeze the process on user seg-fault");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");

/*
 * System call debugging records the worst-case system call
 * overhead (inclusive of blocking), but may be inaccurate.
 */
/*#define SYSCALL_DEBUG*/
#ifdef SYSCALL_DEBUG

#define SCWC_MAXT	30

struct syscallwc {
	uint32_t idx;
	uint32_t dummy;
	uint64_t tot[SYS_MAXSYSCALL];
	uint64_t timings[SYS_MAXSYSCALL][SCWC_MAXT];
} __cachealign;

struct syscallwc SysCallsWorstCase[MAXCPU];

#endif

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 *
 * To avoid races with another thread updating p_ucred we obtain p_spin.
 * The other thread doing the update will obtain both p_token and p_spin.
 * In the case where the cached cred pointer matches, we will already have
 * the ref and we don't have to do one blessed thing.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (__predict_false(curtd->td_ucred != curp->p_ucred)) {
		spin_lock(&curp->p_spin);
		ncred = crhold(curp->p_ucred);
		spin_unlock(&curp->p_spin);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}

#ifdef DDB
	/*
	 * Debugging, remove top two user stack pages to catch kernel faults
	 */
	if (__predict_false(freeze_on_seg_fault > 1 && curtd->td_lwp)) {
		pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace),
			    0x00007FFFFFFFD000LU,
			    0x0000800000000000LU);
	}
#endif
}

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64-bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;
	int ptok;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (__predict_false(p->p_flags & P_PROFIL)) {
		addupc_task(p, frame->tf_rip,
			    (u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
	 * LWP_MP_VNLRU, etc).
	 */
	if (lp->lwp_mpflags & LWP_MP_URETMASK)
		lwpuserret(lp);

	/*
	 * Block here if we are in a stopped state.
	 */
	if (__predict_false(STOPLWP(p, lp))) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}
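	/*
	 * If a kernel core dump is being written, wait here so user
	 * threads do not run while the dump is in progress
	 * (dump_stop_usertds comes from <sys/kerneldump.h>, included
	 * above).
	 */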
	while (__predict_false(dump_stop_usertds)) {
		tsleep(&dump_stop_usertds, 0, "dumpstp", 0);
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (__predict_false(p->p_flags & (P_SIGVTALRM | P_SIGPROF))) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if (__predict_false((sig = CURSIG_LCK_TRACE(lp, &ptok)) != 0)) {
		postsig(sig, ptok);
		goto recheck;
	}

	/*
	 * Block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (__predict_false(p->p_flags & P_SWAPPEDOUT)) {
		lwkt_gettoken(&p->p_token);
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (__predict_false(lp->lwp_flags & LWP_OLDMASK)) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
		goto recheck;
	}
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (__predict_false(STOPLWP(lp->lwp_proc, lp))) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		tstop();
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 *
	 * Do a silly hack to avoid RETPOLINE nonsense: test for the common
	 * scheduler explicitly so the usual case takes a direct call rather
	 * than an indirect (retpoline-protected) one.
	 */
	if (lp->lwp_proc->p_usched == &usched_dfly)
		dfly_acquire_curproc(lp);
	else
		lp->lwp_proc->p_usched->acquire_curproc(lp);
}

/*
 * A page fault on a userspace address is classified as SMAP-induced
 * if:
 *	- SMAP is supported
 *	- kernel mode accessed present data page
 *	- rflags.AC was cleared
 */
static int
trap_is_smap(struct trapframe *frame)
{
	if ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 &&
	    (frame->tf_err & (PGEX_P | PGEX_U | PGEX_I | PGEX_RSV)) == PGEX_P &&
	    (frame->tf_rflags & PSL_AC) == 0) {
		return 1;
	} else {
		return 0;
	}
}

#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
	 "TRAP(pid %d, tid %d, trapno %ld, eva %lu)",
	 pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %ld)",
	 pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0,
	 "SYSRET(pid %d, tid %d, err %d)",
	 pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to potentially
 *	  blocking, including blocking on any token.
 *
 * NOTE!  NMI and kernel DBG traps remain on their respective pcpu IST
 *	  stacks if taken from a kernel RPL.  trap() cannot block in this
 *	  situation.  DDB entry or a direct report-and-return is ok.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.
 */
void
trap(struct trapframe *frame)
{
	static struct krate sscpubugrate = { 1 };
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	clear_quickret();

#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if ((type == T_STKFLT || type == T_PROTFLT ||
			    type == T_SEGNPFLT) &&
			   frame->tf_rip == (long)doreti_iret) {
			/*
			 * iretq fault from kernel mode during return to
			 * userland.
			 *
			 * This situation is expected, don't complain.
			 */
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			kprintf("kernel trap %d (%s @ 0x%016jx) with "
				"interrupts disabled\n",
				type,
				td->td_comm,
				frame->tf_rip);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		KASSERT(lp->lwp_md.md_regs == frame,
			("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE);
#ifdef DDB
			if (frame->tf_rip == 0) {
				/* used for kernel debugging only */
				while (freeze_on_seg_fault)
					tsleep(p, 0, "freeze", hz * 20);
			}
#endif
			if (i == -1 || i == 0)
				goto out;
			if (i == SIGSEGV) {
				ucode = SEGV_MAPERR;
			} else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna()) {
				gd->gd_cnt.v_trap++;
				goto out;
			}
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna()) {
				gd->gd_cnt.v_trap++;
				goto out2;
			}
			break;

		case T_STKFLT:		/* stack fault */
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * NOTE: in 64-bit mode traps push rsp/ss
				 *	 even if no ring change occurs.
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    frame->tf_rsp) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}

				/*
				 * If the iretq in doreti faults during
				 * return to user, it will be special-cased
				 * in IDTVEC(prot) to get here.  We want
				 * to 'return' to doreti_iret_fault in
				 * ipl.s in approximately the same state we
				 * were in at the iretq.
				 */
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
#if 0
				/* do we need this? */
				if (frame->tf_rip == (long)doreti_iret)
					frame->tf_rip = (long)doreti_iret_fault;
#endif
				goto out2;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			/*
			 * Detect historical CPU artifact on syscall or int $3
			 * entry (if not shortcutted in exception.s via
			 * DIRECT_DISALLOW_SS_CPUBUG).
			 */
			gd->gd_cnt.v_trap++;
			if (frame->tf_rip == (register_t)IDTVEC(fast_syscall)) {
				krateprintf(&sscpubugrate,
					"Caught #DB at syscall cpu artifact\n");
				goto out2;
			}
			if (frame->tf_rip == (register_t)IDTVEC(bpt)) {
				krateprintf(&sscpubugrate,
					"Caught #DB at int $N cpu artifact\n");
				goto out2;
			}

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				load_dr6(rdr6() & ~0xf);
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Fault from user mode, virtual kernel intercept.
	 *
	 * If the fault is directly related to a VM context managed by a
	 * virtual kernel then let the virtual kernel handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	gd->gd_cnt.v_trap++;
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_critcount));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld (%s)",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base,
		td->td_toks_stop[-1].tr_tok->t_desc));
#endif
}
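
/*
 * Exported wrappers around the static userenter()/userret()/userexit()
 * helpers above, for callers outside this file (e.g. the virtual kernel
 * support code) that must perform the same user-mode enter/exit
 * bookkeeping.
 */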
void
trap_handle_userenter(struct thread *td)
{
	userenter(td, td->td_proc);
}

void
trap_handle_userexit(struct trapframe *frame, int sticks)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		userret(lp, frame, sticks);
		userexit(lp);
	}
}

static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		if (usermode == 0) {
#ifdef DDB
			/*
			 * Debugging, catch kernel faults on the user address
			 * space when not inside an onfault (e.g. copyin/
			 * copyout) routine.
			 */
			if (td->td_pcb == NULL ||
			    td->td_pcb->pcb_onfault == NULL) {
				if (freeze_on_seg_fault) {
					kprintf("trap_pfault: user address "
						"fault from kernel mode "
						"%016lx\n",
						(long)frame->tf_addr);
					while (freeze_on_seg_fault) {
						tsleep(&freeze_on_seg_fault,
						       0,
						       "frzseg",
						       hz * 20);
					}
				}
			}
#endif
			if (td->td_gd->gd_intr_nesting_level ||
			    trap_is_smap(frame) ||
			    td->td_pcb == NULL ||
			    td->td_pcb->pcb_onfault == NULL) {
				kprintf("Fatal user address access "
					"from kernel mode from %s at %016jx\n",
					td->td_comm, frame->tf_rip);
				trap_fatal(frame, frame->tf_addr);
				return (-1);
			}
		}
		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else if (frame->tf_err & PGEX_I)
		ftype = VM_PROT_EXECUTE;
	else
		ftype = VM_PROT_READ;

	lwkt_tokref_t stop = td->td_toks_stop;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST | VM_FAULT_USERMODE;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);
		if (td->td_toks_stop != stop) {
			stop = td->td_toks_stop - 1;
			kprintf("A-HELD TOKENS DURING PFAULT td=%p(%s) "
				"map=%p va=%p ftype=%d fault_flags=%d\n",
				td, td->td_comm, map, (void *)va,
				ftype, fault_flags);
			panic("held tokens");
		}

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
		if (td->td_toks_stop != stop) {
			stop = td->td_toks_stop - 1;
			kprintf("B-HELD TOKENS DURING PFAULT td=%p(%s) "
				"map=%p va=%p ftype=%d fault_flags=%d\n",
				td, td->td_comm, map, (void *)va,
				ftype, VM_FAULT_NORMAL);
			panic("held tokens");
		}
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss
		 *	 even if no ring change occurs.
		 */
		if (td->td_pcb->pcb_onfault &&
		    td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
		    td->td_gd->gd_intr_nesting_level == 0) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
#ifdef DDB
	if (td->td_lwp->lwp_vkernel == NULL) {
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
	}
#endif

	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
		ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	if (lapic_usable)
		kprintf("lapic id = %u\n", LAPIC_READID);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" :
					"page not present");
	}
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
		 *	 change occurs.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("             = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long,
		softseg.ssd_def32, softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n", (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (as is the case with infinite recursion,
 * for example).
 */
static __inline
int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return 1;
	}
	return 0;
}

void
dblfault_handler(struct trapframe *frame)
{
	thread_t td = curthread;

	if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
		kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
		if (in_kstack_guard(frame->tf_rsp))
			frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
		if (in_kstack_guard(frame->tf_rbp))
			frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
	} else {
		kprintf("DOUBLE FAULT\n");
	}
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	if (lapic_usable)
		kprintf("lapic id = %u\n", LAPIC_READID);
	panic("double fault");
}
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
	register_t *argp;
	u_int code;
	int regcnt, optimized_regcnt;
	union sysunion args;
	register_t *argsdst;

	mycpu->gd_cnt.v_syscall++;

#ifdef DIAGNOSTIC
	if (__predict_false(ISPL(frame->tf_cs) != SEL_UPL)) {
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

	userenter(td, p);	/* lazy raise our priority */

	regcnt = 6;
	optimized_regcnt = 6;

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (__predict_false(lp->lwp_vkernel && lp->lwp_vkernel->ve)) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		callp = NULL;
		code = 0;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	KASSERT(lp->lwp_md.md_regs == frame,
		("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
	code = (u_int)frame->tf_rax;

	if (__predict_false(code == SYS_syscall || code == SYS___syscall)) {
		code = frame->tf_rdi;
		regcnt--;
		argp = &frame->tf_rdi + 1;
	} else {
		argp = &frame->tf_rdi;
	}

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	/*
	 * On x86_64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);

	/*
	 * It's easier to copy up to the highest number of syscall arguments
	 * passed in registers, which is 6, than to conditionalize it.
	 */
	bcopy(argp, argsdst, sizeof(register_t) * optimized_regcnt);

	/*
	 * Any arguments beyond available argument-passing registers must
	 * be copyin()'d from the user stack.
	 */
	narg = callp->sy_narg;
	if (__predict_false(narg > regcnt)) {
		caddr_t params;

		params = (caddr_t)frame->tf_rsp + sizeof(register_t);
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINTP(p, td, KTR_SYSCALL)) {
				ktrsyscall(lp, code, narg,
					   (void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINTP(p, td, KTR_SYSCALL)) {
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
#ifdef SYSCALL_DEBUG
	tsc_uclock_t tscval = rdtsc();
#endif
	error = (*callp->sy_call)(&args);
#ifdef SYSCALL_DEBUG
	tscval = rdtsc() - tscval;
	tscval = tscval * 1000000 / (tsc_frequency / 1000);	/* ns */
	{
		struct syscallwc *scwc = &SysCallsWorstCase[mycpu->gd_cpuid];
		int idx = scwc->idx++ % SCWC_MAXT;

		scwc->tot[code] += tscval - scwc->timings[code][idx];
		scwc->timings[code][idx] = tscval;
	}
#endif

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	/*kprintf("SYSMSG %d ", error);*/
	if (__predict_true(error == 0)) {
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
	} else if (error == ERESTART) {
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		if (frame->tf_err != 0 && frame->tf_err != 2)
			kprintf("lp %s:%d frame->tf_err is weird %ld\n",
				td->td_comm, lp->lwp_proc->p_pid,
				frame->tf_err);
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
	} else if (error == EJUSTRETURN) {
		/* do nothing */
	} else if (error == EASYNC) {
		panic("Unexpected EASYNC return value (for now)");
	} else {
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
	}

	/*
	 * Traced syscall.  trapsignal() should now be MP aware
	 */
	if (__predict_false(orig_tf_rflags & PSL_T)) {
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINTP(p, td, KTR_SYSRET)) {
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_critcount));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: %ld extra tokens held after trap! syscall %p",
		td->td_toks_stop - &td->td_toks_base,
		callp->sy_call));
#endif
}
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Check for exit-race.  If one lwp exits the process concurrent with
	 * another lwp creating a new thread, the two operations may cross
	 * each other resulting in the newly-created lwp not receiving a
	 * KILL signal.
	 */
	if (p->p_flags & P_WEXIT) {
		lwpsignal(p, lp, SIGKILL);
	}

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINTP(p, lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_rax = error;
	if (error)
		frame->tf_rflags |= PSL_C;
	else
		frame->tf_rflags &= ~PSL_C;
}