/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */

#include "use_isa.h"

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/clock.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/isa_intr.h>
#include <machine_base/apic/lapic.h>

#include <ddb/ddb.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

/*
 * Obtain the MP lock exactly once for a code path that is not MP safe.
 * The caller's have_mplock flag records that the lock was taken so it
 * can be released on the way out.
 */
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}

extern void trap(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

#define MAX_TRAP_MSG		30
static char *trap_msg[] = {
	"",				/*  0 unused */
	"privileged instruction fault",	/*  1 T_PRIVINFLT */
	"",				/*  2 unused */
	"breakpoint instruction fault",	/*  3 T_BPTFLT */
	"",				/*  4 unused */
	"",				/*  5 unused */
	"arithmetic trap",		/*  6 T_ARITHTRAP */
	"system forced exception",	/*  7 T_ASTFLT */
	"",				/*  8 unused */
	"general protection fault",	/*  9 T_PROTFLT */
	"trace trap",			/* 10 T_TRCTRAP */
	"",				/* 11 unused */
	"page fault",			/* 12 T_PAGEFLT */
	"",				/* 13 unused */
	"alignment fault",		/* 14 T_ALIGNFLT */
	"",				/* 15 unused */
	"",				/* 16 unused */
	"",				/* 17 unused */
	"integer divide fault",		/* 18 T_DIVIDE */
	"non-maskable interrupt trap",	/* 19 T_NMI */
	"overflow trap",		/* 20 T_OFLOW */
	"FPU bounds check fault",	/* 21 T_BOUND */
	"FPU device not available",	/* 22 T_DNA */
	"double fault",			/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",	/* 24 T_FPOPFLT */
	"invalid TSS fault",		/* 25 T_TSSFLT */
	"segment not present fault",	/* 26 T_SEGNPFLT */
	"stack fault",			/* 27 T_STKFLT */
	"machine check trap",		/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",	/* 30 T_RESERVED */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
	&ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
static int freeze_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW,
	&freeze_on_seg_fault, 0, "Freeze process on user seg-fault");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

/*
 * System call debugging records the worst-case system call
 * overhead (inclusive of blocking), but may be inaccurate.
 */
/*#define SYSCALL_DEBUG*/
#ifdef SYSCALL_DEBUG
uint64_t SysCallsWorstCase[SYS_MAXSYSCALL];
#endif

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 *
 * To avoid races with another thread updating p_ucred we obtain p_spin.
 * The other thread doing the update will obtain both p_token and p_spin.
 * In the case where the cached cred pointer matches, we will already have
 * the ref and we don't have to do one blessed thing.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		spin_lock(&curp->p_spin);
		ncred = crhold(curp->p_ucred);
		spin_unlock(&curp->p_spin);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}

#ifdef DDB
	/*
	 * Debugging, remove top two user stack pages to catch kernel faults
	 */
	if (freeze_on_seg_fault > 1 && curtd->td_lwp) {
		pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace),
			    0x00007FFFFFFFD000LU,
			    0x0000800000000000LU);
	}
#endif
}

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
	 * LWP_MP_VNLRU, etc).
	 */
	if (lp->lwp_mpflags & LWP_MP_URETMASK)
		lwpuserret(lp);

	/*
	 * Block here if we are in a stopped state.
	 */
	if (STOPLWP(p, lp)) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}
	while (dump_stop_usertds) {
		tsleep(&dump_stop_usertds, 0, "dumpstp", 0);
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		lwkt_gettoken(&p->p_token);
		postsig(sig);
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		get_mplock();
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		rel_mplock();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (lp->lwp_flags & LWP_OLDMASK) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
		goto recheck;
	}
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (STOPLWP(lp->lwp_proc, lp)) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		tstop();
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
}

#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
	 "TRAP(pid %d, tid %d, trapno %ld, eva %lu)",
	 pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %ld)",
	 pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
	 pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */

void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	clear_quickret();

#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			MAKEMPSAFE(have_mplock);
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d (%s @ 0x%016jx) with "
				"interrupts disabled\n",
				type,
				td->td_comm,
				frame->tf_rip);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		KASSERT(lp->lwp_md.md_regs == frame,
			("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE);
			if (frame->tf_rip == 0) {
#ifdef DDB
				/* used for kernel debugging only */
				while (freeze_on_seg_fault)
					tsleep(p, 0, "freeze", hz * 20);
#endif
			}
			if (i == -1 || i == 0)
				goto out;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
			break;

		case T_STKFLT:		/* stack fault */
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * NOTE: in 64-bit mode traps push rsp/ss
				 * even if no ring change occurs.
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    frame->tf_rsp) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
#if 0
			if (frame->tf_rip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}
#endif

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
#if 0 /* JG */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
#endif
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (have_mplock)
		rel_mplock();
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}

void
trap_handle_userenter(struct thread *td)
{
	userenter(td, td->td_proc);
}

void
trap_handle_userexit(struct trapframe *frame, int sticks)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		userret(lp, frame, sticks);
		userexit(lp);
	}
}

static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		/*
		 * Debugging, try to catch kernel faults on the user address
		 * space when not inside an onfault (e.g. copyin/copyout)
		 * routine.
		 */
		if (usermode == 0 && (td->td_pcb == NULL ||
		    td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
			if (freeze_on_seg_fault) {
				kprintf("trap_pfault: user address fault from kernel mode "
					"%016lx\n", (long)frame->tf_addr);
				while (freeze_on_seg_fault)
					tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);
			}
#endif
		}
		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
#if 0 /* JG */
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
#endif
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss
		 * even if no ring change occurs.
		 */
		if (td->td_pcb->pcb_onfault &&
		    td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
		    td->td_gd->gd_intr_nesting_level == 0) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
#ifdef DDB
	if (td->td_lwp->lwp_vkernel == NULL) {
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
	}
#endif

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
		ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ?
			    "protection violation" : "page not present");
	}
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
		 * change occurs.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("             = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long,
		softseg.ssd_def32, softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n", (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
static __inline
int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return 1;
	}
	return 0;
}

void
dblfault_handler(struct trapframe *frame)
{
	thread_t td = curthread;

	if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
		kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
		if (in_kstack_guard(frame->tf_rsp))
			frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
		if (in_kstack_guard(frame->tf_rbp))
			frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
	} else {
		kprintf("DOUBLE FAULT\n");
	}
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	panic("double fault");
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
	int have_mplock = 0;
	register_t *argp;
	u_int code;
	int reg, regcnt;
	union sysunion args;
	register_t *argsdst;

	mycpu->gd_cnt.v_syscall++;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

	userenter(td, p);	/* lazily raise our priority */

	reg = 0;
	regcnt = 6;
	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	KASSERT(lp->lwp_md.md_regs == frame,
		("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;
			reg++;
			regcnt--;
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On x86_64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	argp += reg;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);
	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);
	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					   (void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 * is responsible for getting the MP lock.
	 */
#ifdef SYSCALL_DEBUG
	uint64_t tscval = rdtsc();
#endif
	error = (*callp->sy_call)(&args);
#ifdef SYSCALL_DEBUG
	tscval = rdtsc() - tscval;
	tscval = tscval * 1000000 / tsc_frequency;
	if (SysCallsWorstCase[code] < tscval)
		SysCallsWorstCase[code] = tscval;
#endif

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	//kprintf("SYSMSG %d ", error);
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		if (frame->tf_err != 0 && frame->tf_err != 2)
			kprintf("lp %s:%d frame->tf_err is weird %ld\n",
				td->td_comm, lp->lwp_proc->p_pid,
				frame->tf_err);
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %ld",
		td->td_toks_stop - &td->td_toks_base));
#endif
}

/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Check for exit-race.  If one lwp exits the process concurrent with
	 * another lwp creating a new thread, the two operations may cross
	 * each other resulting in the newly-created lwp not receiving a
	 * KILL signal.
	 */
	if (p->p_flags & P_WEXIT) {
		kprintf("pid %d (%s) exit race handled\n",
			p->p_pid, p->p_comm);
		lwpsignal(p, lp, SIGKILL);
	}

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_rax = error;
	if (error)
		frame->tf_rflags |= PSL_C;
	else
		frame->tf_rflags &= ~PSL_C;
}