/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */

#include "use_isa.h"

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/clock.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/isa_intr.h>
#include <machine_base/apic/lapic.h>

#include <ddb/ddb.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}

extern void trap(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

#define MAX_TRAP_MSG	30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
	&ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
static int freeze_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW,
	&freeze_on_seg_fault, 0, "Go to DDB on user seg-fault");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

/*
 * System call debugging records the worst-case system call
 * overhead (inclusive of blocking), but may be inaccurate.
 */
/*#define SYSCALL_DEBUG*/
#ifdef SYSCALL_DEBUG
uint64_t SysCallsWorstCase[SYS_MAXSYSCALL];
#endif

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 *
 * To avoid races with another thread updating p_ucred we obtain p_spin.
 * The other thread doing the update will obtain both p_token and p_spin.
 * In the case where the cached cred pointer matches, we will already have
 * the ref and we don't have to do one blessed thing.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		spin_lock(&curp->p_spin);
		ncred = crhold(curp->p_ucred);
		spin_unlock(&curp->p_spin);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}

#ifdef DDB
	/*
	 * Debugging, remove top two user stack pages to catch kernel faults
	 */
	if (freeze_on_seg_fault > 1 && curtd->td_lwp) {
		pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace),
			    0x00007FFFFFFFD000LU,
			    0x0000800000000000LU);
	}
#endif
}

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
	 * LWP_MP_VNLRU, etc).
	 */
	if (lp->lwp_mpflags & LWP_MP_URETMASK)
		lwpuserret(lp);

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP || p->p_stat == SCORE || dump_stop_usertds) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		lwkt_gettoken(&p->p_token);
		postsig(sig);
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		get_mplock();
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		rel_mplock();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (lp->lwp_flags & LWP_OLDMASK) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
		goto recheck;
	}
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP ||
	       lp->lwp_proc->p_stat == SCORE) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		tstop();
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
}

#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
	 "TRAP(pid %d, tid %d, trapno %ld, eva %lu)",
	 pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %ld)",
	 pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
	 pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */

void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	clear_quickret();

#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			MAKEMPSAFE(have_mplock);
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		KASSERT(lp->lwp_md.md_regs == frame,
			("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE);
			if (frame->tf_rip == 0) {
				kprintf("T_PAGEFLT: Warning %%rip == 0!\n");
#ifdef DDB
				while (freeze_on_seg_fault)
					tsleep(p, 0, "freeze", hz * 20);
#endif
			}
			if (i == -1 || i == 0)
				goto out;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
			break;

		case T_STKFLT:		/* stack fault */
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * NOTE: in 64-bit mode traps push rsp/ss
				 *	 even if no ring change occurs.
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    frame->tf_rsp) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
#if 0
			if (frame->tf_rip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}
#endif

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
#if 0 /* JG */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
#endif
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (have_mplock)
		rel_mplock();
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}

void
trap_handle_userenter(struct thread *td)
{
	userenter(td, td->td_proc);
}

void
trap_handle_userexit(struct trapframe *frame, int sticks)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		userret(lp, frame, sticks);
		userexit(lp);
	}
}

static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		/*
		 * Debugging, try to catch kernel faults on the user address
		 * space when not inside an onfault (e.g. copyin/copyout)
		 * routine.
		 */
		if (usermode == 0 && (td->td_pcb == NULL ||
		    td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
			if (freeze_on_seg_fault) {
				kprintf("trap_pfault: user address fault from kernel mode "
					"%016lx\n", (long)frame->tf_addr);
				while (freeze_on_seg_fault)
					tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);
			}
#endif
		}
		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
#if 0 /* JG */
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
#endif
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss
		 *	 even if no ring change occurs.
		 */
		if (td->td_pcb->pcb_onfault &&
		    td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
		    td->td_gd->gd_intr_nesting_level == 0) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
#ifdef DDB
	if (td->td_lwp->lwp_vkernel == NULL) {
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
	}
#endif

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ?
			"protection violation" : "page not present");
	}
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
		 *	 change occurs.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long,
		softseg.ssd_def32, softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n",
		    (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
static __inline
int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return 1;
	}
	return 0;
}

void
dblfault_handler(struct trapframe *frame)
{
	thread_t td = curthread;

	if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
		kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
		if (in_kstack_guard(frame->tf_rsp))
			frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
		if (in_kstack_guard(frame->tf_rbp))
			frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
	} else {
		kprintf("DOUBLE FAULT\n");
	}
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	panic("double fault");
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
	int have_mplock = 0;
	register_t *argp;
	u_int code;
	int reg, regcnt;
	union sysunion args;
	register_t *argsdst;

	mycpu->gd_cnt.v_syscall++;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

	userenter(td, p);	/* lazy raise our priority */

	reg = 0;
	regcnt = 6;
	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	KASSERT(lp->lwp_md.md_regs == frame,
		("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;
			reg++;
			regcnt--;
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On x86_64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	argp += reg;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);
	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);
	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
#ifdef SYSCALL_DEBUG
	uint64_t tscval = rdtsc();
#endif
	error = (*callp->sy_call)(&args);
#ifdef SYSCALL_DEBUG
	tscval = rdtsc() - tscval;
	tscval = tscval * 1000000 / tsc_frequency;
	if (SysCallsWorstCase[code] < tscval)
		SysCallsWorstCase[code] = tscval;
#endif

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	//kprintf("SYSMSG %d ", error);
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		if (frame->tf_err != 0 && frame->tf_err != 2)
			kprintf("lp %s:%d frame->tf_err is weird %ld\n",
				td->td_comm, lp->lwp_proc->p_pid,
				frame->tf_err);
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %ld",
		 td->td_toks_stop - &td->td_toks_base));
#endif
}

/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_rax = error;
	if (error)
		frame->tf_rflags |= PSL_C;
	else
		frame->tf_rflags &= ~PSL_C;
}