/*-
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008-2018 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c    7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */

#include "use_isa.h"

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/clock.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/isa_intr.h>
#include <machine_base/apic/lapic.h>

#include <ddb/ddb.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * These %rip's are used to detect a historical CPU artifact on syscall or
 * int $3 entry, if not shortcutted in exception.S via
 * DIRECT_DISALLOW_SS_CPUBUG.
 */
extern void Xbpt(void);
extern void Xfast_syscall(void);
#define IDTVEC(vec)     X##vec

extern void trap(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

#define MAX_TRAP_MSG            30
static char *trap_msg[] = {
        "",                                     /*  0 unused */
        "privileged instruction fault",         /*  1 T_PRIVINFLT */
        "",                                     /*  2 unused */
        "breakpoint instruction fault",         /*  3 T_BPTFLT */
        "",                                     /*  4 unused */
        "",                                     /*  5 unused */
        "arithmetic trap",                      /*  6 T_ARITHTRAP */
        "system forced exception",              /*  7 T_ASTFLT */
        "",                                     /*  8 unused */
        "general protection fault",             /*  9 T_PROTFLT */
        "trace trap",                           /* 10 T_TRCTRAP */
        "",                                     /* 11 unused */
        "page fault",                           /* 12 T_PAGEFLT */
        "",                                     /* 13 unused */
        "alignment fault",                      /* 14 T_ALIGNFLT */
        "",                                     /* 15 unused */
        "",                                     /* 16 unused */
        "",                                     /* 17 unused */
        "integer divide fault",                 /* 18 T_DIVIDE */
        "non-maskable interrupt trap",          /* 19 T_NMI */
        "overflow trap",                        /* 20 T_OFLOW */
        "FPU bounds check fault",               /* 21 T_BOUND */
        "FPU device not available",             /* 22 T_DNA */
        "double fault",                         /* 23 T_DOUBLEFLT */
        "FPU operand fetch fault",              /* 24 T_FPOPFLT */
        "invalid TSS fault",                    /* 25 T_TSSFLT */
        "segment not present fault",            /* 26 T_SEGNPFLT */
        "stack fault",                          /* 27 T_STKFLT */
        "machine check trap",                   /* 28 T_MCHK */
        "SIMD floating-point exception",        /* 29 T_XMMFLT */
        "reserved (unknown) fault",             /* 30 T_RESERVED */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
        &ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
        &ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
static int freeze_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW,
        &freeze_on_seg_fault, 0, "Freeze the process on user seg-fault");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
        &panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
        &fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
        &slow_release, 0, "Passive Release was nonoptimal");

/*
 * System call debugging records the worst-case system call
 * overhead (inclusive of blocking), but may be inaccurate.
 */
/*#define SYSCALL_DEBUG*/
#ifdef SYSCALL_DEBUG
uint64_t SysCallsWorstCase[SYS_MAXSYSCALL];
#endif

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 *
 * To avoid races with another thread updating p_ucred we obtain p_spin.
 * The other thread doing the update will obtain both p_token and p_spin.
 * In the case where the cached cred pointer matches, we will already have
 * the ref and we don't have to do one blessed thing.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
        struct ucred *ocred;
        struct ucred *ncred;

        curtd->td_release = lwkt_passive_release;

        if (curtd->td_ucred != curp->p_ucred) {
                spin_lock(&curp->p_spin);
                ncred = crhold(curp->p_ucred);
                spin_unlock(&curp->p_spin);
                ocred = curtd->td_ucred;
                curtd->td_ucred = ncred;
                if (ocred)
                        crfree(ocred);
        }

#ifdef DDB
        /*
         * Debugging, remove top two user stack pages to catch kernel faults
         */
        if (freeze_on_seg_fault > 1 && curtd->td_lwp) {
                pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace),
                            0x00007FFFFFFFD000LU,
                            0x0000800000000000LU);
        }
#endif
}

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64-bit arithmetic on the delta calculation, so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
        struct proc *p = lp->lwp_proc;
        int sig;
        int ptok;

        /*
         * Charge system time if profiling.  Note: times are in microseconds.
         * This may do a copyout and block, so do it first even though it
         * means some system time will be charged as user time.
         */
        if (p->p_flags & P_PROFIL) {
                addupc_task(p, frame->tf_rip,
                        (u_int)((int)lp->lwp_thread->td_sticks - sticks));
        }

recheck:
        /*
         * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
         * LWP_MP_VNLRU, etc).
         */
        if (lp->lwp_mpflags & LWP_MP_URETMASK)
                lwpuserret(lp);

        /*
         * Block here if we are in a stopped state.
         */
        if (STOPLWP(p, lp)) {
                lwkt_gettoken(&p->p_token);
                tstop();
                lwkt_reltoken(&p->p_token);
                goto recheck;
        }
        while (dump_stop_usertds) {
                tsleep(&dump_stop_usertds, 0, "dumpstp", 0);
        }

        /*
         * Post any pending upcalls.  If running a virtual kernel be sure
         * to restore the virtual kernel's vmspace before posting the upcall.
         */
        if (p->p_flags & (P_SIGVTALRM | P_SIGPROF)) {
                lwkt_gettoken(&p->p_token);
                if (p->p_flags & P_SIGVTALRM) {
                        p->p_flags &= ~P_SIGVTALRM;
                        ksignal(p, SIGVTALRM);
                }
                if (p->p_flags & P_SIGPROF) {
                        p->p_flags &= ~P_SIGPROF;
                        ksignal(p, SIGPROF);
                }
                lwkt_reltoken(&p->p_token);
                goto recheck;
        }

        /*
         * Post any pending signals.  If running a virtual kernel be sure
         * to restore the virtual kernel's vmspace before posting the signal.
         *
         * WARNING!  postsig() can exit and not return.
         */
        if ((sig = CURSIG_LCK_TRACE(lp, &ptok)) != 0) {
                postsig(sig, ptok);
                goto recheck;
        }

        /*
         * Block here if we are swapped out, but still process signals
         * (such as SIGKILL).  proc0 (the swapin scheduler) is already
         * aware of our situation, we do not have to wake it up.
         */
        if (p->p_flags & P_SWAPPEDOUT) {
                lwkt_gettoken(&p->p_token);
                p->p_flags |= P_SWAPWAIT;
                swapin_request();
                if (p->p_flags & P_SWAPWAIT)
                        tsleep(p, PCATCH, "SWOUT", 0);
                p->p_flags &= ~P_SWAPWAIT;
                lwkt_reltoken(&p->p_token);
                goto recheck;
        }

        /*
         * In a multi-threaded program it is possible for a thread to change
         * signal state during a system call which temporarily changes the
         * signal mask.  In this case postsig() might not be run and we
         * have to restore the mask ourselves.
         */
        if (lp->lwp_flags & LWP_OLDMASK) {
                lp->lwp_flags &= ~LWP_OLDMASK;
                lp->lwp_sigmask = lp->lwp_oldsigmask;
                goto recheck;
        }
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
        struct thread *td = lp->lwp_thread;
        /* globaldata_t gd = td->td_gd; */

        /*
         * Handle stop requests at kernel priority.  Any requests queued
         * after this loop will generate another AST.
         */
        while (STOPLWP(lp->lwp_proc, lp)) {
                lwkt_gettoken(&lp->lwp_proc->p_token);
                tstop();
                lwkt_reltoken(&lp->lwp_proc->p_token);
        }

        /*
         * Reduce our priority in preparation for a return to userland.  If
         * our passive release function was still in place, our priority was
         * never raised and does not need to be reduced.
         */
        lwkt_passive_recover(td);

        /* WARNING: we may have migrated cpu's */
        /* gd = td->td_gd; */

        /*
         * Become the current user scheduled process if we aren't already,
         * and deal with reschedule requests and other factors.
         */
        lp->lwp_proc->p_usched->acquire_curproc(lp);
}

#if !defined(KTR_KERNENTRY)
#define KTR_KERNENTRY   KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
         "TRAP(pid %d, tid %d, trapno %ld, eva %lu)",
         pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
         pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %ld)",
         pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
         pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
         pid_t pid, lwpid_t tid);

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to potentially
 *        blocking, including blocking on any token.
 *
 * NOTE!  NMI and kernel DBG traps remain on their respective pcpu IST
 *        stacks if taken from a kernel RPL.  trap() cannot block in this
 *        situation.  DDB entry or a direct report-and-return is ok.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.
 */
void
trap(struct trapframe *frame)
{
        static struct krate sscpubugrate = { 1 };
        struct globaldata *gd = mycpu;
        struct thread *td = gd->gd_curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p;
        int sticks = 0;
        int i = 0, ucode = 0, type, code;
#ifdef INVARIANTS
        int crit_count = td->td_critcount;
        lwkt_tokref_t curstop = td->td_toks_stop;
#endif
        vm_offset_t eva;

        p = td->td_proc;
        clear_quickret();

#ifdef DDB
        /*
         * We need to allow T_DNA faults when the debugger is active since
         * some dumping paths do large bcopy() which use the floating
         * point registers for faster copying.
         */
        if (db_active && frame->tf_trapno != T_DNA) {
                eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
                ++gd->gd_trap_nesting_level;
                trap_fatal(frame, eva);
                --gd->gd_trap_nesting_level;
                goto out2;
        }
#endif

        eva = 0;

        if ((frame->tf_rflags & PSL_I) == 0) {
                /*
                 * Buggy application or kernel code has disabled interrupts
                 * and then trapped.  Enabling interrupts now is wrong, but
                 * it is better than running with interrupts disabled until
                 * they are accidentally enabled later.
                 */

                type = frame->tf_trapno;
                if (ISPL(frame->tf_cs) == SEL_UPL) {
                        /* JG curproc can be NULL */
                        kprintf(
                            "pid %ld (%s): trap %d with interrupts disabled\n",
                            (long)curproc->p_pid, curproc->p_comm, type);
                } else if ((type == T_STKFLT || type == T_PROTFLT ||
                            type == T_SEGNPFLT) &&
                           frame->tf_rip == (long)doreti_iret) {
                        /*
                         * iretq fault from kernel mode during return to
                         * userland.
                         *
                         * This situation is expected, don't complain.
                         */
                } else if (type != T_NMI && type != T_BPTFLT &&
                           type != T_TRCTRAP) {
                        /*
                         * XXX not quite right, since this may be for a
                         * multiple fault in user mode.
                         */
                        kprintf("kernel trap %d (%s @ 0x%016jx) with "
                                "interrupts disabled\n",
                                type,
                                td->td_comm,
                                frame->tf_rip);
                }
                cpu_enable_intr();
        }

        type = frame->tf_trapno;
        code = frame->tf_err;

        if (ISPL(frame->tf_cs) == SEL_UPL) {
                /* user trap */

                KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
                        frame->tf_trapno, eva);

                userenter(td, p);

                sticks = (int)td->td_sticks;
                KASSERT(lp->lwp_md.md_regs == frame,
                        ("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

                switch (type) {
                case T_PRIVINFLT:       /* privileged instruction fault */
                        i = SIGILL;
                        ucode = ILL_PRVOPC;
                        break;

                case T_BPTFLT:          /* bpt instruction fault */
                case T_TRCTRAP:         /* trace trap */
                        frame->tf_rflags &= ~PSL_T;
                        i = SIGTRAP;
                        ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
                        break;

                case T_ARITHTRAP:       /* arithmetic trap */
                        ucode = code;
                        i = SIGFPE;
                        break;

                case T_ASTFLT:          /* Allow process switch */
                        mycpu->gd_cnt.v_soft++;
                        if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
                                atomic_clear_int(&mycpu->gd_reqflags,
                                                 RQF_AST_OWEUPC);
                                addupc_task(p, p->p_prof.pr_addr,
                                            p->p_prof.pr_ticks);
                        }
                        goto out;

                case T_PROTFLT:         /* general protection fault */
                        i = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;
                case T_STKFLT:          /* stack fault */
                case T_SEGNPFLT:        /* segment not present fault */
                        i = SIGBUS;
                        ucode = BUS_ADRERR;
                        break;
                case T_TSSFLT:          /* invalid TSS fault */
                case T_DOUBLEFLT:       /* double fault */
                default:
                        i = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;

                case T_PAGEFLT:         /* page fault */
                        i = trap_pfault(frame, TRUE);
#ifdef DDB
                        if (frame->tf_rip == 0) {
                                /* used for kernel debugging only */
                                while (freeze_on_seg_fault)
                                        tsleep(p, 0, "freeze", hz * 20);
                        }
#endif
                        if (i == -1 || i == 0)
                                goto out;
                        if (i == SIGSEGV) {
                                ucode = SEGV_MAPERR;
                        } else {
                                i = SIGSEGV;
                                ucode = SEGV_ACCERR;
                        }
                        break;

                case T_DIVIDE:          /* integer divide fault */
                        ucode = FPE_INTDIV;
                        i = SIGFPE;
                        break;

#if NISA > 0
                case T_NMI:
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0) {
#ifdef DDB
                                /*
                                 * NMI can be hooked up to a pushbutton
                                 * for debugging.
                                 */
                                if (ddb_on_nmi) {
                                        kprintf("NMI ... going to debugger\n");
                                        kdb_trap(type, 0, frame);
                                }
#endif /* DDB */
                                goto out2;
                        } else if (panic_on_nmi)
                                panic("NMI indicates hardware failure");
                        break;
#endif /* NISA > 0 */

                case T_OFLOW:           /* integer overflow fault */
                        ucode = FPE_INTOVF;
                        i = SIGFPE;
                        break;

                case T_BOUND:           /* bounds check fault */
                        ucode = FPE_FLTSUB;
                        i = SIGFPE;
                        break;

                case T_DNA:
                        /*
                         * Virtual kernel intercept - pass the DNA exception
                         * to the virtual kernel if it asked to handle it.
                         * This occurs when the virtual kernel is holding
                         * onto the FP context for a different emulated
                         * process than the one currently running.
                         *
                         * We must still call npxdna() since we may have
                         * saved FP state that the virtual kernel needs
                         * to hand over to a different emulated process.
                         */
                        if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
                            (td->td_pcb->pcb_flags & FP_VIRTFP)
                        ) {
                                npxdna();
                                break;
                        }

                        /*
                         * The kernel may have switched out the FP unit's
                         * state, causing the user process to take a fault
                         * when it tries to use the FP unit.  Restore the
                         * state here.
                         */
                        if (npxdna())
                                goto out;
                        i = SIGFPE;
                        ucode = FPE_FPU_NP_TRAP;
                        break;

                case T_FPOPFLT:         /* FPU operand fetch fault */
                        ucode = ILL_COPROC;
                        i = SIGILL;
                        break;

                case T_XMMFLT:          /* SIMD floating-point exception */
                        ucode = 0; /* XXX */
                        i = SIGFPE;
                        break;
                }
        } else {
                /* kernel trap */

                switch (type) {
                case T_PAGEFLT:         /* page fault */
                        trap_pfault(frame, FALSE);
                        goto out2;

                case T_DNA:
                        /*
                         * The kernel is apparently using fpu for copying.
                         * XXX this should be fatal unless the kernel has
                         * registered such use.
                         */
                        if (npxdna())
                                goto out2;
                        break;

                case T_STKFLT:          /* stack fault */
                case T_PROTFLT:         /* general protection fault */
                case T_SEGNPFLT:        /* segment not present fault */
                        /*
                         * Invalid segment selectors and out of bounds
                         * %rip's and %rsp's can be set up in user mode.
                         * This causes a fault in kernel mode when the
                         * kernel tries to return to user mode.  We want
                         * to get this fault so that we can fix the
                         * problem here and not have to check all the
                         * selectors and pointers when the user changes
                         * them.
                         */
                        if (mycpu->gd_intr_nesting_level == 0) {
                                /*
                                 * NOTE: in 64-bit mode traps push rsp/ss
                                 *       even if no ring change occurs.
                                 */
                                if (td->td_pcb->pcb_onfault &&
                                    td->td_pcb->pcb_onfault_sp ==
                                    frame->tf_rsp) {
                                        frame->tf_rip = (register_t)
                                                td->td_pcb->pcb_onfault;
                                        goto out2;
                                }

                                /*
                                 * If the iretq in doreti faults during
                                 * return to user, it will be special-cased
                                 * in IDTVEC(prot) to get here.  We want
                                 * to 'return' to doreti_iret_fault in
                                 * ipl.s in approximately the same state we
                                 * were in at the iretq.
                                 */
                                if (frame->tf_rip == (long)doreti_iret) {
                                        frame->tf_rip = (long)doreti_iret_fault;
                                        goto out2;
                                }
                        }
                        break;

                case T_TSSFLT:
                        /*
                         * PSL_NT can be set in user mode and isn't cleared
                         * automatically when the kernel is entered.  This
                         * causes a TSS fault when the kernel attempts to
                         * `iret' because the TSS link is uninitialized.  We
                         * want to get this fault so that we can fix the
                         * problem here and not every time the kernel is
                         * entered.
                         */
                        if (frame->tf_rflags & PSL_NT) {
                                frame->tf_rflags &= ~PSL_NT;
#if 0
                                /* do we need this? */
                                if (frame->tf_rip == (long)doreti_iret)
                                        frame->tf_rip = (long)doreti_iret_fault;
#endif
                                goto out2;
                        }
                        break;

                case T_TRCTRAP:         /* trace trap */
                        /*
                         * Detect historical CPU artifact on syscall or int $3
                         * entry (if not shortcutted in exception.S via
                         * DIRECT_DISALLOW_SS_CPUBUG).
                         */
                        if (frame->tf_rip == (register_t)IDTVEC(fast_syscall)) {
                                krateprintf(&sscpubugrate,
                                        "Caught #DB at syscall cpu artifact\n");
                                goto out2;
                        }
                        if (frame->tf_rip == (register_t)IDTVEC(bpt)) {
                                krateprintf(&sscpubugrate,
                                        "Caught #DB at int $N cpu artifact\n");
                                goto out2;
                        }

                        /*
                         * Ignore debug register trace traps due to
                         * accesses in the user's address space, which
                         * can happen under several conditions such as
                         * if a user sets a watchpoint on a buffer and
                         * then passes that buffer to a system call.
                         * We still want to get TRCTRAPS for addresses
                         * in kernel space because that is useful when
                         * debugging the kernel.
                         */
                        if (user_dbreg_trap()) {
                                /*
                                 * Reset breakpoint bits because the
                                 * processor doesn't.
                                 */
                                load_dr6(rdr6() & ~0xf);
                                goto out2;
                        }
                        /*
                         * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
                         */
                case T_BPTFLT:
                        /*
                         * If DDB is enabled, let it handle the debugger trap.
                         * Otherwise, debugger traps "can't happen".
                         */
                        ucode = TRAP_BRKPT;
#ifdef DDB
                        if (kdb_trap(type, 0, frame))
                                goto out2;
#endif
                        break;

#if NISA > 0
                case T_NMI:
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0) {
#ifdef DDB
                                /*
                                 * NMI can be hooked up to a pushbutton
                                 * for debugging.
                                 */
                                if (ddb_on_nmi) {
                                        kprintf("NMI ... going to debugger\n");
                                        kdb_trap(type, 0, frame);
                                }
#endif /* DDB */
                                goto out2;
                        } else if (panic_on_nmi == 0)
                                goto out2;
                        /* FALL THROUGH */
#endif /* NISA > 0 */
                }
                trap_fatal(frame, 0);
                goto out2;
        }

        /*
         * Fault from user mode, virtual kernel intercept.
         *
         * If the fault is directly related to a VM context managed by a
         * virtual kernel then let the virtual kernel handle it.
         */
        if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
                vkernel_trap(lp, frame);
                goto out;
        }

        /* Translate fault for emulators (e.g. Linux) */
        if (*p->p_sysent->sv_transtrap)
                i = (*p->p_sysent->sv_transtrap)(i, type);

        trapsignal(lp, i, ucode);

#ifdef DEBUG
        if (type <= MAX_TRAP_MSG) {
                uprintf("fatal process exception: %s",
                        trap_msg[type]);
                if ((type == T_PAGEFLT) || (type == T_PROTFLT))
                        uprintf(", fault VA = 0x%lx", frame->tf_addr);
                uprintf("\n");
        }
#endif

out:
        userret(lp, frame, sticks);
        userexit(lp);
out2:   ;
        if (p != NULL && lp != NULL)
                KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
        KASSERT(crit_count == td->td_critcount,
                ("trap: critical section count mismatch! %d/%d",
                crit_count, td->td_pri));
        KASSERT(curstop == td->td_toks_stop,
                ("trap: extra tokens held after trap! %ld/%ld",
                curstop - &td->td_toks_base,
                td->td_toks_stop - &td->td_toks_base));
#endif
}

void
trap_handle_userenter(struct thread *td)
{
        userenter(td, td->td_proc);
}

void
trap_handle_userexit(struct trapframe *frame, int sticks)
{
        struct lwp *lp = curthread->td_lwp;

        if (lp) {
                userret(lp, frame, sticks);
                userexit(lp);
        }
}

static int
trap_pfault(struct trapframe *frame, int usermode)
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map;
        int rv = 0;
        int fault_flags;
        vm_prot_t ftype;
        thread_t td = curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p;

        va = trunc_page(frame->tf_addr);
        if (va >= VM_MIN_KERNEL_ADDRESS) {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode) {
                        fault_flags = -1;
                        ftype = -1;
                        goto nogo;
                }

                map = &kernel_map;
        } else {
                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL.  If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                if (lp != NULL)
                        vm = lp->lwp_vmspace;

                if (vm == NULL) {
                        fault_flags = -1;
                        ftype = -1;
                        goto nogo;
                }

                /*
                 * Debugging, try to catch kernel faults on the user address
                 * space when not inside an onfault (e.g. copyin/copyout)
                 * routine.
                 */
                if (usermode == 0 && (td->td_pcb == NULL ||
                    td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
                        if (freeze_on_seg_fault) {
                                kprintf("trap_pfault: user address fault from kernel mode "
                                        "%016lx\n", (long)frame->tf_addr);
                                while (freeze_on_seg_fault)
                                        tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);
                        }
#endif
                }
                map = &vm->vm_map;
        }

        /*
         * PGEX_I is defined only if the execute disable bit capability is
         * supported and enabled.
         */
        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else if (frame->tf_err & PGEX_I)
                ftype = VM_PROT_EXECUTE;
        else
                ftype = VM_PROT_READ;

        if (map != &kernel_map) {
                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                PHOLD(lp->lwp_proc);

                /*
                 * Issue fault
                 */
                fault_flags = 0;
                if (usermode)
                        fault_flags |= VM_FAULT_BURST | VM_FAULT_USERMODE;
                if (ftype & VM_PROT_WRITE)
                        fault_flags |= VM_FAULT_DIRTY;
                else
                        fault_flags |= VM_FAULT_NORMAL;
                rv = vm_fault(map, va, ftype, fault_flags);

                PRELE(lp->lwp_proc);
        } else {
                /*
                 * Don't have to worry about process locking or stacks in the
                 * kernel.
                 */
                fault_flags = VM_FAULT_NORMAL;
                rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        }
        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                /*
                 * NOTE: in 64-bit mode traps push rsp/ss
                 *       even if no ring change occurs.
                 */
                if (td->td_pcb->pcb_onfault &&
                    td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
                    td->td_gd->gd_intr_nesting_level == 0) {
                        frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, frame->tf_addr);
                return (-1);
        }

        /*
         * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
         * kludge is needed to pass the fault address to signal handlers.
         */
        p = td->td_proc;
#ifdef DDB
        if (td->td_lwp->lwp_vkernel == NULL) {
                while (freeze_on_seg_fault) {
                        tsleep(p, 0, "freeze", hz * 20);
                }
                if (ddb_on_seg_fault)
                        Debugger("ddb_on_seg_fault");
        }
#endif

        return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
        int code, ss;
        u_int type;
        long rsp;
        struct soft_segment_descriptor softseg;
        char *msg;

        code = frame->tf_err;
        type = frame->tf_trapno;
        sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

        if (type <= MAX_TRAP_MSG)
                msg = trap_msg[type];
        else
                msg = "UNKNOWN";
        kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
                ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
        /* three separate prints in case of a trap on an unmapped page */
        kprintf("cpuid = %d; ", mycpu->gd_cpuid);
        if (lapic_usable)
                kprintf("lapic id = %u\n", LAPIC_READID);
        if (type == T_PAGEFLT) {
                kprintf("fault virtual address = 0x%lx\n", eva);
                kprintf("fault code = %s %s %s, %s\n",
                        code & PGEX_U ? "user" : "supervisor",
                        code & PGEX_W ? "write" : "read",
                        code & PGEX_I ? "instruction" : "data",
                        code & PGEX_P ? "protection violation" : "page not present");
        }
        kprintf("instruction pointer = 0x%lx:0x%lx\n",
                frame->tf_cs & 0xffff, frame->tf_rip);
        if (ISPL(frame->tf_cs) == SEL_UPL) {
                ss = frame->tf_ss & 0xffff;
                rsp = frame->tf_rsp;
        } else {
                /*
                 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
                 *       change occurs.
                 */
                ss = GSEL(GDATA_SEL, SEL_KPL);
                rsp = frame->tf_rsp;
        }
        kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
        kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
        kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
                softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
        kprintf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
                softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
                softseg.ssd_gran);
        kprintf("processor eflags = ");
        if (frame->tf_rflags & PSL_T)
                kprintf("trace trap, ");
        if (frame->tf_rflags & PSL_I)
                kprintf("interrupt enabled, ");
        if (frame->tf_rflags & PSL_NT)
                kprintf("nested task, ");
        if (frame->tf_rflags & PSL_RF)
                kprintf("resume, ");
        kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
        kprintf("current process = ");
        if (curproc) {
                kprintf("%lu\n",
                    (u_long)curproc->p_pid);
        } else {
                kprintf("Idle\n");
        }
        kprintf("current thread = pri %d ", curthread->td_pri);
        if (curthread->td_critcount)
                kprintf("(CRIT)");
        kprintf("\n");

#ifdef DDB
        if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
                return;
#endif
        kprintf("trap number = %d\n", type);
        if (type <= MAX_TRAP_MSG)
                panic("%s", trap_msg[type]);
        else
                panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (as is the case with infinite recursion,
 * for example).
 */
static __inline
int
in_kstack_guard(register_t rptr)
{
        thread_t td = curthread;

        if ((char *)rptr >= td->td_kstack &&
            (char *)rptr < td->td_kstack + PAGE_SIZE) {
                return 1;
        }
        return 0;
}

void
dblfault_handler(struct trapframe *frame)
{
        thread_t td = curthread;

        if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
                kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
                if (in_kstack_guard(frame->tf_rsp))
                        frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
                if (in_kstack_guard(frame->tf_rbp))
                        frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
        } else {
                kprintf("DOUBLE FAULT\n");
        }
        kprintf("\nFatal double fault\n");
        kprintf("rip = 0x%lx\n", frame->tf_rip);
        kprintf("rsp = 0x%lx\n", frame->tf_rsp);
        kprintf("rbp = 0x%lx\n", frame->tf_rbp);
        /* three separate prints in case of a trap on an unmapped page */
        kprintf("cpuid = %d; ", mycpu->gd_cpuid);
        if (lapic_usable)
                kprintf("lapic id = %u\n", LAPIC_READID);
        panic("double fault");
}

/*
 * syscall2 - MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct lwp *lp = td->td_lwp;
        struct sysent *callp;
        register_t orig_tf_rflags;
        int sticks;
        int error;
        int narg;
#ifdef INVARIANTS
        int crit_count = td->td_critcount;
#endif
        register_t *argp;
        u_int code;
        int regcnt, optimized_regcnt;
        union sysunion args;
        register_t *argsdst;

        mycpu->gd_cnt.v_syscall++;

#ifdef DIAGNOSTIC
        if (ISPL(frame->tf_cs) != SEL_UPL) {
                panic("syscall");
                /* NOT REACHED */
        }
#endif

        KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
                frame->tf_rax);

        userenter(td, p);       /* lazy raise our priority */

        regcnt = 6;
        optimized_regcnt = 6;

        /*
         * Misc
         */
        sticks = (int)td->td_sticks;
        orig_tf_rflags = frame->tf_rflags;

        /*
         * Virtual kernel intercept - if a VM context managed by a virtual
         * kernel issues a system call the virtual kernel handles it, not us.
         * Restore the virtual kernel context and return from its system
         * call.  The current frame is copied out to the virtual kernel.
         */
        if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
                vkernel_trap(lp, frame);
                error = EJUSTRETURN;
                callp = NULL;
                code = 0;
                goto out;
        }

        /*
         * Get the system call parameters and account for time
         */
        KASSERT(lp->lwp_md.md_regs == frame,
                ("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
        code = (u_int)frame->tf_rax;

        if (code == SYS_syscall || code == SYS___syscall) {
                code = frame->tf_rdi;
                regcnt--;
                argp = &frame->tf_rdi + 1;
        } else {
                argp = &frame->tf_rdi;
        }

        if (code >= p->p_sysent->sv_size)
                callp = &p->p_sysent->sv_table[0];
        else
                callp = &p->p_sysent->sv_table[code];

        narg = callp->sy_narg & SYF_ARGMASK;

        /*
         * On x86_64 we get up to six arguments in registers.  The rest are
         * on the stack.  The first six members of 'struct trapframe' happen
         * to be the registers used to pass arguments, in exactly the right
         * order.
         */
        argsdst = (register_t *)(&args.nosys.sysmsg + 1);

        /*
         * It's easier to copy up to the highest number of syscall arguments
         * passed in registers, which is 6, than to conditionalize it.
         */
        __builtin_memcpy(argsdst, argp, sizeof(register_t) * optimized_regcnt);

        /*
         * Any arguments beyond available argument-passing registers must
         * be copyin()'d from the user stack.
         */
        if (narg > regcnt) {
                caddr_t params;

                params = (caddr_t)frame->tf_rsp + sizeof(register_t);
                error = copyin(params, &argsdst[regcnt],
                               (narg - regcnt) * sizeof(register_t));
                if (error) {
#ifdef KTRACE
                        if (KTRPOINT(td, KTR_SYSCALL)) {
                                ktrsyscall(lp, code, narg,
                                           (void *)(&args.nosys.sysmsg + 1));
                        }
#endif
                        goto bad;
                }
        }

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSCALL)) {
                ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
        }
#endif

        /*
         * Default return value is 0 (will be copied to %rax).  Double-value
         * returns use %rax and %rdx.  %rdx is left unchanged for system
         * calls which return only one result.
         */
        args.sysmsg_fds[0] = 0;
        args.sysmsg_fds[1] = frame->tf_rdx;

        /*
         * The syscall might manipulate the trap frame.  If it does it
         * will probably return EJUSTRETURN.
         */
        args.sysmsg_frame = frame;

        STOPEVENT(p, S_SCE, narg);      /* MP aware */

        /*
         * NOTE: All system calls run MPSAFE now.  The system call itself
         *       is responsible for getting the MP lock.
         */
#ifdef SYSCALL_DEBUG
        tsc_uclock_t tscval = rdtsc();
#endif
        error = (*callp->sy_call)(&args);
#ifdef SYSCALL_DEBUG
        tscval = rdtsc() - tscval;
        tscval = tscval * 1000000 / tsc_frequency;
        if (SysCallsWorstCase[code] < tscval)
                SysCallsWorstCase[code] = tscval;
#endif

out:
        /*
         * MP SAFE (we may or may not have the MP lock at this point)
         */
        /*kprintf("SYSMSG %d ", error);*/
        switch (error) {
        case 0:
                /*
                 * Reinitialize proc pointer `p' as it may be different
                 * if this is a child returning from fork syscall.
                 */
                p = curproc;
                lp = curthread->td_lwp;
                frame->tf_rax = args.sysmsg_fds[0];
                frame->tf_rdx = args.sysmsg_fds[1];
                frame->tf_rflags &= ~PSL_C;
                break;
        case ERESTART:
                /*
                 * Reconstruct pc, we know that 'syscall' is 2 bytes.
                 * We have to do a full context restore so that %r10
                 * (which was holding the value of %rcx) is restored for
                 * the next iteration.
                 */
                if (frame->tf_err != 0 && frame->tf_err != 2)
                        kprintf("lp %s:%d frame->tf_err is weird %ld\n",
                                td->td_comm, lp->lwp_proc->p_pid, frame->tf_err);
                frame->tf_rip -= frame->tf_err;
                frame->tf_r10 = frame->tf_rcx;
                break;
        case EJUSTRETURN:
                break;
        case EASYNC:
                panic("Unexpected EASYNC return value (for now)");
        default:
bad:
                if (p->p_sysent->sv_errsize) {
                        if (error >= p->p_sysent->sv_errsize)
                                error = -1;     /* XXX */
                        else
                                error = p->p_sysent->sv_errtbl[error];
                }
                frame->tf_rax = error;
                frame->tf_rflags |= PSL_C;
                break;
        }

        /*
         * Traced syscall.  trapsignal() should now be MP aware
         */
        if (orig_tf_rflags & PSL_T) {
                frame->tf_rflags &= ~PSL_T;
                trapsignal(lp, SIGTRAP, TRAP_TRACE);
        }

        /*
         * Handle reschedule and other end-of-syscall issues
         */
        userret(lp, frame, sticks);

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSRET)) {
                ktrsysret(lp, code, error, args.sysmsg_result);
        }
#endif

        /*
         * This works because errno is findable through the
         * register set.  If we ever support an emulation where this
         * is not the case, this code will need to be revisited.
         */
        STOPEVENT(p, S_SCX, code);

        userexit(lp);
        KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
        KASSERT(crit_count == td->td_critcount,
                ("syscall: critical section count mismatch! %d/%d",
                crit_count, td->td_pri));
        KASSERT(&td->td_toks_base == td->td_toks_stop,
                ("syscall: %ld extra tokens held after trap! syscall %p",
                td->td_toks_stop - &td->td_toks_base,
                callp->sy_call));
#endif
}

void
fork_return(struct lwp *lp, struct trapframe *frame)
{
        frame->tf_rax = 0;              /* Child returns zero */
        frame->tf_rflags &= ~PSL_C;     /* success */
        frame->tf_rdx = 1;

        generic_lwp_return(lp, frame);
        KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
        struct proc *p = lp->lwp_proc;

        /*
         * Check for exit-race.  If one lwp exits the process concurrent with
         * another lwp creating a new thread, the two operations may cross
         * each other resulting in the newly-created lwp not receiving a
         * KILL signal.
         */
        if (p->p_flags & P_WEXIT) {
                lwpsignal(p, lp, SIGKILL);
        }

        /*
         * Newly forked processes are given a kernel priority.  We have to
         * adjust the priority to a normal user priority and fake entry
         * into the kernel (call userenter()) to install a passive release
         * function just in case userret() decides to stop the process.  This
         * can occur when ^Z races a fork.  If we do not install the passive
         * release function the current process designation will not be
         * released when the thread goes to sleep.
         */
        lwkt_setpri_self(TDPRI_USER_NORM);
        userenter(lp->lwp_thread, p);
        userret(lp, frame, 0);
#ifdef KTRACE
        if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
                ktrsysret(lp, SYS_fork, 0, 0);
#endif
        lp->lwp_flags |= LWP_PASSIVE_ACQ;
        userexit(lp);
        lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
        struct thread *td = curthread;

        if (frame->tf_xflags & PGEX_FPFAULT) {
                td->td_pcb->pcb_flags |= FP_VIRTFP;
                if (mdcpu->gd_npxthread == td)
                        npxexit();
        } else {
                td->td_pcb->pcb_flags &= ~FP_VIRTFP;
        }
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
        frame->tf_rax = error;
        if (error)
                frame->tf_rflags |= PSL_C;
        else
                frame->tf_rflags &= ~PSL_C;
}