1 /*- 2 * Copyright (c) 1990, 1993 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (C) 1994, David Greenman 5 * Copyright (c) 2008 The DragonFly Project. 6 * Copyright (c) 2008 Jordan Gordeev. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the University of Utah, and William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the University of 22 * California, Berkeley and its contributors. 23 * 4. Neither the name of the University nor the names of its contributors 24 * may be used to endorse or promote products derived from this software 25 * without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 * 39 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 40 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $ 41 */ 42 43 /* 44 * x86_64 Trap and System call handling 45 */ 46 47 #include "use_isa.h" 48 49 #include "opt_ddb.h" 50 #include "opt_ktrace.h" 51 52 #include <machine/frame.h> 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/kernel.h> 56 #include <sys/kerneldump.h> 57 #include <sys/proc.h> 58 #include <sys/pioctl.h> 59 #include <sys/types.h> 60 #include <sys/signal2.h> 61 #include <sys/syscall.h> 62 #include <sys/sysctl.h> 63 #include <sys/sysent.h> 64 #include <sys/systm.h> 65 #ifdef KTRACE 66 #include <sys/ktrace.h> 67 #endif 68 #include <sys/ktr.h> 69 #include <sys/sysmsg.h> 70 #include <sys/sysproto.h> 71 #include <sys/sysunion.h> 72 73 #include <vm/pmap.h> 74 #include <vm/vm.h> 75 #include <vm/vm_extern.h> 76 #include <vm/vm_kern.h> 77 #include <vm/vm_param.h> 78 #include <machine/cpu.h> 79 #include <machine/pcb.h> 80 #include <machine/smp.h> 81 #include <machine/thread.h> 82 #include <machine/clock.h> 83 #include <machine/vmparam.h> 84 #include <machine/md_var.h> 85 #include <machine_base/isa/isa_intr.h> 86 #include <machine_base/apic/lapic.h> 87 88 #include <ddb/ddb.h> 89 90 #include <sys/thread2.h> 91 #include <sys/mplock2.h> 92 93 #ifdef SMP 94 95 #define MAKEMPSAFE(have_mplock) \ 96 if (have_mplock == 0) { \ 97 get_mplock(); \ 98 have_mplock = 1; \ 99 } 100 101 #else 102 103 #define MAKEMPSAFE(have_mplock) 104 105 #endif 106 107 extern void trap(struct trapframe *frame); 108 109 static int trap_pfault(struct trapframe *, int); 110 static void trap_fatal(struct trapframe *, vm_offset_t); 111 void dblfault_handler(struct trapframe *frame); 112 113 #define MAX_TRAP_MSG 30 114 static char *trap_msg[] = { 115 "", /* 0 unused */ 116 "privileged instruction fault", /* 1 T_PRIVINFLT */ 117 "", /* 2 unused */ 118 "breakpoint instruction fault", /* 3 T_BPTFLT */ 119 "", /* 4 unused */ 120 "", /* 5 unused */ 121 "arithmetic trap", /* 6 T_ARITHTRAP */ 122 "system forced exception", /* 7 T_ASTFLT */ 123 "", /* 8 unused */ 124 "general protection fault", /* 9 T_PROTFLT */ 125 "trace trap", /* 10 T_TRCTRAP */ 126 "", /* 11 unused */ 127 "page fault", /* 12 T_PAGEFLT */ 128 "", /* 13 unused */ 129 "alignment fault", /* 14 T_ALIGNFLT */ 130 "", /* 15 unused */ 131 "", /* 16 unused */ 132 "", /* 17 unused */ 133 "integer divide fault", /* 18 T_DIVIDE */ 134 "non-maskable interrupt trap", /* 19 T_NMI */ 135 "overflow trap", /* 20 T_OFLOW */ 136 "FPU bounds check fault", /* 21 T_BOUND */ 137 "FPU device not available", /* 22 T_DNA */ 138 "double fault", /* 23 T_DOUBLEFLT */ 139 "FPU operand fetch fault", /* 24 T_FPOPFLT */ 140 "invalid TSS fault", /* 25 T_TSSFLT */ 141 "segment not present fault", /* 26 T_SEGNPFLT */ 142 "stack fault", /* 27 T_STKFLT */ 143 "machine check trap", /* 28 T_MCHK */ 144 "SIMD floating-point exception", /* 29 T_XMMFLT */ 145 "reserved (unknown) fault", /* 30 T_RESERVED */ 146 }; 147 148 #ifdef DDB 149 static int ddb_on_nmi = 1; 150 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW, 151 &ddb_on_nmi, 0, "Go to DDB on NMI"); 152 static int ddb_on_seg_fault = 0; 153 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW, 154 &ddb_on_seg_fault, 0, "Go to DDB on user seg-fault"); 155 static int freeze_on_seg_fault = 0; 156 SYSCTL_INT(_machdep, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW, 157 &freeze_on_seg_fault, 0, "Go to DDB on user seg-fault"); 158 #endif 159 static int panic_on_nmi = 1; 160 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW, 161 &panic_on_nmi, 0, "Panic on NMI"); 162 static int fast_release; 163 SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW, 164 &fast_release, 0, "Passive Release was optimal"); 165 static int slow_release; 166 SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW, 167 &slow_release, 0, "Passive Release was nonoptimal"); 168 169 /* 170 * System call debugging records the worst-case system call 171 * overhead (inclusive of blocking), but may be inaccurate. 172 */ 173 /*#define SYSCALL_DEBUG*/ 174 #ifdef SYSCALL_DEBUG 175 uint64_t SysCallsWorstCase[SYS_MAXSYSCALL]; 176 #endif 177 178 /* 179 * Passively intercepts the thread switch function to increase 180 * the thread priority from a user priority to a kernel priority, reducing 181 * syscall and trap overhead for the case where no switch occurs. 182 * 183 * Synchronizes td_ucred with p_ucred. This is used by system calls, 184 * signal handling, faults, AST traps, and anything else that enters the 185 * kernel from userland and provides the kernel with a stable read-only 186 * copy of the process ucred. 187 */ 188 static __inline void 189 userenter(struct thread *curtd, struct proc *curp) 190 { 191 struct ucred *ocred; 192 struct ucred *ncred; 193 194 curtd->td_release = lwkt_passive_release; 195 196 if (curtd->td_ucred != curp->p_ucred) { 197 ncred = crhold(curp->p_ucred); 198 ocred = curtd->td_ucred; 199 curtd->td_ucred = ncred; 200 if (ocred) 201 crfree(ocred); 202 } 203 204 /* 205 * Debugging, remove top two user stack pages to catch kernel faults 206 */ 207 if (freeze_on_seg_fault > 1 && curtd->td_lwp) { 208 pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace), 209 0x00007FFFFFFFD000LU, 210 0x0000800000000000LU); 211 } 212 } 213 214 /* 215 * Handle signals, upcalls, profiling, and other AST's and/or tasks that 216 * must be completed before we can return to or try to return to userland. 217 * 218 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64 219 * arithmatic on the delta calculation so the absolute tick values are 220 * truncated to an integer. 221 */ 222 static void 223 userret(struct lwp *lp, struct trapframe *frame, int sticks) 224 { 225 struct proc *p = lp->lwp_proc; 226 int sig; 227 228 /* 229 * Charge system time if profiling. Note: times are in microseconds. 230 * This may do a copyout and block, so do it first even though it 231 * means some system time will be charged as user time. 232 */ 233 if (p->p_flags & P_PROFIL) { 234 addupc_task(p, frame->tf_rip, 235 (u_int)((int)lp->lwp_thread->td_sticks - sticks)); 236 } 237 238 recheck: 239 /* 240 * If the jungle wants us dead, so be it. 241 */ 242 if (lp->lwp_mpflags & LWP_MP_WEXIT) { 243 lwkt_gettoken(&p->p_token); 244 lwp_exit(0); 245 lwkt_reltoken(&p->p_token); /* NOT REACHED */ 246 } 247 248 /* 249 * Block here if we are in a stopped state. 250 */ 251 if (p->p_stat == SSTOP || dump_stop_usertds) { 252 lwkt_gettoken(&p->p_token); 253 tstop(); 254 lwkt_reltoken(&p->p_token); 255 goto recheck; 256 } 257 258 /* 259 * Post any pending upcalls. If running a virtual kernel be sure 260 * to restore the virtual kernel's vmspace before posting the upcall. 261 */ 262 if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) { 263 lwkt_gettoken(&p->p_token); 264 if (p->p_flags & P_SIGVTALRM) { 265 p->p_flags &= ~P_SIGVTALRM; 266 ksignal(p, SIGVTALRM); 267 } 268 if (p->p_flags & P_SIGPROF) { 269 p->p_flags &= ~P_SIGPROF; 270 ksignal(p, SIGPROF); 271 } 272 if (p->p_flags & P_UPCALLPEND) { 273 p->p_flags &= ~P_UPCALLPEND; 274 postupcall(lp); 275 } 276 lwkt_reltoken(&p->p_token); 277 goto recheck; 278 } 279 280 /* 281 * Post any pending signals. If running a virtual kernel be sure 282 * to restore the virtual kernel's vmspace before posting the signal. 283 * 284 * WARNING! postsig() can exit and not return. 285 */ 286 if ((sig = CURSIG_TRACE(lp)) != 0) { 287 lwkt_gettoken(&p->p_token); 288 postsig(sig); 289 lwkt_reltoken(&p->p_token); 290 goto recheck; 291 } 292 293 /* 294 * block here if we are swapped out, but still process signals 295 * (such as SIGKILL). proc0 (the swapin scheduler) is already 296 * aware of our situation, we do not have to wake it up. 297 */ 298 if (p->p_flags & P_SWAPPEDOUT) { 299 lwkt_gettoken(&p->p_token); 300 get_mplock(); 301 p->p_flags |= P_SWAPWAIT; 302 swapin_request(); 303 if (p->p_flags & P_SWAPWAIT) 304 tsleep(p, PCATCH, "SWOUT", 0); 305 p->p_flags &= ~P_SWAPWAIT; 306 rel_mplock(); 307 lwkt_reltoken(&p->p_token); 308 goto recheck; 309 } 310 311 /* 312 * Make sure postsig() handled request to restore old signal mask after 313 * running signal handler. 314 */ 315 KKASSERT((lp->lwp_flags & LWP_OLDMASK) == 0); 316 } 317 318 /* 319 * Cleanup from userenter and any passive release that might have occured. 320 * We must reclaim the current-process designation before we can return 321 * to usermode. We also handle both LWKT and USER reschedule requests. 322 */ 323 static __inline void 324 userexit(struct lwp *lp) 325 { 326 struct thread *td = lp->lwp_thread; 327 /* globaldata_t gd = td->td_gd;*/ 328 329 /* 330 * Handle stop requests at kernel priority. Any requests queued 331 * after this loop will generate another AST. 332 */ 333 while (lp->lwp_proc->p_stat == SSTOP) { 334 lwkt_gettoken(&lp->lwp_proc->p_token); 335 tstop(); 336 lwkt_reltoken(&lp->lwp_proc->p_token); 337 } 338 339 /* 340 * Reduce our priority in preparation for a return to userland. If 341 * our passive release function was still in place, our priority was 342 * never raised and does not need to be reduced. 343 */ 344 lwkt_passive_recover(td); 345 346 /* 347 * Become the current user scheduled process if we aren't already, 348 * and deal with reschedule requests and other factors. 349 */ 350 lp->lwp_proc->p_usched->acquire_curproc(lp); 351 /* WARNING: we may have migrated cpu's */ 352 /* gd = td->td_gd; */ 353 } 354 355 #if !defined(KTR_KERNENTRY) 356 #define KTR_KERNENTRY KTR_ALL 357 #endif 358 KTR_INFO_MASTER(kernentry); 359 KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, 360 "TRAP(pid %hd, tid %hd, trapno %ld, eva %lu)", 361 pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva); 362 KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %hd, tid %hd)", 363 pid_t pid, lwpid_t tid); 364 KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %hd, tid %hd, nr %ld)", 365 pid_t pid, lwpid_t tid, register_t trapno); 366 KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %hd, tid %hd, err %d)", 367 pid_t pid, lwpid_t tid, int err); 368 KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %hd, tid %hd)", 369 pid_t pid, lwpid_t tid); 370 371 /* 372 * Exception, fault, and trap interface to the kernel. 373 * This common code is called from assembly language IDT gate entry 374 * routines that prepare a suitable stack frame, and restore this 375 * frame after the exception has been processed. 376 * 377 * This function is also called from doreti in an interlock to handle ASTs. 378 * For example: hardwareint->INTROUTINE->(set ast)->doreti->trap 379 * 380 * NOTE! We have to retrieve the fault address prior to obtaining the 381 * MP lock because get_mplock() may switch out. YYY cr2 really ought 382 * to be retrieved by the assembly code, not here. 383 * 384 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicing 385 * if an attempt is made to switch from a fast interrupt or IPI. This is 386 * necessary to properly take fatal kernel traps on SMP machines if 387 * get_mplock() has to block. 388 */ 389 390 void 391 trap(struct trapframe *frame) 392 { 393 struct globaldata *gd = mycpu; 394 struct thread *td = gd->gd_curthread; 395 struct lwp *lp = td->td_lwp; 396 struct proc *p; 397 int sticks = 0; 398 int i = 0, ucode = 0, type, code; 399 #ifdef SMP 400 int have_mplock = 0; 401 #endif 402 #ifdef INVARIANTS 403 int crit_count = td->td_critcount; 404 lwkt_tokref_t curstop = td->td_toks_stop; 405 #endif 406 vm_offset_t eva; 407 408 p = td->td_proc; 409 clear_quickret(); 410 411 #ifdef DDB 412 /* 413 * We need to allow T_DNA faults when the debugger is active since 414 * some dumping paths do large bcopy() which use the floating 415 * point registers for faster copying. 416 */ 417 if (db_active && frame->tf_trapno != T_DNA) { 418 eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0); 419 ++gd->gd_trap_nesting_level; 420 MAKEMPSAFE(have_mplock); 421 trap_fatal(frame, eva); 422 --gd->gd_trap_nesting_level; 423 goto out2; 424 } 425 #endif 426 427 eva = 0; 428 429 if ((frame->tf_rflags & PSL_I) == 0) { 430 /* 431 * Buggy application or kernel code has disabled interrupts 432 * and then trapped. Enabling interrupts now is wrong, but 433 * it is better than running with interrupts disabled until 434 * they are accidentally enabled later. 435 */ 436 type = frame->tf_trapno; 437 if (ISPL(frame->tf_cs) == SEL_UPL) { 438 MAKEMPSAFE(have_mplock); 439 /* JG curproc can be NULL */ 440 kprintf( 441 "pid %ld (%s): trap %d with interrupts disabled\n", 442 (long)curproc->p_pid, curproc->p_comm, type); 443 } else if (type != T_NMI && type != T_BPTFLT && 444 type != T_TRCTRAP) { 445 /* 446 * XXX not quite right, since this may be for a 447 * multiple fault in user mode. 448 */ 449 MAKEMPSAFE(have_mplock); 450 kprintf("kernel trap %d with interrupts disabled\n", 451 type); 452 } 453 cpu_enable_intr(); 454 } 455 456 type = frame->tf_trapno; 457 code = frame->tf_err; 458 459 if (ISPL(frame->tf_cs) == SEL_UPL) { 460 /* user trap */ 461 462 KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid, 463 frame->tf_trapno, eva); 464 465 userenter(td, p); 466 467 sticks = (int)td->td_sticks; 468 KASSERT(lp->lwp_md.md_regs == frame, 469 ("Frame mismatch %p %p", lp->lwp_md.md_regs, frame)); 470 471 switch (type) { 472 case T_PRIVINFLT: /* privileged instruction fault */ 473 ucode = ILL_PRVOPC; 474 i = SIGILL; 475 break; 476 477 case T_BPTFLT: /* bpt instruction fault */ 478 case T_TRCTRAP: /* trace trap */ 479 frame->tf_rflags &= ~PSL_T; 480 ucode = TRAP_TRACE; 481 i = SIGTRAP; 482 break; 483 484 case T_ARITHTRAP: /* arithmetic trap */ 485 ucode = code; 486 i = SIGFPE; 487 #if 0 488 #if JG 489 ucode = fputrap(); 490 #else 491 ucode = code; 492 #endif 493 i = SIGFPE; 494 #endif 495 break; 496 497 case T_ASTFLT: /* Allow process switch */ 498 mycpu->gd_cnt.v_soft++; 499 if (mycpu->gd_reqflags & RQF_AST_OWEUPC) { 500 atomic_clear_int(&mycpu->gd_reqflags, 501 RQF_AST_OWEUPC); 502 addupc_task(p, p->p_prof.pr_addr, 503 p->p_prof.pr_ticks); 504 } 505 goto out; 506 507 case T_PROTFLT: /* general protection fault */ 508 i = SIGBUS; 509 ucode = BUS_OBJERR; 510 break; 511 case T_SEGNPFLT: /* segment not present fault */ 512 i = SIGBUS; 513 ucode = BUS_ADRERR; 514 break; 515 case T_TSSFLT: /* invalid TSS fault */ 516 case T_DOUBLEFLT: /* double fault */ 517 i = SIGBUS; 518 ucode = BUS_OBJERR; 519 default: 520 #if 0 521 ucode = code + BUS_SEGM_FAULT ; /* XXX: ???*/ 522 #endif 523 ucode = BUS_OBJERR; 524 i = SIGBUS; 525 break; 526 527 case T_PAGEFLT: /* page fault */ 528 i = trap_pfault(frame, TRUE); 529 if (frame->tf_rip == 0) { 530 kprintf("T_PAGEFLT: Warning %%rip == 0!\n"); 531 while (freeze_on_seg_fault) { 532 tsleep(p, 0, "freeze", hz * 20); 533 } 534 } 535 if (i == -1) 536 goto out; 537 if (i == 0) 538 goto out; 539 540 #if 0 541 ucode = T_PAGEFLT; 542 #endif 543 if (i == SIGSEGV) 544 ucode = SEGV_MAPERR; 545 else 546 ucode = BUS_ADRERR; 547 break; 548 549 case T_DIVIDE: /* integer divide fault */ 550 ucode = FPE_INTDIV; 551 i = SIGFPE; 552 break; 553 554 #if NISA > 0 555 case T_NMI: 556 MAKEMPSAFE(have_mplock); 557 /* machine/parity/power fail/"kitchen sink" faults */ 558 if (isa_nmi(code) == 0) { 559 #ifdef DDB 560 /* 561 * NMI can be hooked up to a pushbutton 562 * for debugging. 563 */ 564 if (ddb_on_nmi) { 565 kprintf ("NMI ... going to debugger\n"); 566 kdb_trap(type, 0, frame); 567 } 568 #endif /* DDB */ 569 goto out2; 570 } else if (panic_on_nmi) 571 panic("NMI indicates hardware failure"); 572 break; 573 #endif /* NISA > 0 */ 574 575 case T_OFLOW: /* integer overflow fault */ 576 ucode = FPE_INTOVF; 577 i = SIGFPE; 578 break; 579 580 case T_BOUND: /* bounds check fault */ 581 ucode = FPE_FLTSUB; 582 i = SIGFPE; 583 break; 584 585 case T_DNA: 586 /* 587 * Virtual kernel intercept - pass the DNA exception 588 * to the virtual kernel if it asked to handle it. 589 * This occurs when the virtual kernel is holding 590 * onto the FP context for a different emulated 591 * process then the one currently running. 592 * 593 * We must still call npxdna() since we may have 594 * saved FP state that the virtual kernel needs 595 * to hand over to a different emulated process. 596 */ 597 if (lp->lwp_vkernel && lp->lwp_vkernel->ve && 598 (td->td_pcb->pcb_flags & FP_VIRTFP) 599 ) { 600 npxdna(); 601 break; 602 } 603 604 /* 605 * The kernel may have switched out the FP unit's 606 * state, causing the user process to take a fault 607 * when it tries to use the FP unit. Restore the 608 * state here 609 */ 610 if (npxdna()) 611 goto out; 612 i = SIGFPE; 613 ucode = FPE_FPU_NP_TRAP; 614 break; 615 616 case T_FPOPFLT: /* FPU operand fetch fault */ 617 ucode = ILL_COPROC; 618 i = SIGILL; 619 break; 620 621 case T_XMMFLT: /* SIMD floating-point exception */ 622 ucode = 0; /* XXX */ 623 i = SIGFPE; 624 break; 625 } 626 } else { 627 /* kernel trap */ 628 629 switch (type) { 630 case T_PAGEFLT: /* page fault */ 631 trap_pfault(frame, FALSE); 632 goto out2; 633 634 case T_DNA: 635 /* 636 * The kernel is apparently using fpu for copying. 637 * XXX this should be fatal unless the kernel has 638 * registered such use. 639 */ 640 if (npxdna()) 641 goto out2; 642 break; 643 644 case T_STKFLT: /* stack fault */ 645 break; 646 647 case T_PROTFLT: /* general protection fault */ 648 case T_SEGNPFLT: /* segment not present fault */ 649 /* 650 * Invalid segment selectors and out of bounds 651 * %rip's and %rsp's can be set up in user mode. 652 * This causes a fault in kernel mode when the 653 * kernel tries to return to user mode. We want 654 * to get this fault so that we can fix the 655 * problem here and not have to check all the 656 * selectors and pointers when the user changes 657 * them. 658 */ 659 if (mycpu->gd_intr_nesting_level == 0) { 660 if (td->td_pcb->pcb_onfault) { 661 frame->tf_rip = (register_t) 662 td->td_pcb->pcb_onfault; 663 goto out2; 664 } 665 if (frame->tf_rip == (long)doreti_iret) { 666 frame->tf_rip = (long)doreti_iret_fault; 667 goto out2; 668 } 669 } 670 break; 671 672 case T_TSSFLT: 673 /* 674 * PSL_NT can be set in user mode and isn't cleared 675 * automatically when the kernel is entered. This 676 * causes a TSS fault when the kernel attempts to 677 * `iret' because the TSS link is uninitialized. We 678 * want to get this fault so that we can fix the 679 * problem here and not every time the kernel is 680 * entered. 681 */ 682 if (frame->tf_rflags & PSL_NT) { 683 frame->tf_rflags &= ~PSL_NT; 684 goto out2; 685 } 686 break; 687 688 case T_TRCTRAP: /* trace trap */ 689 #if 0 690 if (frame->tf_rip == (int)IDTVEC(syscall)) { 691 /* 692 * We've just entered system mode via the 693 * syscall lcall. Continue single stepping 694 * silently until the syscall handler has 695 * saved the flags. 696 */ 697 goto out2; 698 } 699 if (frame->tf_rip == (int)IDTVEC(syscall) + 1) { 700 /* 701 * The syscall handler has now saved the 702 * flags. Stop single stepping it. 703 */ 704 frame->tf_rflags &= ~PSL_T; 705 goto out2; 706 } 707 #endif 708 709 /* 710 * Ignore debug register trace traps due to 711 * accesses in the user's address space, which 712 * can happen under several conditions such as 713 * if a user sets a watchpoint on a buffer and 714 * then passes that buffer to a system call. 715 * We still want to get TRCTRAPS for addresses 716 * in kernel space because that is useful when 717 * debugging the kernel. 718 */ 719 #if JG 720 if (user_dbreg_trap()) { 721 /* 722 * Reset breakpoint bits because the 723 * processor doesn't 724 */ 725 /* XXX check upper bits here */ 726 load_dr6(rdr6() & 0xfffffff0); 727 goto out2; 728 } 729 #endif 730 /* 731 * FALLTHROUGH (TRCTRAP kernel mode, kernel address) 732 */ 733 case T_BPTFLT: 734 /* 735 * If DDB is enabled, let it handle the debugger trap. 736 * Otherwise, debugger traps "can't happen". 737 */ 738 ucode = TRAP_BRKPT; 739 #ifdef DDB 740 MAKEMPSAFE(have_mplock); 741 if (kdb_trap(type, 0, frame)) 742 goto out2; 743 #endif 744 break; 745 746 #if NISA > 0 747 case T_NMI: 748 MAKEMPSAFE(have_mplock); 749 /* machine/parity/power fail/"kitchen sink" faults */ 750 if (isa_nmi(code) == 0) { 751 #ifdef DDB 752 /* 753 * NMI can be hooked up to a pushbutton 754 * for debugging. 755 */ 756 if (ddb_on_nmi) { 757 kprintf ("NMI ... going to debugger\n"); 758 kdb_trap(type, 0, frame); 759 } 760 #endif /* DDB */ 761 goto out2; 762 } else if (panic_on_nmi == 0) 763 goto out2; 764 /* FALL THROUGH */ 765 #endif /* NISA > 0 */ 766 } 767 MAKEMPSAFE(have_mplock); 768 trap_fatal(frame, 0); 769 goto out2; 770 } 771 772 /* 773 * Virtual kernel intercept - if the fault is directly related to a 774 * VM context managed by a virtual kernel then let the virtual kernel 775 * handle it. 776 */ 777 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) { 778 vkernel_trap(lp, frame); 779 goto out; 780 } 781 782 /* 783 * Translate fault for emulators (e.g. Linux) 784 */ 785 if (*p->p_sysent->sv_transtrap) 786 i = (*p->p_sysent->sv_transtrap)(i, type); 787 788 MAKEMPSAFE(have_mplock); 789 trapsignal(lp, i, ucode); 790 791 #ifdef DEBUG 792 if (type <= MAX_TRAP_MSG) { 793 uprintf("fatal process exception: %s", 794 trap_msg[type]); 795 if ((type == T_PAGEFLT) || (type == T_PROTFLT)) 796 uprintf(", fault VA = 0x%lx", frame->tf_addr); 797 uprintf("\n"); 798 } 799 #endif 800 801 out: 802 userret(lp, frame, sticks); 803 userexit(lp); 804 out2: ; 805 #ifdef SMP 806 if (have_mplock) 807 rel_mplock(); 808 #endif 809 if (p != NULL && lp != NULL) 810 KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid); 811 #ifdef INVARIANTS 812 KASSERT(crit_count == td->td_critcount, 813 ("trap: critical section count mismatch! %d/%d", 814 crit_count, td->td_pri)); 815 KASSERT(curstop == td->td_toks_stop, 816 ("trap: extra tokens held after trap! %ld/%ld", 817 curstop - &td->td_toks_base, 818 td->td_toks_stop - &td->td_toks_base)); 819 #endif 820 } 821 822 static int 823 trap_pfault(struct trapframe *frame, int usermode) 824 { 825 vm_offset_t va; 826 struct vmspace *vm = NULL; 827 vm_map_t map; 828 int rv = 0; 829 int fault_flags; 830 vm_prot_t ftype; 831 thread_t td = curthread; 832 struct lwp *lp = td->td_lwp; 833 struct proc *p; 834 835 va = trunc_page(frame->tf_addr); 836 if (va >= VM_MIN_KERNEL_ADDRESS) { 837 /* 838 * Don't allow user-mode faults in kernel address space. 839 */ 840 if (usermode) { 841 fault_flags = -1; 842 ftype = -1; 843 goto nogo; 844 } 845 846 map = &kernel_map; 847 } else { 848 /* 849 * This is a fault on non-kernel virtual memory. 850 * vm is initialized above to NULL. If curproc is NULL 851 * or curproc->p_vmspace is NULL the fault is fatal. 852 */ 853 if (lp != NULL) 854 vm = lp->lwp_vmspace; 855 856 if (vm == NULL) { 857 fault_flags = -1; 858 ftype = -1; 859 goto nogo; 860 } 861 862 /* 863 * Debugging, try to catch kernel faults on the user address space when not inside 864 * on onfault (e.g. copyin/copyout) routine. 865 */ 866 if (usermode == 0 && (td->td_pcb == NULL || td->td_pcb->pcb_onfault == NULL)) { 867 if (freeze_on_seg_fault) { 868 kprintf("trap_pfault: user address fault from kernel mode " 869 "%016lx\n", (long)frame->tf_addr); 870 while (freeze_on_seg_fault) { 871 tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20); 872 } 873 } 874 } 875 map = &vm->vm_map; 876 } 877 878 /* 879 * PGEX_I is defined only if the execute disable bit capability is 880 * supported and enabled. 881 */ 882 if (frame->tf_err & PGEX_W) 883 ftype = VM_PROT_WRITE; 884 #if JG 885 else if ((frame->tf_err & PGEX_I) && pg_nx != 0) 886 ftype = VM_PROT_EXECUTE; 887 #endif 888 else 889 ftype = VM_PROT_READ; 890 891 if (map != &kernel_map) { 892 /* 893 * Keep swapout from messing with us during this 894 * critical time. 895 */ 896 PHOLD(lp->lwp_proc); 897 898 /* 899 * Issue fault 900 */ 901 fault_flags = 0; 902 if (usermode) 903 fault_flags |= VM_FAULT_BURST; 904 if (ftype & VM_PROT_WRITE) 905 fault_flags |= VM_FAULT_DIRTY; 906 else 907 fault_flags |= VM_FAULT_NORMAL; 908 rv = vm_fault(map, va, ftype, fault_flags); 909 910 PRELE(lp->lwp_proc); 911 } else { 912 /* 913 * Don't have to worry about process locking or stacks 914 * in the kernel. 915 */ 916 fault_flags = VM_FAULT_NORMAL; 917 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); 918 } 919 920 if (rv == KERN_SUCCESS) 921 return (0); 922 nogo: 923 if (!usermode) { 924 if (td->td_gd->gd_intr_nesting_level == 0 && 925 td->td_pcb->pcb_onfault) { 926 frame->tf_rip = (register_t)td->td_pcb->pcb_onfault; 927 return (0); 928 } 929 trap_fatal(frame, frame->tf_addr); 930 return (-1); 931 } 932 933 /* 934 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no 935 * kludge is needed to pass the fault address to signal handlers. 936 */ 937 p = td->td_proc; 938 if (td->td_lwp->lwp_vkernel == NULL) { 939 if (bootverbose || freeze_on_seg_fault || ddb_on_seg_fault) { 940 kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p " 941 "pid=%d cpu=%d p_comm=%s\n", 942 ftype, fault_flags, 943 (void *)frame->tf_addr, 944 (void *)frame->tf_rip, 945 p->p_pid, mycpu->gd_cpuid, p->p_comm); 946 } 947 #ifdef DDB 948 while (freeze_on_seg_fault) { 949 tsleep(p, 0, "freeze", hz * 20); 950 } 951 if (ddb_on_seg_fault) 952 Debugger("ddb_on_seg_fault"); 953 #endif 954 } 955 956 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV); 957 } 958 959 static void 960 trap_fatal(struct trapframe *frame, vm_offset_t eva) 961 { 962 int code, ss; 963 u_int type; 964 long rsp; 965 struct soft_segment_descriptor softseg; 966 char *msg; 967 968 code = frame->tf_err; 969 type = frame->tf_trapno; 970 sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg); 971 972 if (type <= MAX_TRAP_MSG) 973 msg = trap_msg[type]; 974 else 975 msg = "UNKNOWN"; 976 kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg, 977 ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel"); 978 #ifdef SMP 979 /* three separate prints in case of a trap on an unmapped page */ 980 kprintf("cpuid = %d; ", mycpu->gd_cpuid); 981 kprintf("lapic->id = %08x\n", lapic->id); 982 #endif 983 if (type == T_PAGEFLT) { 984 kprintf("fault virtual address = 0x%lx\n", eva); 985 kprintf("fault code = %s %s %s, %s\n", 986 code & PGEX_U ? "user" : "supervisor", 987 code & PGEX_W ? "write" : "read", 988 code & PGEX_I ? "instruction" : "data", 989 code & PGEX_P ? "protection violation" : "page not present"); 990 } 991 kprintf("instruction pointer = 0x%lx:0x%lx\n", 992 frame->tf_cs & 0xffff, frame->tf_rip); 993 if (ISPL(frame->tf_cs) == SEL_UPL) { 994 ss = frame->tf_ss & 0xffff; 995 rsp = frame->tf_rsp; 996 } else { 997 ss = GSEL(GDATA_SEL, SEL_KPL); 998 rsp = (long)&frame->tf_rsp; 999 } 1000 kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp); 1001 kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp); 1002 kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n", 1003 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type); 1004 kprintf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n", 1005 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32, 1006 softseg.ssd_gran); 1007 kprintf("processor eflags = "); 1008 if (frame->tf_rflags & PSL_T) 1009 kprintf("trace trap, "); 1010 if (frame->tf_rflags & PSL_I) 1011 kprintf("interrupt enabled, "); 1012 if (frame->tf_rflags & PSL_NT) 1013 kprintf("nested task, "); 1014 if (frame->tf_rflags & PSL_RF) 1015 kprintf("resume, "); 1016 kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12); 1017 kprintf("current process = "); 1018 if (curproc) { 1019 kprintf("%lu\n", 1020 (u_long)curproc->p_pid); 1021 } else { 1022 kprintf("Idle\n"); 1023 } 1024 kprintf("current thread = pri %d ", curthread->td_pri); 1025 if (curthread->td_critcount) 1026 kprintf("(CRIT)"); 1027 kprintf("\n"); 1028 1029 #ifdef DDB 1030 if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame)) 1031 return; 1032 #endif 1033 kprintf("trap number = %d\n", type); 1034 if (type <= MAX_TRAP_MSG) 1035 panic("%s", trap_msg[type]); 1036 else 1037 panic("unknown/reserved trap"); 1038 } 1039 1040 /* 1041 * Double fault handler. Called when a fault occurs while writing 1042 * a frame for a trap/exception onto the stack. This usually occurs 1043 * when the stack overflows (such is the case with infinite recursion, 1044 * for example). 1045 */ 1046 static __inline 1047 int 1048 in_kstack_guard(register_t rptr) 1049 { 1050 thread_t td = curthread; 1051 1052 if ((char *)rptr >= td->td_kstack && 1053 (char *)rptr < td->td_kstack + PAGE_SIZE) { 1054 return 1; 1055 } 1056 return 0; 1057 } 1058 1059 void 1060 dblfault_handler(struct trapframe *frame) 1061 { 1062 thread_t td = curthread; 1063 1064 if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) { 1065 kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n"); 1066 if (in_kstack_guard(frame->tf_rsp)) 1067 frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE); 1068 if (in_kstack_guard(frame->tf_rbp)) 1069 frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE); 1070 } else { 1071 kprintf("DOUBLE FAULT\n"); 1072 } 1073 kprintf("\nFatal double fault\n"); 1074 kprintf("rip = 0x%lx\n", frame->tf_rip); 1075 kprintf("rsp = 0x%lx\n", frame->tf_rsp); 1076 kprintf("rbp = 0x%lx\n", frame->tf_rbp); 1077 #ifdef SMP 1078 /* three separate prints in case of a trap on an unmapped page */ 1079 kprintf("cpuid = %d; ", mycpu->gd_cpuid); 1080 kprintf("lapic->id = %08x\n", lapic->id); 1081 #endif 1082 panic("double fault"); 1083 } 1084 1085 /* 1086 * syscall2 - MP aware system call request C handler 1087 * 1088 * A system call is essentially treated as a trap except that the 1089 * MP lock is not held on entry or return. We are responsible for 1090 * obtaining the MP lock if necessary and for handling ASTs 1091 * (e.g. a task switch) prior to return. 1092 * 1093 * MPSAFE 1094 */ 1095 void 1096 syscall2(struct trapframe *frame) 1097 { 1098 struct thread *td = curthread; 1099 struct proc *p = td->td_proc; 1100 struct lwp *lp = td->td_lwp; 1101 caddr_t params; 1102 struct sysent *callp; 1103 register_t orig_tf_rflags; 1104 int sticks; 1105 int error; 1106 int narg; 1107 #ifdef INVARIANTS 1108 int crit_count = td->td_critcount; 1109 #endif 1110 #ifdef SMP 1111 int have_mplock = 0; 1112 #endif 1113 register_t *argp; 1114 u_int code; 1115 int reg, regcnt; 1116 union sysunion args; 1117 register_t *argsdst; 1118 1119 mycpu->gd_cnt.v_syscall++; 1120 1121 #ifdef DIAGNOSTIC 1122 if (ISPL(frame->tf_cs) != SEL_UPL) { 1123 get_mplock(); 1124 panic("syscall"); 1125 /* NOT REACHED */ 1126 } 1127 #endif 1128 1129 KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid, 1130 frame->tf_rax); 1131 1132 userenter(td, p); /* lazy raise our priority */ 1133 1134 reg = 0; 1135 regcnt = 6; 1136 /* 1137 * Misc 1138 */ 1139 sticks = (int)td->td_sticks; 1140 orig_tf_rflags = frame->tf_rflags; 1141 1142 /* 1143 * Virtual kernel intercept - if a VM context managed by a virtual 1144 * kernel issues a system call the virtual kernel handles it, not us. 1145 * Restore the virtual kernel context and return from its system 1146 * call. The current frame is copied out to the virtual kernel. 1147 */ 1148 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) { 1149 vkernel_trap(lp, frame); 1150 error = EJUSTRETURN; 1151 goto out; 1152 } 1153 1154 /* 1155 * Get the system call parameters and account for time 1156 */ 1157 KASSERT(lp->lwp_md.md_regs == frame, 1158 ("Frame mismatch %p %p", lp->lwp_md.md_regs, frame)); 1159 params = (caddr_t)frame->tf_rsp + sizeof(register_t); 1160 code = frame->tf_rax; 1161 1162 if (p->p_sysent->sv_prepsyscall) { 1163 (*p->p_sysent->sv_prepsyscall)( 1164 frame, (int *)(&args.nosys.sysmsg + 1), 1165 &code, ¶ms); 1166 } else { 1167 if (code == SYS_syscall || code == SYS___syscall) { 1168 code = frame->tf_rdi; 1169 reg++; 1170 regcnt--; 1171 } 1172 } 1173 1174 if (p->p_sysent->sv_mask) 1175 code &= p->p_sysent->sv_mask; 1176 1177 if (code >= p->p_sysent->sv_size) 1178 callp = &p->p_sysent->sv_table[0]; 1179 else 1180 callp = &p->p_sysent->sv_table[code]; 1181 1182 narg = callp->sy_narg & SYF_ARGMASK; 1183 1184 /* 1185 * On x86_64 we get up to six arguments in registers. The rest are 1186 * on the stack. The first six members of 'struct trapframe' happen 1187 * to be the registers used to pass arguments, in exactly the right 1188 * order. 1189 */ 1190 argp = &frame->tf_rdi; 1191 argp += reg; 1192 argsdst = (register_t *)(&args.nosys.sysmsg + 1); 1193 /* 1194 * JG can we overflow the space pointed to by 'argsdst' 1195 * either with 'bcopy' or with 'copyin'? 1196 */ 1197 bcopy(argp, argsdst, sizeof(register_t) * regcnt); 1198 /* 1199 * copyin is MP aware, but the tracing code is not 1200 */ 1201 if (narg > regcnt) { 1202 KASSERT(params != NULL, ("copyin args with no params!")); 1203 error = copyin(params, &argsdst[regcnt], 1204 (narg - regcnt) * sizeof(register_t)); 1205 if (error) { 1206 #ifdef KTRACE 1207 if (KTRPOINT(td, KTR_SYSCALL)) { 1208 MAKEMPSAFE(have_mplock); 1209 1210 ktrsyscall(lp, code, narg, 1211 (void *)(&args.nosys.sysmsg + 1)); 1212 } 1213 #endif 1214 goto bad; 1215 } 1216 } 1217 1218 #ifdef KTRACE 1219 if (KTRPOINT(td, KTR_SYSCALL)) { 1220 MAKEMPSAFE(have_mplock); 1221 ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1)); 1222 } 1223 #endif 1224 1225 /* 1226 * Default return value is 0 (will be copied to %rax). Double-value 1227 * returns use %rax and %rdx. %rdx is left unchanged for system 1228 * calls which return only one result. 1229 */ 1230 args.sysmsg_fds[0] = 0; 1231 args.sysmsg_fds[1] = frame->tf_rdx; 1232 1233 /* 1234 * The syscall might manipulate the trap frame. If it does it 1235 * will probably return EJUSTRETURN. 1236 */ 1237 args.sysmsg_frame = frame; 1238 1239 STOPEVENT(p, S_SCE, narg); /* MP aware */ 1240 1241 /* 1242 * NOTE: All system calls run MPSAFE now. The system call itself 1243 * is responsible for getting the MP lock. 1244 */ 1245 #ifdef SYSCALL_DEBUG 1246 uint64_t tscval = rdtsc(); 1247 #endif 1248 error = (*callp->sy_call)(&args); 1249 #ifdef SYSCALL_DEBUG 1250 tscval = rdtsc() - tscval; 1251 tscval = tscval * 1000000 / tsc_frequency; 1252 if (SysCallsWorstCase[code] < tscval) 1253 SysCallsWorstCase[code] = tscval; 1254 #endif 1255 1256 out: 1257 /* 1258 * MP SAFE (we may or may not have the MP lock at this point) 1259 */ 1260 //kprintf("SYSMSG %d ", error); 1261 switch (error) { 1262 case 0: 1263 /* 1264 * Reinitialize proc pointer `p' as it may be different 1265 * if this is a child returning from fork syscall. 1266 */ 1267 p = curproc; 1268 lp = curthread->td_lwp; 1269 frame->tf_rax = args.sysmsg_fds[0]; 1270 frame->tf_rdx = args.sysmsg_fds[1]; 1271 frame->tf_rflags &= ~PSL_C; 1272 break; 1273 case ERESTART: 1274 /* 1275 * Reconstruct pc, we know that 'syscall' is 2 bytes. 1276 * We have to do a full context restore so that %r10 1277 * (which was holding the value of %rcx) is restored for 1278 * the next iteration. 1279 */ 1280 if (frame->tf_err != 0 && frame->tf_err != 2) 1281 kprintf("lp %s:%d frame->tf_err is weird %ld\n", 1282 td->td_comm, lp->lwp_proc->p_pid, frame->tf_err); 1283 frame->tf_rip -= frame->tf_err; 1284 frame->tf_r10 = frame->tf_rcx; 1285 break; 1286 case EJUSTRETURN: 1287 break; 1288 case EASYNC: 1289 panic("Unexpected EASYNC return value (for now)"); 1290 default: 1291 bad: 1292 if (p->p_sysent->sv_errsize) { 1293 if (error >= p->p_sysent->sv_errsize) 1294 error = -1; /* XXX */ 1295 else 1296 error = p->p_sysent->sv_errtbl[error]; 1297 } 1298 frame->tf_rax = error; 1299 frame->tf_rflags |= PSL_C; 1300 break; 1301 } 1302 1303 /* 1304 * Traced syscall. trapsignal() is not MP aware. 1305 */ 1306 if (orig_tf_rflags & PSL_T) { 1307 MAKEMPSAFE(have_mplock); 1308 frame->tf_rflags &= ~PSL_T; 1309 trapsignal(lp, SIGTRAP, TRAP_TRACE); 1310 } 1311 1312 /* 1313 * Handle reschedule and other end-of-syscall issues 1314 */ 1315 userret(lp, frame, sticks); 1316 1317 #ifdef KTRACE 1318 if (KTRPOINT(td, KTR_SYSRET)) { 1319 MAKEMPSAFE(have_mplock); 1320 ktrsysret(lp, code, error, args.sysmsg_result); 1321 } 1322 #endif 1323 1324 /* 1325 * This works because errno is findable through the 1326 * register set. If we ever support an emulation where this 1327 * is not the case, this code will need to be revisited. 1328 */ 1329 STOPEVENT(p, S_SCX, code); 1330 1331 userexit(lp); 1332 #ifdef SMP 1333 /* 1334 * Release the MP lock if we had to get it 1335 */ 1336 if (have_mplock) 1337 rel_mplock(); 1338 #endif 1339 KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error); 1340 #ifdef INVARIANTS 1341 KASSERT(crit_count == td->td_critcount, 1342 ("syscall: critical section count mismatch! %d/%d", 1343 crit_count, td->td_pri)); 1344 KASSERT(&td->td_toks_base == td->td_toks_stop, 1345 ("syscall: extra tokens held after trap! %ld", 1346 td->td_toks_stop - &td->td_toks_base)); 1347 #endif 1348 } 1349 1350 /* 1351 * NOTE: mplock not held at any point 1352 */ 1353 void 1354 fork_return(struct lwp *lp, struct trapframe *frame) 1355 { 1356 frame->tf_rax = 0; /* Child returns zero */ 1357 frame->tf_rflags &= ~PSL_C; /* success */ 1358 frame->tf_rdx = 1; 1359 1360 generic_lwp_return(lp, frame); 1361 KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid); 1362 } 1363 1364 /* 1365 * Simplified back end of syscall(), used when returning from fork() 1366 * directly into user mode. 1367 * 1368 * This code will return back into the fork trampoline code which then 1369 * runs doreti. 1370 * 1371 * NOTE: The mplock is not held at any point. 1372 */ 1373 void 1374 generic_lwp_return(struct lwp *lp, struct trapframe *frame) 1375 { 1376 struct proc *p = lp->lwp_proc; 1377 1378 /* 1379 * Newly forked processes are given a kernel priority. We have to 1380 * adjust the priority to a normal user priority and fake entry 1381 * into the kernel (call userenter()) to install a passive release 1382 * function just in case userret() decides to stop the process. This 1383 * can occur when ^Z races a fork. If we do not install the passive 1384 * release function the current process designation will not be 1385 * released when the thread goes to sleep. 1386 */ 1387 lwkt_setpri_self(TDPRI_USER_NORM); 1388 userenter(lp->lwp_thread, p); 1389 userret(lp, frame, 0); 1390 #ifdef KTRACE 1391 if (KTRPOINT(lp->lwp_thread, KTR_SYSRET)) 1392 ktrsysret(lp, SYS_fork, 0, 0); 1393 #endif 1394 lp->lwp_flags |= LWP_PASSIVE_ACQ; 1395 userexit(lp); 1396 lp->lwp_flags &= ~LWP_PASSIVE_ACQ; 1397 } 1398 1399 /* 1400 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA 1401 * fault (which is then passed back to the virtual kernel) if an attempt is 1402 * made to use the FP unit. 1403 * 1404 * XXX this is a fairly big hack. 1405 */ 1406 void 1407 set_vkernel_fp(struct trapframe *frame) 1408 { 1409 struct thread *td = curthread; 1410 1411 if (frame->tf_xflags & PGEX_FPFAULT) { 1412 td->td_pcb->pcb_flags |= FP_VIRTFP; 1413 if (mdcpu->gd_npxthread == td) 1414 npxexit(); 1415 } else { 1416 td->td_pcb->pcb_flags &= ~FP_VIRTFP; 1417 } 1418 } 1419 1420 /* 1421 * Called from vkernel_trap() to fixup the vkernel's syscall 1422 * frame for vmspace_ctl() return. 1423 */ 1424 void 1425 cpu_vkernel_trap(struct trapframe *frame, int error) 1426 { 1427 frame->tf_rax = error; 1428 if (error) 1429 frame->tf_rflags |= PSL_C; 1430 else 1431 frame->tf_rflags &= ~PSL_C; 1432 } 1433