1 /* $NetBSD: trap.c,v 1.186 2011/01/14 02:06:31 rmind Exp $ */ 2 3 /* 4 * Copyright (c) 1996 5 * The President and Fellows of Harvard College. All rights reserved. 6 * Copyright (c) 1992, 1993 7 * The Regents of the University of California. All rights reserved. 8 * 9 * This software was developed by the Computer Systems Engineering group 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 11 * contributed to Berkeley. 12 * 13 * All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Lawrence Berkeley Laboratory. 17 * This product includes software developed by Harvard University. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 1. Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * 2. Redistributions in binary form must reproduce the above copyright 25 * notice, this list of conditions and the following disclaimer in the 26 * documentation and/or other materials provided with the distribution. 27 * 3. All advertising materials mentioning features or use of this software 28 * must display the following acknowledgement: 29 * This product includes software developed by the University of 30 * California, Berkeley and its contributors. 31 * This product includes software developed by Harvard University. 32 * 4. Neither the name of the University nor the names of its contributors 33 * may be used to endorse or promote products derived from this software 34 * without specific prior written permission. 
35 * 36 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 46 * SUCH DAMAGE. 47 * 48 * @(#)trap.c 8.4 (Berkeley) 9/23/93 49 */ 50 51 #include <sys/cdefs.h> 52 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.186 2011/01/14 02:06:31 rmind Exp $"); 53 54 #include "opt_ddb.h" 55 #include "opt_compat_svr4.h" 56 #include "opt_compat_sunos.h" 57 #include "opt_sparc_arch.h" 58 #include "opt_multiprocessor.h" 59 60 #include <sys/param.h> 61 #include <sys/systm.h> 62 #include <sys/proc.h> 63 #include <sys/kernel.h> 64 #include <sys/malloc.h> 65 #include <sys/kmem.h> 66 #include <sys/resource.h> 67 #include <sys/signal.h> 68 #include <sys/wait.h> 69 #include <sys/sa.h> 70 #include <sys/savar.h> 71 #include <sys/syscall.h> 72 #include <sys/syslog.h> 73 #include <sys/kauth.h> 74 75 #include <uvm/uvm_extern.h> 76 77 #include <sparc/sparc/asm.h> 78 #include <machine/cpu.h> 79 #include <machine/ctlreg.h> 80 #include <machine/trap.h> 81 #include <machine/instr.h> 82 #include <machine/pcb.h> 83 #include <machine/pmap.h> 84 #include <machine/userret.h> 85 86 #ifdef DDB 87 #include <machine/db_machdep.h> 88 #else 89 #include <machine/frame.h> 90 #endif 91 #ifdef COMPAT_SVR4 92 #include <machine/svr4_machdep.h> 93 #endif 94 #ifdef COMPAT_SUNOS 95 extern struct emul emul_sunos; 96 
#define SUNOS_MAXSADDR_SLOP (32 * 1024)
#endif

#include <sparc/fpu/fpu_extern.h>
#include <sparc/sparc/memreg.h>
#include <sparc/sparc/cpuvar.h>

#ifdef DEBUG
/* Non-zero enables verbose register-window save/restore tracing. */
int	rwindow_debug = 0;
#endif

/*
 * Initial FPU state is all registers == all 1s, everything else == all 0s.
 * This makes every floating point register a signalling NaN, with sign bit
 * set, no matter how it is interpreted.  Appendix N of the Sparc V8 document
 * seems to imply that we should do this, and it does make sense.
 */
struct fpstate initfpstate = {
	{ ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
	  ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0 },
	0, 0,
};

/*
 * There are more than 100 trap types, but most are unused.
 *
 * Trap type 0 is taken over as an `Asynchronous System Trap'.
 * This is left-over Vax emulation crap that should be fixed.
 */
static const char T[] = "trap";		/* shared name for all unnamed vectors */
const char *trap_type[] = {
	/* non-user vectors */
	"ast",			/* 0 */
	"text fault",		/* 1 */
	"illegal instruction",	/* 2 */
	"privileged instruction",/* 3 */
	"fp disabled",		/* 4 */
	"window overflow",	/* 5 */
	"window underflow",	/* 6 */
	"alignment fault",	/* 7 */
	"fp exception",		/* 8 */
	"data fault",		/* 9 */
	"tag overflow",		/* 0a */
	"watchpoint",		/* 0b */
	T, T, T, T, T,		/* 0c..10 */
	"level 1 int",		/* 11 */
	"level 2 int",		/* 12 */
	"level 3 int",		/* 13 */
	"level 4 int",		/* 14 */
	"level 5 int",		/* 15 */
	"level 6 int",		/* 16 */
	"level 7 int",		/* 17 */
	"level 8 int",		/* 18 */
	"level 9 int",		/* 19 */
	"level 10 int",		/* 1a */
	"level 11 int",		/* 1b */
	"level 12 int",		/* 1c */
	"level 13 int",		/* 1d */
	"level 14 int",		/* 1e */
	"level 15 int",		/* 1f */
	"register access error",/* 20 */
	"instruction access error",/* 21 */
	T, T,			/* 22..23 */
	"cp disabled",		/* 24 */
	"unimplemented flush",	/* 25 */
	T, T,			/* 26..27 */
	"cp exception",		/* 28 */
	"data access error",	/* 29 */
	"hw zero divide",	/* 2a */
	"data store error",	/* 2b */
	"data access MMU miss",	/* 2c */
	T, T, T,		/* 2d..2f */
	T, T, T, T, T, T, T, T,	/* 30..37 */
	T, T, T, T,		/* 38..3b */
	"insn access MMU miss",	/* 3c */
	T, T, T,		/* 3d..3f */
	T, T, T, T, T, T, T, T,	/* 40..47 */
	T, T, T, T, T, T, T, T,	/* 48..4f */
	T, T, T, T, T, T, T, T,	/* 50..57 */
	T, T, T, T, T, T, T, T,	/* 58..5f */
	T, T, T, T, T, T, T, T,	/* 60..67 */
	T, T, T, T, T, T, T, T,	/* 68..6f */
	T, T, T, T, T, T, T, T,	/* 70..77 */
	T, T, T, T, T, T, T, T,	/* 78..7f */

	/* user (software trap) vectors */
	"syscall",		/* 80 */
	"breakpoint",		/* 81 */
	"zero divide",		/* 82 */
	"flush windows",	/* 83 */
	"clean windows",	/* 84 */
	"range check",		/* 85 */
	"fix align",		/* 86 */
	"integer overflow",	/* 87 */
	"svr4 syscall",		/* 88 */
	"4.4 syscall",		/* 89 */
	"kgdb exec",		/* 8a */
	T, T, T, T, T,		/* 8b..8f */
	T, T, T, T, T, T, T, T,	/* 90..97 */
	T, T, T, T, T, T, T, T,	/* 98..9f */
	"svr4 getcc",		/* a0 */
	"svr4 setcc",		/* a1 */
	"svr4 getpsr",		/* a2 */
	"svr4 setpsr",		/* a3 */
	"svr4 gethrtime",	/* a4 */
	"svr4 gethrvtime",	/* a5 */
	T,			/* a6 */
	"svr4 gethrestime",	/* a7 */
};

/* Number of entries in trap_type[]; bounds lookups for panic messages. */
#define	N_TRAP_TYPES	(sizeof trap_type / sizeof *trap_type)

void trap(unsigned, int, int, struct trapframe *);
void mem_access_fault(unsigned, int, u_int, int, int, struct trapframe *);
void mem_access_fault4m(unsigned, u_int, u_int, struct trapframe *);

/*
 * If set, unknown trap types below 0x80 taken from user mode deliver
 * SIGILL to the process instead of panicking the kernel.
 */
int	ignore_bogus_traps = 1;

/*
 * Called from locore.s trap handling, for non-MMU-related traps.
 * (MMU-related traps go through mem_access_fault, below.)
 */
/*
 * trap() — dispatch a single non-MMU trap.
 *
 *	type	trap number (indexes trap_type[] above)
 *	psr	%psr at trap time; PSR_PS set => trapped from kernel mode
 *	pc	trap-time program counter
 *	tf	saved trap frame, modified in place (e.g. to step over
 *		the trapping instruction via ADVANCE)
 *
 * Kernel-mode traps generally panic; a few benign ones are handled
 * early.  User-mode traps are dispatched by the switch below and may
 * post a signal via trapsignal() before returning through userret().
 */
void
trap(unsigned type, int psr, int pc, struct trapframe *tf)
{
	struct proc *p;
	struct lwp *l;
	struct pcb *pcb;
	int n, s;
	char bits[64];
	u_quad_t sticks;
	ksiginfo_t ksi;
	int code, sig;

	/* This steps the PC over the trap. */
#define	ADVANCE (n = tf->tf_npc, tf->tf_pc = n, tf->tf_npc = n + 4)

	curcpu()->ci_data.cpu_ntrap++;
	/*
	 * Generally, kernel traps cause a panic.  Any exceptions are
	 * handled early here.
	 */
	if (psr & PSR_PS) {
#ifdef DDB
		if (type == T_BREAKPOINT) {
			write_all_windows();
			if (kdb_trap(type, tf)) {
				return;
			}
		}
#if defined(MULTIPROCESSOR)
		if (type == T_DBPAUSE) {
			/* XXX - deal with kgdb too */
			extern void ddb_suspend(struct trapframe *);
			write_all_windows();
			ddb_suspend(tf);
			ADVANCE;
			return;
		}
#endif
#endif
#ifdef DIAGNOSTIC
		/*
		 * Currently, we allow DIAGNOSTIC kernel code to
		 * flush the windows to record stack traces.
		 */
		if (type == T_FLUSHWIN) {
			write_all_windows();
			ADVANCE;
			return;
		}
#endif
		if (type == T_UNIMPLFLUSH) {
			/*
			 * This should happen only on hypersparc.
			 * It also is a rare event to get this trap
			 * from kernel space.  For now, just flush the
			 * entire I-cache.
			 */
#if defined(MULTIPROCESSOR)
			/* Broadcast to all CPUs */
			XCALL0(*cpuinfo.pure_vcache_flush, CPUSET_ALL);
#else
			(*cpuinfo.pure_vcache_flush)();
#endif
			ADVANCE;
			return;
		}

		/*
		 * Storing %fsr in cpu_attach will cause this trap
		 * even though the fpu has been enabled, if and only
		 * if there is no FPU.
		 */
		if (type == T_FPDISABLED && cold) {
			ADVANCE;
			return;
		}
	dopanic:
		snprintb(bits, sizeof(bits), PSR_BITS, psr);
		printf("trap type 0x%x: pc=0x%x npc=0x%x psr=%s\n",
		    type, pc, tf->tf_npc, bits);
#ifdef DDB
		write_all_windows();
		(void) kdb_trap(type, tf);
#endif
		/*
		 * Non-literal panic format, but the string always comes
		 * from the fixed internal trap_type[] table (or T).
		 */
		panic(type < N_TRAP_TYPES ? trap_type[type] : T);
		/* NOTREACHED */
	}
	if ((l = curlwp) == NULL)
		l = &lwp0;
	p = l->l_proc;
	LWP_CACHE_CREDS(l, p);
	sticks = p->p_sticks;
	pcb = lwp_getpcb(l);
	l->l_md.md_tf = tf;	/* for ptrace/signals */

#ifdef FPU_DEBUG
	if (type != T_FPDISABLED && (tf->tf_psr & PSR_EF) != 0) {
		if (cpuinfo.fplwp != l)
			panic("FPU enabled but wrong proc (0) [l=%p, fwlp=%p]",
			    l, cpuinfo.fplwp);
		savefpstate(l->l_md.md_fpstate);
		l->l_md.md_fpu = NULL;
		cpuinfo.fplwp = NULL;
		tf->tf_psr &= ~PSR_EF;
		setpsr(getpsr() & ~PSR_EF);
	}
#endif

	sig = 0;

	switch (type) {

	default:
		if (type < 0x80) {
			/* unknown hardware trap from user mode */
			if (!ignore_bogus_traps)
				goto dopanic;
			snprintb(bits, sizeof(bits), PSR_BITS, psr);
			printf("trap type 0x%x: pc=0x%x npc=0x%x psr=%s\n",
			    type, pc, tf->tf_npc, bits);
			sig = SIGILL;
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_trap = type;
			ksi.ksi_code = ILL_ILLTRP;
			ksi.ksi_addr = (void *)pc;
			break;
		}
#if defined(COMPAT_SVR4)
	badtrap:
#endif
#ifdef DIAGNOSTIC
		if (type < 0x90 || type > 0x9f) {
			/* the following message is gratuitous */
			/* ... but leave it in until we find anything */
			uprintf("%s[%d]: unimplemented software trap 0x%x\n",
			    p->p_comm, p->p_pid, type);
		}
#endif
		/* unimplemented software trap: SIGILL */
		sig = SIGILL;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = ILL_ILLTRP;
		ksi.ksi_addr = (void *)pc;
		break;

#ifdef COMPAT_SVR4
	case T_SVR4_GETCC:
	case T_SVR4_SETCC:
	case T_SVR4_GETPSR:
	case T_SVR4_SETPSR:
	case T_SVR4_GETHRTIME:
	case T_SVR4_GETHRVTIME:
	case T_SVR4_GETHRESTIME:
		if (!svr4_trap(type, l))
			goto badtrap;
		break;
#endif

	case T_AST:
		break;	/* the work is all in userret() */

	case T_UNIMPLFLUSH:
		/* Invalidate the entire I-cache */
#if defined(MULTIPROCESSOR)
		/* Broadcast to all CPUs */
		XCALL0(*cpuinfo.pure_vcache_flush, CPUSET_ALL);
#else
		(*cpuinfo.pure_vcache_flush)();
#endif
		ADVANCE;
		break;

	case T_ILLINST:
		/* Note: Cypress generates a T_ILLINST on FLUSH instructions */
		if ((sig = emulinstr(pc, tf)) == 0) {
			ADVANCE;
			break;
		}
		/* sig is the signal emulinstr chose; report as illegal op */
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = ILL_ILLOPC;
		ksi.ksi_addr = (void *)pc;
		break;

	case T_PRIVINST:
		sig = SIGILL;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = ILL_PRVOPC;
		ksi.ksi_addr = (void *)pc;
		break;

	case T_FPDISABLED: {
		struct fpstate *fs = l->l_md.md_fpstate;

#ifdef FPU_DEBUG
		if ((tf->tf_psr & PSR_PS) != 0) {
			printf("FPU fault from kernel mode, pc=%x\n", pc);
#ifdef DDB
			Debugger();
#endif
		}
#endif

		/* First FPU use by this lwp: allocate its state lazily. */
		if (fs == NULL) {
			fs = malloc(sizeof *fs, M_SUBPROC, M_WAITOK);
			*fs = initfpstate;
			l->l_md.md_fpstate = fs;
		}
		/*
		 * If we have not found an FPU, we have to emulate it.
		 */
		if (!cpuinfo.fpupresent) {
#ifdef notyet
			fpu_emulate(l, tf, fs);
#else
			sig = SIGFPE;
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_trap = type;
			ksi.ksi_code = SI_NOINFO;
			ksi.ksi_addr = (void *)pc;
#endif
			break;
		}
		/*
		 * We may have more FPEs stored up and/or ops queued.
		 * If they exist, handle them and get out.  Otherwise,
		 * resolve the FPU state, turn it on, and try again.
		 */
		if (fs->fs_qsize) {
			if ((code = fpu_cleanup(l, fs)) != 0) {
				sig = SIGFPE;
				KSI_INIT_TRAP(&ksi);
				ksi.ksi_trap = type;
				ksi.ksi_code = code;
				ksi.ksi_addr = (void *)pc;
			}
			break;
		}

		/*
		 * If we do not own the FPU state on this CPU, we must
		 * now acquire it.
		 */
		if (cpuinfo.fplwp != l) {
			struct cpu_info *cpi;

			FPU_LOCK(s);
			if (cpuinfo.fplwp != NULL) {
				/* someone else had it */
				savefpstate(cpuinfo.fplwp->l_md.md_fpstate);
				cpuinfo.fplwp->l_md.md_fpu = NULL;
			}

			/*
			 * On MP machines, some of the other FPUs might
			 * still have our state.  Tell the owning processor
			 * to save the process' FPU state.
			 */
			if ((cpi = l->l_md.md_fpu) != NULL) {
				if (cpi->ci_cpuid == cpuinfo.ci_cpuid)
					panic("FPU(%d): state for %p",
					    cpi->ci_cpuid, l);
#if defined(MULTIPROCESSOR)
				XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
				cpi->fplwp = NULL;
			}
			loadfpstate(fs);

			/* now we do have it */
			cpuinfo.fplwp = l;
			l->l_md.md_fpu = curcpu();
			FPU_UNLOCK(s);
		}

		tf->tf_psr |= PSR_EF;
		break;
	}

	case T_WINOF:
		/* Window overflow: spill user windows to the user stack. */
		if (rwindow_save(l)) {
			mutex_enter(p->p_lock);
			sigexit(l, SIGILL);
		}
		break;

#define read_rw(src, dst) \
	copyin((void *)(src), (void *)(dst), sizeof(struct rwindow))

	case T_RWRET:
		/*
		 * T_RWRET is a window load needed in order to rett.
		 * It simply needs the window to which tf->tf_out[6]
		 * (%sp) points.  There are no user or saved windows now.
		 * Copy the one from %sp into pcb->pcb_rw[0] and set
		 * nsaved to -1.  If we decide to deliver a signal on
		 * our way out, we will clear nsaved.
		 */
		if (pcb->pcb_uw || pcb->pcb_nsaved)
			panic("trap T_RWRET 1");
#ifdef DEBUG
		if (rwindow_debug)
			printf("cpu%d:%s[%d]: rwindow: pcb<-stack: 0x%x\n",
			    cpuinfo.ci_cpuid, p->p_comm, p->p_pid,
			    tf->tf_out[6]);
#endif
		if (read_rw(tf->tf_out[6], &pcb->pcb_rw[0])) {
			mutex_enter(p->p_lock);
			sigexit(l, SIGILL);
		}
		if (pcb->pcb_nsaved)
			panic("trap T_RWRET 2");
		pcb->pcb_nsaved = -1;	/* mark success */
		break;

	case T_WINUF:
		/*
		 * T_WINUF is a real window underflow, from a restore
		 * instruction.  It needs to have the contents of two
		 * windows---the one belonging to the restore instruction
		 * itself, which is at its %sp, and the one belonging to
		 * the window above, which is at its %fp or %i6---both
		 * in the pcb.  The restore's window may still be in
		 * the CPU; we need to force it out to the stack.
		 */
#ifdef DEBUG
		if (rwindow_debug)
			printf("cpu%d:%s[%d]: rwindow: T_WINUF 0: pcb<-stack: 0x%x\n",
			    cpuinfo.ci_cpuid, p->p_comm, p->p_pid,
			    tf->tf_out[6]);
#endif
		write_user_windows();
		if (rwindow_save(l) || read_rw(tf->tf_out[6], &pcb->pcb_rw[0])) {
			mutex_enter(p->p_lock);
			sigexit(l, SIGILL);
		}
#ifdef DEBUG
		if (rwindow_debug)
			printf("cpu%d:%s[%d]: rwindow: T_WINUF 1: pcb<-stack: 0x%x\n",
			    cpuinfo.ci_cpuid, p->p_comm, p->p_pid,
			    pcb->pcb_rw[0].rw_in[6]);
#endif
		if (read_rw(pcb->pcb_rw[0].rw_in[6], &pcb->pcb_rw[1])) {
			mutex_enter(p->p_lock);
			sigexit(l, SIGILL);
		}
		if (pcb->pcb_nsaved)
			panic("trap T_WINUF");
		pcb->pcb_nsaved = -1;	/* mark success */
		break;

	case T_ALIGN:
		/* Optionally emulate the misaligned access (see T_FIXALIGN). */
		if ((p->p_md.md_flags & MDP_FIXALIGN) != 0) {
			n = fixalign(l, tf);
			if (n == 0) {
				ADVANCE;
				break;
			}
		}
		sig = SIGBUS;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = BUS_ADRALN;
		ksi.ksi_addr = (void *)pc;
		break;

	case T_FPE:
		/*
		 * Clean up after a floating point exception.
		 * fpu_cleanup can (and usually does) modify the
		 * state we save here, so we must `give up' the FPU
		 * chip context.  (The software and hardware states
		 * will not match once fpu_cleanup does its job, so
		 * we must not save again later.)
		 */
		if (l != cpuinfo.fplwp)
			panic("fpe without being the FP user");
		FPU_LOCK(s);
		savefpstate(l->l_md.md_fpstate);
		cpuinfo.fplwp = NULL;
		l->l_md.md_fpu = NULL;
		FPU_UNLOCK(s);
		/* tf->tf_psr &= ~PSR_EF; */	/* share_fpu will do this */
		if ((code = fpu_cleanup(l, l->l_md.md_fpstate)) != 0) {
			sig = SIGFPE;
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_trap = type;
			ksi.ksi_code = code;
			ksi.ksi_addr = (void *)pc;
		}
#if 0	/* ??? really never??? */
		ADVANCE;
#endif
		break;

	case T_TAGOF:
		sig = SIGEMT;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = SI_NOINFO;
		ksi.ksi_addr = (void *)pc;
		break;

	case T_CPDISABLED:
		uprintf("coprocessor instruction\n");	/* XXX */
		sig = SIGILL;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = ILL_COPROC;
		ksi.ksi_addr = (void *)pc;
		break;

	case T_BREAKPOINT:
		sig = SIGTRAP;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = TRAP_BRKPT;
		ksi.ksi_addr = (void *)pc;
		break;

	case T_DIV0:
	case T_IDIV0:
		ADVANCE;
		sig = SIGFPE;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = FPE_INTDIV;
		ksi.ksi_addr = (void *)pc;
		break;

	case T_FLUSHWIN:
		write_user_windows();
#ifdef probably_slower_since_this_is_usually_false
		/*
		 * NOTE(review): dead code — rwindow_save() takes a
		 * struct lwp *, but `p' (struct proc *) is passed here;
		 * this would not compile if the #ifdef were enabled.
		 * Should be rwindow_save(l).
		 */
		if (pcb->pcb_nsaved && rwindow_save(p)) {
			mutex_enter(p->p_lock);
			sigexit(l, SIGILL);
		}
#endif
		ADVANCE;
		break;

	case T_CLEANWIN:
		uprintf("T_CLEANWIN\n");	/* XXX */
		ADVANCE;
		break;

	case T_RANGECHECK:
		uprintf("T_RANGECHECK\n");	/* XXX */
		ADVANCE;
		sig = SIGILL;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = ILL_ILLOPN;
		ksi.ksi_addr = (void *)pc;
		break;

	case T_FIXALIGN:
#ifdef DEBUG_ALIGN
		uprintf("T_FIXALIGN\n");
#endif
		/* User wants us to fix alignment faults */
		p->p_md.md_flags |= MDP_FIXALIGN;
		ADVANCE;
		break;

	case T_INTOF:
		uprintf("T_INTOF\n");		/* XXX */
		ADVANCE;
		sig = SIGFPE;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type;
		ksi.ksi_code = FPE_INTOVF;
		ksi.ksi_addr = (void *)pc;
		break;
	}
	if (sig != 0) {
		ksi.ksi_signo = sig;
		trapsignal(l, &ksi);
	}
	userret(l, pc, sticks);
	share_fpu(l, tf);
#undef ADVANCE
}

/*
 * Save windows from PCB into user stack, and return 0.
 * This is used on
 * window overflow pseudo-traps (from locore.s, just before returning to
 * user mode) and when ptrace or sendsig needs a consistent state.
 * As a side effect, rwindow_save() always sets pcb_nsaved to 0,
 * clobbering the `underflow restore' indicator if it was -1.
 *
 * If the windows cannot be saved, pcb_nsaved is restored and we return -1.
 */
int
rwindow_save(struct lwp *l)
{
	struct pcb *pcb = lwp_getpcb(l);
	struct rwindow *rw = &pcb->pcb_rw[0];
	int i;

	i = pcb->pcb_nsaved;
	if (i < 0) {
		/* -1 is the `underflow restore' marker, not a real count */
		pcb->pcb_nsaved = 0;
		return (0);
	}
	if (i == 0)
		return (0);
#ifdef DEBUG
	if (rwindow_debug)
		printf("cpu%d:%s[%d]: rwindow: pcb->stack:",
		    cpuinfo.ci_cpuid, l->l_proc->p_comm, l->l_proc->p_pid);
#endif
	do {
#ifdef DEBUG
		if (rwindow_debug)
			printf(" [%d]0x%x", cpuinfo.ci_cpuid, rw[1].rw_in[6]);
#endif
		/*
		 * Each window goes to the user stack at the %sp recorded
		 * in the NEXT saved window's %i6 (its frame pointer).
		 * On copyout failure, pcb_nsaved is deliberately left
		 * unchanged (see the function comment above).
		 */
		if (copyout((void *)rw, (void *)rw[1].rw_in[6],
		    sizeof *rw))
			return (-1);
		rw++;
	} while (--i > 0);
#ifdef DEBUG
	if (rwindow_debug)
		printf("\n");
#endif
	pcb->pcb_nsaved = 0;
	return (0);
}

/*
 * Kill user windows (before exec) by writing back to stack or pcb
 * and then erasing any pcb tracks.  Otherwise we might try to write
 * the registers into the new process after the exec.
 */
void
kill_user_windows(struct lwp *l)
{
	struct pcb *pcb = lwp_getpcb(l);

	write_user_windows();
	pcb->pcb_nsaved = 0;
}

/*
 * Called from locore.s trap handling, for synchronous memory faults.
 *
 * This duplicates a lot of logic in trap() and perhaps should be
 * moved there; but the bus-error-register parameters are unique to
 * this routine.
 *
 * Since synchronous errors accumulate during prefetch, we can have
 * more than one `cause'.  But we do not care what the cause, here;
 * we just want to page in the page and try again.
 */
/*
 * mem_access_fault() — sun4/sun4c synchronous memory fault handler.
 *
 *	type	T_TEXTFAULT or data-fault trap number
 *	ser	synchronous error register bits (SER_WRITE, SER_PROT, ...)
 *	v	faulting virtual address (ignored for text faults; pc used)
 *	pc	trap-time program counter
 *	psr	%psr at trap time; PSR_PS set => kernel-mode fault
 *	tf	saved trap frame
 */
void
mem_access_fault(unsigned type, int ser, u_int v, int pc, int psr,
		 struct trapframe *tf)
{
#if defined(SUN4) || defined(SUN4C)
	struct proc *p;
	struct lwp *l;
	struct pcb *pcb;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t atype;
	vaddr_t onfault;
	u_quad_t sticks;
	char bits[64];
	ksiginfo_t ksi;

	curcpu()->ci_data.cpu_ntrap++;
	l = curlwp;
	p = l->l_proc;
	pcb = lwp_getpcb(l);
	onfault = (vaddr_t)pcb->pcb_onfault;

	LWP_CACHE_CREDS(l, p);
	sticks = p->p_sticks;

#ifdef FPU_DEBUG
	if ((tf->tf_psr & PSR_EF) != 0) {
		if (cpuinfo.fplwp != l)
			panic("FPU enabled but wrong proc (1) [l=%p, fwlp=%p]",
			    l, cpuinfo.fplwp);
		savefpstate(l->l_md.md_fpstate);
		l->l_md.md_fpu = NULL;
		cpuinfo.fplwp = NULL;
		tf->tf_psr &= ~PSR_EF;
		setpsr(getpsr() & ~PSR_EF);
	}
#endif

	/*
	 * Figure out what to pass the VM code, and ignore the sva register
	 * value in v on text faults (text faults are always at pc).
	 * Kernel faults are somewhat different: text faults are always
	 * illegal, and data faults are extra complex.  User faults must
	 * set p->p_md.md_tf, in case we decide to deliver a signal.  Check
	 * for illegal virtual addresses early since those can induce more
	 * faults.
	 */
	if (type == T_TEXTFAULT)
		v = pc;
	if (VA_INHOLE(v)) {
		rv = EACCES;
		goto fault;
	}
	atype = ser & SER_WRITE ? VM_PROT_WRITE : VM_PROT_READ;
	if ((ser & SER_PROT) && atype == VM_PROT_READ && type != T_TEXTFAULT) {

		/*
		 * The hardware reports faults by the atomic load/store
		 * instructions as read faults, so if the faulting instruction
		 * is one of those, relabel this fault as both read and write.
		 */
		if ((fuword((void *)pc) & 0xc1680000) == 0xc0680000) {
			atype = VM_PROT_READ | VM_PROT_WRITE;
		}
	}
	va = trunc_page(v);
	if (psr & PSR_PS) {
		extern char Lfsbail[];

		if (type == T_TEXTFAULT) {
			(void) splhigh();
			snprintb(bits, sizeof(bits), SER_BITS, ser);
			printf("cpu%d: text fault: pc=0x%x ser=%s\n",
			    cpu_number(), pc, bits);
			panic("kernel fault");
			/* NOTREACHED */
		}
		/*
		 * If this was an access that we shouldn't try to page in,
		 * resume at the fault handler without any action.
		 */
		if (onfault == (vaddr_t)Lfsbail) {
			rv = EFAULT;
			goto kfault;
		}

		/*
		 * During autoconfiguration, faults are never OK unless
		 * pcb_onfault is set.  Once running normally we must allow
		 * exec() to cause copy-on-write faults to kernel addresses.
		 */
		if (cold) {
			rv = EFAULT;
			goto kfault;
		}
		if (va >= KERNBASE) {
			rv = mmu_pagein(pmap_kernel(), va, atype);
			if (rv < 0) {
				rv = EACCES;
				goto kfault;
			}
			if (rv > 0)
				return;
			pcb->pcb_onfault = NULL;
			rv = uvm_fault(kernel_map, va, atype);
			pcb->pcb_onfault = (void *)onfault;
			if (rv == 0)
				return;
			goto kfault;
		}
	} else {
		l->l_md.md_tf = tf;
		/*
		 * WRS: Can drop LP_SA_NOBLOCK test iff can only get
		 * here from a usermode-initiated access.  LP_SA_NOBLOCK
		 * should never be set there - it's kernel-only.
		 */
		if ((l->l_flag & LW_SA)
		    && (~l->l_pflag & LP_SA_NOBLOCK)) {
			l->l_savp->savp_faultaddr = (vaddr_t)v;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}
	}

	/*
	 * mmu_pagein returns -1 if the page is already valid, in which
	 * case we have a hard fault; it returns 1 if it loads a segment
	 * that got bumped out via LRU replacement.
	 */
	vm = p->p_vmspace;
	rv = mmu_pagein(vm->vm_map.pmap, va, atype);
	if (rv < 0) {
		rv = EACCES;
		goto fault;
	}
	if (rv > 0)
		goto out;

	/* alas! must call the horrible vm code */
	pcb->pcb_onfault = NULL;
	rv = uvm_fault(&vm->vm_map, (vaddr_t)va, atype);
	pcb->pcb_onfault = (void *)onfault;

	/*
	 * If this was a stack access we keep track of the maximum
	 * accessed stack size.  Also, if vm_fault gets a protection
	 * failure it is due to accessing the stack region outside
	 * the current limit and we need to reflect that as an access
	 * error.
	 */
	if ((void *)va >= vm->vm_maxsaddr
#ifdef COMPAT_SUNOS
	    && !(p->p_emul == &emul_sunos && va < USRSTACK -
		(vaddr_t)p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur +
		SUNOS_MAXSADDR_SLOP)
#endif
	    && rv == 0)
		uvm_grow(p, va);

	if (rv == 0) {
		/*
		 * pmap_enter() does not enter all requests made from
		 * vm_fault into the MMU (as that causes unnecessary
		 * entries for `wired' pages).  Instead, we call
		 * mmu_pagein here to make sure the new PTE gets installed.
		 */
		(void) mmu_pagein(vm->vm_map.pmap, va, VM_PROT_NONE);
	} else {
		/*
		 * Pagein failed.  If doing copyin/out, return to onfault
		 * address.  Any other page fault in kernel, die; if user
		 * fault, deliver SIGSEGV.
		 */
fault:
		if (psr & PSR_PS) {
kfault:
			if (!onfault) {
				(void) splhigh();
				snprintb(bits, sizeof(bits), SER_BITS, ser);
				printf("cpu%d: data fault: pc=0x%x "
				    "addr=0x%x ser=%s\n",
				    cpu_number(), pc, v, bits);
				panic("kernel fault");
				/* NOTREACHED */
			}
			/* resume at the registered recovery address */
			tf->tf_pc = onfault;
			tf->tf_npc = onfault + 4;
			tf->tf_out[0] = (rv == EACCES) ? EFAULT : rv;
			return;
		}
		KSI_INIT_TRAP(&ksi);
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			    p->p_pid, p->p_comm,
			    l->l_cred ?
			    kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
			ksi.ksi_code = SI_NOINFO;
		} else {
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = (rv == EACCES
				? SEGV_ACCERR : SEGV_MAPERR);
		}
		ksi.ksi_trap = type;
		ksi.ksi_addr = (void *)v;
		trapsignal(l, &ksi);
	}
out:
	if ((psr & PSR_PS) == 0) {
		l->l_pflag &= ~LP_SA_PAGEFAULT;
		userret(l, pc, sticks);
		share_fpu(l, tf);
	}
#endif /* SUN4 || SUN4C */
}

#if defined(SUN4M)	/* 4m version of mem_access_fault() follows */
static int tfaultaddr = (int) 0xdeadbeef;

void
mem_access_fault4m(unsigned type, u_int sfsr, u_int sfva, struct trapframe *tf)
{
	int pc, psr;
	struct proc *p;
	struct lwp *l;
	struct pcb *pcb;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t atype;
	int onfault;
	u_quad_t sticks;
	char bits[64];
	ksiginfo_t ksi;

	curcpu()->ci_data.cpu_ntrap++;

	l = curlwp;
	p = l->l_proc;
	LWP_CACHE_CREDS(l, p);
	sticks = p->p_sticks;
	pcb = lwp_getpcb(l);
	onfault = (vaddr_t)pcb->pcb_onfault;

#ifdef FPU_DEBUG
	if ((tf->tf_psr & PSR_EF) != 0) {
		if (cpuinfo.fplwp != l)
			panic("FPU enabled but wrong proc (2) [l=%p, fwlp=%p]",
			    l, cpuinfo.fplwp);
		savefpstate(l->l_md.md_fpstate);
		l->l_md.md_fpu = NULL;
		cpuinfo.fplwp = NULL;
		tf->tf_psr &= ~PSR_EF;
		setpsr(getpsr() & ~PSR_EF);
	}
#endif

	pc = tf->tf_pc;			/* These are needed below */
	psr = tf->tf_psr;

#if /*DIAGNOSTICS*/1
	if (type == T_DATAERROR || type == T_TEXTERROR)
		printf("%s[%d]: trap 0x%x: pc=0x%x sfsr=0x%x sfva=0x%x\n",
		    p->p_comm, p->p_pid, type, pc, sfsr, sfva);
#endif

	/*
	 * Our first priority is handling serious faults, such as
	 * parity errors or async faults that might have come through here.
	 * If afsr & AFSR_AFO != 0, then we're on a HyperSPARC and we
	 * got an async fault. We pass it on to memerr4m. Similarly, if
	 * the trap was T_STOREBUFFAULT, we pass it on to memerr4m.
1046 * If we have a data fault, but SFSR_FAV is not set in the sfsr, 1047 * then things are really bizarre, and we treat it as a hard 1048 * error and pass it on to memerr4m. See section 8.12.4 in the 1049 * SuperSPARC user's guide for more info, and for a possible 1050 * solution which we don't implement here. 1051 * Note: store buffer faults may also lead to a level 15 interrupt 1052 * being posted to the module (see sun4m system architecture, 1053 * section B.I.9). 1054 */ 1055 if (type == T_STOREBUFFAULT || 1056 (type == T_DATAFAULT && (sfsr & SFSR_FAV) == 0)) { 1057 (*cpuinfo.memerr)(type, sfsr, sfva, tf); 1058 /* 1059 * If we get here, exit the trap handler and wait for the 1060 * trap to re-occur. 1061 */ 1062 goto out_nounlock; 1063 } 1064 1065 /* 1066 * Figure out what to pass the VM code. We cannot ignore the sfva 1067 * register on text faults, since this might be a trap on an 1068 * alternate-ASI access to code space. However, if we're on a 1069 * supersparc, we can't help using PC, since we don't get a VA in 1070 * sfva. 1071 * Kernel faults are somewhat different: text faults are always 1072 * illegal, and data faults are extra complex. User faults must 1073 * set p->p_md.md_tf, in case we decide to deliver a signal. Check 1074 * for illegal virtual addresses early since those can induce more 1075 * faults. 1076 * All translation faults are illegal, and result in a SIGSEGV 1077 * being delivered to the running process (or a kernel panic, for 1078 * a kernel fault). We check the translation first to make sure 1079 * it is not spurious. 1080 * Also, note that in the case where we have an overwritten 1081 * text fault (OW==1, AT==2,3), we attempt to service the 1082 * second (overwriting) fault, then restart the instruction 1083 * (which is from the first fault) and allow the first trap 1084 * to reappear. XXX is this right? It will probably change... 1085 */ 1086 if ((sfsr & SFSR_FT) == SFSR_FT_NONE) 1087 goto out; /* No fault. Why were we called? 
*/ 1088 1089 /* 1090 * NOTE: the per-CPU fault status register readers (in locore) 1091 * may already have decided to pass `pc' in `sfva', so we avoid 1092 * testing CPU types here. 1093 * Q: test SFSR_FAV in the locore stubs too? 1094 */ 1095 if ((sfsr & SFSR_FAV) == 0) { 1096 /* note: T_TEXTERROR == T_TEXTFAULT | 0x20 */ 1097 if ((type & ~0x20) == T_TEXTFAULT) 1098 sfva = pc; 1099 else { 1100 rv = EACCES; 1101 goto fault; 1102 } 1103 } 1104 1105 if ((sfsr & SFSR_FT) == SFSR_FT_TRANSERR) { 1106 /* 1107 * Translation errors are always fatal, as they indicate 1108 * a corrupt translation (page) table hierarchy. 1109 */ 1110 rv = EACCES; 1111 1112 /* XXXSMP - why bother with this anyway? */ 1113 if (tfaultaddr == sfva) /* Prevent infinite loops w/a static */ 1114 goto fault; 1115 tfaultaddr = sfva; 1116 if ((lda((sfva & 0xFFFFF000) | ASI_SRMMUFP_LN, ASI_SRMMUFP) & 1117 SRMMU_TETYPE) != SRMMU_TEPTE) 1118 goto fault; /* Translation bad */ 1119 lda(SRMMU_SFSR, ASI_SRMMU); 1120 #ifdef DEBUG 1121 printf("mem_access_fault4m: SFSR_FT_TRANSERR: " 1122 "pid %d, va 0x%x: retrying\n", p->p_pid, sfva); 1123 #endif 1124 goto out; /* Translation OK, retry operation */ 1125 } 1126 1127 va = trunc_page(sfva); 1128 1129 if (((sfsr & SFSR_AT_TEXT) || type == T_TEXTFAULT) && 1130 !(sfsr & SFSR_AT_STORE) && (sfsr & SFSR_OW)) { 1131 if (psr & PSR_PS) { /* never allow in kernel */ 1132 rv = EFAULT; 1133 goto kfault; 1134 } 1135 #if 0 1136 /* 1137 * Double text fault. The evil "case 5" from the HS manual... 1138 * Attempt to handle early fault. Ignores ASI 8,9 issue...may 1139 * do a useless VM read. 1140 * XXX: Is this really necessary? 
1141 * XXX: If it's necessary, add SA_PAGEFAULT handling 1142 */ 1143 if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) { 1144 /* On HS, we have va for both */ 1145 vm = p->p_vmspace; 1146 pcb->pcb_onfault = NULL; 1147 rv = uvm_fault(&vm->vm_map, trunc_page(pc), 1148 VM_PROT_READ); 1149 pcb->pcb_onfault = onfault; 1150 if (rv != 0) 1151 #ifdef DEBUG 1152 printf("mem_access_fault: " 1153 "can't pagein 1st text fault.\n") 1154 #endif 1155 ; 1156 } 1157 #endif 1158 } 1159 1160 /* Now munch on protections... */ 1161 if (sfsr & SFSR_AT_STORE) { 1162 /* stores are never text faults. */ 1163 atype = VM_PROT_WRITE; 1164 } else { 1165 if ((sfsr & SFSR_AT_TEXT) || (type & ~0x20) == T_TEXTFAULT) { 1166 atype = VM_PROT_EXECUTE; 1167 } else { 1168 atype = VM_PROT_READ; 1169 } 1170 } 1171 1172 if (psr & PSR_PS) { 1173 extern char Lfsbail[]; 1174 if (sfsr & SFSR_AT_TEXT || type == T_TEXTFAULT) { 1175 (void) splhigh(); 1176 snprintb(bits, sizeof(bits), SFSR_BITS, sfsr); 1177 printf("cpu%d text fault: pc=0x%x sfsr=%s sfva=0x%x\n", 1178 cpu_number(), pc, bits, sfva); 1179 panic("kernel fault"); 1180 /* NOTREACHED */ 1181 } 1182 /* 1183 * If this was an access that we shouldn't try to page in, 1184 * resume at the fault handler without any action. 1185 */ 1186 if (onfault == (vaddr_t)Lfsbail) { 1187 rv = EFAULT; 1188 goto kfault; 1189 } 1190 1191 /* 1192 * During autoconfiguration, faults are never OK unless 1193 * pcb_onfault is set. Once running normally we must allow 1194 * exec() to cause copy-on-write faults to kernel addresses. 1195 */ 1196 if (cold) { 1197 rv = EFAULT; 1198 goto kfault; 1199 } 1200 if (va >= KERNBASE) { 1201 pcb->pcb_onfault = NULL; 1202 rv = uvm_fault(kernel_map, va, atype); 1203 pcb->pcb_onfault = (void *)onfault; 1204 if (rv == 0) { 1205 return; 1206 } 1207 goto kfault; 1208 } 1209 } else { 1210 l->l_md.md_tf = tf; 1211 /* 1212 * WRS: Can drop LP_SA_NOBLOCK test iff can only get 1213 * here from a usermode-initiated access. 
LP_SA_NOBLOCK 1214 * should never be set there - it's kernel-only. 1215 */ 1216 if ((l->l_flag & LW_SA) 1217 && (~l->l_pflag & LP_SA_NOBLOCK)) { 1218 l->l_savp->savp_faultaddr = (vaddr_t)sfva; 1219 l->l_pflag |= LP_SA_PAGEFAULT; 1220 } 1221 } 1222 1223 vm = p->p_vmspace; 1224 1225 /* alas! must call the horrible vm code */ 1226 pcb->pcb_onfault = NULL; 1227 rv = uvm_fault(&vm->vm_map, (vaddr_t)va, atype); 1228 pcb->pcb_onfault = (void *)onfault; 1229 1230 /* 1231 * If this was a stack access we keep track of the maximum 1232 * accessed stack size. Also, if vm_fault gets a protection 1233 * failure it is due to accessing the stack region outside 1234 * the current limit and we need to reflect that as an access 1235 * error. 1236 */ 1237 if (rv == 0 && (void *)va >= vm->vm_maxsaddr) 1238 uvm_grow(p, va); 1239 if (rv != 0) { 1240 /* 1241 * Pagein failed. If doing copyin/out, return to onfault 1242 * address. Any other page fault in kernel, die; if user 1243 * fault, deliver SIGSEGV. 1244 */ 1245 fault: 1246 if (psr & PSR_PS) { 1247 kfault: 1248 if (!onfault) { 1249 (void) splhigh(); 1250 snprintb(bits, sizeof(bits), SFSR_BITS, sfsr); 1251 printf("cpu%d: data fault: pc=0x%x " 1252 "addr=0x%x sfsr=%s\n", 1253 cpu_number(), pc, sfva, bits); 1254 panic("kernel fault"); 1255 /* NOTREACHED */ 1256 } 1257 tf->tf_pc = onfault; 1258 tf->tf_npc = onfault + 4; 1259 tf->tf_out[0] = (rv == EACCES) ? EFAULT : rv; 1260 return; 1261 } 1262 KSI_INIT_TRAP(&ksi); 1263 if (rv == ENOMEM) { 1264 printf("UVM: pid %d (%s), uid %d killed: out of swap\n", 1265 p->p_pid, p->p_comm, 1266 l->l_cred ? 1267 kauth_cred_geteuid(l->l_cred) : -1); 1268 ksi.ksi_signo = SIGKILL; 1269 ksi.ksi_code = SI_NOINFO; 1270 } else { 1271 ksi.ksi_signo = SIGSEGV; 1272 ksi.ksi_code = (rv == EACCES) 1273 ? 
SEGV_ACCERR : SEGV_MAPERR;
		}
		ksi.ksi_trap = type;
		ksi.ksi_addr = (void *)sfva;
		trapsignal(l, &ksi);
	}
out:
	/*
	 * Common exit path.  Only user-mode traps (PSR_PS clear) need the
	 * return-to-user bookkeeping; kernel-mode faults that reach here
	 * simply return to the interrupted kernel code.
	 */
	if ((psr & PSR_PS) == 0) {
		/* Fault is fully serviced; allow SA upcalls to block again. */
		l->l_pflag &= ~LP_SA_PAGEFAULT;
out_nounlock:
		userret(l, pc, sticks);
		share_fpu(l, tf);
	}
}
#endif /* SUN4M */

/*
 * XXX This is a terrible name.
 *
 * Finish a scheduler-activations upcall: drop the last hold on the
 * kernel lock and return to user mode at the PC recorded in the
 * LWP's trapframe.
 */
void
upcallret(struct lwp *l)
{

	KERNEL_UNLOCK_LAST(l);
	userret(l, l->l_md.md_tf->tf_pc, 0);
}

/*
 * Start a new LWP.
 *
 * `arg' is a ucontext_t describing the initial user context; ownership
 * of that buffer transfers to us — we free it with kmem_free() once the
 * context has been installed, then head back out to user mode.
 */
void
startlwp(void *arg)
{
	ucontext_t *uc = arg;	/* initial context supplied by the creator */
	lwp_t *l = curlwp;
	int error;

	/*
	 * The creating code is expected to pass a valid context, so a
	 * failure here indicates a kernel bug, not a user error.
	 */
	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	kmem_free(uc, sizeof(ucontext_t));
	userret(l, l->l_md.md_tf->tf_pc, 0);
}