1 /* $NetBSD: trap.c,v 1.59 2002/05/19 06:35:45 augustss Exp $ */ 2 3 /* 4 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 5 * Copyright (C) 1995, 1996 TooLs GmbH. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by TooLs GmbH. 19 * 4. The name of TooLs GmbH may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include "opt_altivec.h" 35 #include "opt_ddb.h" 36 #include "opt_ktrace.h" 37 #include "opt_multiprocessor.h" 38 39 #include <sys/param.h> 40 #include <sys/proc.h> 41 #include <sys/reboot.h> 42 #include <sys/syscall.h> 43 #include <sys/systm.h> 44 #include <sys/user.h> 45 #include <sys/ktrace.h> 46 47 #include <uvm/uvm_extern.h> 48 49 #include <dev/cons.h> 50 51 #include <machine/cpu.h> 52 #include <machine/db_machdep.h> 53 #include <machine/fpu.h> 54 #include <machine/frame.h> 55 #include <machine/pcb.h> 56 #include <machine/pmap.h> 57 #include <machine/psl.h> 58 #include <machine/trap.h> 59 #include <powerpc/spr.h> 60 61 /* These definitions should probably be somewhere else XXX */ 62 #define FIRSTARG 3 /* first argument is in reg 3 */ 63 #define NARGREG 8 /* 8 args are in registers */ 64 #define MOREARGS(sp) ((caddr_t)((int)(sp) + 8)) /* more args go here */ 65 66 #ifndef MULTIPROCESSOR 67 volatile int astpending; 68 volatile int want_resched; 69 extern int intr_depth; 70 #endif 71 72 void *syscall = NULL; /* XXX dummy symbol for emul_netbsd */ 73 74 static int fix_unaligned __P((struct proc *p, struct trapframe *frame)); 75 static inline void setusr __P((int)); 76 77 void trap __P((struct trapframe *)); /* Called from locore / trap_subr */ 78 int setfault __P((faultbuf)); /* defined in locore.S */ 79 /* Why are these not defined in a header? 
*/
int badaddr __P((void *, size_t));
int badaddr_read __P((void *, size_t, int *));

/*
 * trap:
 *
 *	Common entry point for synchronous traps/exceptions, called from
 *	the locore trap stubs.  The raw exception type comes from
 *	frame->exc; EXC_USER is or'ed in when the trap was taken from
 *	user mode (PSL_PR set in SRR1).  Dispatches page faults (kernel
 *	and user), system calls, FPU/AltiVec-unavailable traps,
 *	alignment and program exceptions, ASTs and machine checks;
 *	anything unrecognized panics.
 */
void
trap(frame)
	struct trapframe *frame;
{
	struct proc *p = curproc;
	int type = frame->exc;
	int ftype, rv;

	curcpu()->ci_ev_traps.ev_count++;

	/* Tag traps taken from user mode so the switch can tell them apart. */
	if (frame->srr1 & PSL_PR)
		type |= EXC_USER;

#ifdef DIAGNOSTIC
	if (curpcb->pcb_pmreal != curpm)
		panic("trap: curpm (%p) != curpcb->pcb_pmreal (%p)",
		    curpm, curpcb->pcb_pmreal);
#endif

	uvmexp.traps++;

	switch (type) {
	case EXC_RUNMODETRC|EXC_USER:
		/* FALLTHROUGH */
	case EXC_TRC|EXC_USER:
		/* Single-step/trace trap: clear PSL_SE and deliver SIGTRAP. */
		KERNEL_PROC_LOCK(p);
		frame->srr1 &= ~PSL_SE;
		trapsignal(p, SIGTRAP, EXC_TRC);
		KERNEL_PROC_UNLOCK(p);
		break;
	case EXC_DSI: {
		/* Kernel-mode data storage interrupt (data page fault). */
		faultbuf *fb;
		/*
		 * Only query UVM if no interrupts are active (this
		 * applies to "on-fault" accesses as well).
		 * NOTE(review): intr_depth < 0 appears to mean "no
		 * interrupt frames active" -- confirm against locore.
		 */
		curcpu()->ci_ev_kdsi.ev_count++;
		if (intr_depth < 0) {
			struct vm_map *map;
			vaddr_t va;

			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			map = kernel_map;
			va = frame->dar;
			if ((va >> ADDR_SR_SHFT) == USER_SR) {
				/*
				 * Fault through the USER_SR copyin/copyout
				 * window: translate the window address back
				 * into the real user address and fault
				 * against the user's map instead.
				 */
				sr_t user_sr;

				asm ("mfsr %0, %1"
				     : "=r"(user_sr) : "K"(USER_SR));
				va &= ADDR_PIDX | ADDR_POFF;
				va |= user_sr << ADDR_SR_SHFT;
				/* KERNEL_PROC_LOCK(p); XXX */
				map = &p->p_vmspace->vm_map;
			}
			if (frame->dsisr & DSISR_STORE)
				ftype = VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
			rv = uvm_fault(map, trunc_page(va), 0, ftype);
			if (map != kernel_map) {
				/*
				 * Record any stack growth...
				 */
				if (rv == 0)
					uvm_grow(p, trunc_page(va));
				/* KERNEL_PROC_UNLOCK(p); XXX */
			}
			KERNEL_UNLOCK();
			if (rv == 0)
				return;
			if (rv == EACCES)
				rv = EFAULT;
		} else {
			rv = EFAULT;
		}
		/*
		 * UVM could not resolve the fault: if the faulting code
		 * registered an on-fault handler, unwind to it -- restore
		 * pc, sp, r2, cr and the callee-saved regs r13-r31 from
		 * the faultbuf and hand the error back in r3.
		 */
		if ((fb = p->p_addr->u_pcb.pcb_onfault) != NULL) {
			frame->srr0 = (*fb)[0];
			frame->fixreg[1] = (*fb)[1];
			frame->fixreg[2] = (*fb)[2];
			frame->fixreg[3] = rv;
			frame->cr = (*fb)[3];
			memcpy(&frame->fixreg[13], &(*fb)[4],
			    19 * sizeof(register_t));
			return;
		}
		printf("trap: kernel %s DSI @ %#x by %#x (DSISR %#x, err=%d)\n",
		    (frame->dsisr & DSISR_STORE) ? "write" : "read",
		    frame->dar, frame->srr0, frame->dsisr, rv);
		goto brain_damage2;
	}
	case EXC_DSI|EXC_USER:
		/* User-mode data page fault. */
		KERNEL_PROC_LOCK(p);
		curcpu()->ci_ev_udsi.ev_count++;
		if (frame->dsisr & DSISR_STORE)
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->dar),
		    0, ftype);
		if (rv == 0) {
			/*
			 * Record any stack growth...
			 */
			uvm_grow(p, trunc_page(frame->dar));
			KERNEL_PROC_UNLOCK(p);
			break;
		}
		curcpu()->ci_ev_udsi_fatal.ev_count++;
		if (cpu_printfataltraps) {
			printf("trap: pid %d (%s): user %s DSI @ %#x "
			    "by %#x (DSISR %#x, err=%d)\n",
			    p->p_pid, p->p_comm,
			    (frame->dsisr & DSISR_STORE) ? "write" : "read",
			    frame->dar, frame->srr0, frame->dsisr, rv);
		}
		if (rv == ENOMEM) {
			/* Out of swap: kill the process rather than SIGSEGV. */
			printf("UVM: pid %d (%s), uid %d killed: "
			    "out of swap\n",
			    p->p_pid, p->p_comm,
			    p->p_cred && p->p_ucred ?
				p->p_ucred->cr_uid : -1);
			trapsignal(p, SIGKILL, EXC_DSI);
		} else {
			trapsignal(p, SIGSEGV, EXC_DSI);
		}
		KERNEL_PROC_UNLOCK(p);
		break;
	case EXC_ISI:
		/* Instruction fetch fault in the kernel is always fatal. */
		printf("trap: kernel ISI by %#x (SRR1 %#x)\n",
		    frame->srr0, frame->srr1);
		goto brain_damage2;
	case EXC_ISI|EXC_USER:
		/* User-mode instruction page fault: fault on the pc. */
		KERNEL_PROC_LOCK(p);
		curcpu()->ci_ev_isi.ev_count++;
		ftype = VM_PROT_READ | VM_PROT_EXECUTE;
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
		    0, ftype);
		if (rv == 0) {
			KERNEL_PROC_UNLOCK(p);
			break;
		}
		curcpu()->ci_ev_isi_fatal.ev_count++;
		if (cpu_printfataltraps) {
			printf("trap: pid %d (%s): user ISI trap @ %#x "
			    "(SSR1=%#x)\n",
			    p->p_pid, p->p_comm, frame->srr0, frame->srr1);
		}
		trapsignal(p, SIGSEGV, EXC_ISI);
		KERNEL_PROC_UNLOCK(p);
		break;
	case EXC_SC|EXC_USER:
		/*
		 * System call: number in r0, up to NARGREG args in
		 * r3..r10, further args on the user stack.
		 */
		curcpu()->ci_ev_scalls.ev_count++;
		{
			const struct sysent *callp;
			size_t argsize;
			register_t code, error;
			register_t *params, rval[2];
			int n;
			register_t args[10];

			KERNEL_PROC_LOCK(p);

			uvmexp.syscalls++;

			code = frame->fixreg[0];
			callp = p->p_emul->e_sysent;
			params = frame->fixreg + FIRSTARG;
			n = NARGREG;

			switch (code) {
			case SYS_syscall:
				/*
				 * code is first argument,
				 * followed by actual args.
				 */
				code = *params++;
				n -= 1;
				break;
			case SYS___syscall:
				/*
				 * Like SYS_syscall, but code is a
				 * quad, so as to maintain alignment
				 * the first argument slot is skipped.
				 */
				params++;
				code = *params++;
				n -= 2;
				break;
			default:
				break;
			}

			/* Clamp the syscall number into table range. */
			code &= (SYS_NSYSENT - 1);
			callp += code;
			argsize = callp->sy_argsize;

			if (argsize > n * sizeof(register_t)) {
				/*
				 * More args than fit in registers:
				 * gather the register args and copy
				 * the remainder in from the user stack.
				 */
				memcpy(args, params, n * sizeof(register_t));
				error = copyin(MOREARGS(frame->fixreg[1]),
				    args + n,
				    argsize - n * sizeof(register_t));
				if (error)
					goto syscall_bad;
				params = args;
			}

#ifdef KTRACE
			if (KTRPOINT(p, KTR_SYSCALL))
				ktrsyscall(p, code, argsize, params);
#endif

			rval[0] = 0;
			rval[1] = 0;

			error = (*callp->sy_call)(p, params, rval);
			switch (error) {
			case 0:
				/*
				 * Success: return values in r3/r4,
				 * clear the error-indicator bit in CR
				 * (per the syscall ABI).
				 */
				frame->fixreg[FIRSTARG] = rval[0];
				frame->fixreg[FIRSTARG + 1] = rval[1];
				frame->cr &= ~0x10000000;
				break;
			case ERESTART:
				/*
				 * Set user's pc back to redo the system call.
				 */
				frame->srr0 -= 4;
				break;
			case EJUSTRETURN:
				/* nothing to do */
				break;
			default:
			syscall_bad:
				/* Translate errno for non-native emulations. */
				if (p->p_emul->e_errno)
					error = p->p_emul->e_errno[error];
				frame->fixreg[FIRSTARG] = error;
				frame->cr |= 0x10000000;
				break;
			}

#ifdef KTRACE
			if (KTRPOINT(p, KTR_SYSRET))
				ktrsysret(p, code, error, rval[0]);
#endif
		}
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_FPU|EXC_USER:
		/*
		 * FPU unavailable: lazily switch FPU state -- save the
		 * previous owner's registers, then hand the unit to p.
		 */
		curcpu()->ci_ev_fpu.ev_count++;
		if (fpuproc) {
			curcpu()->ci_ev_fpusw.ev_count++;
			save_fpu(fpuproc);
		}
#if defined(MULTIPROCESSOR)
		if (p->p_addr->u_pcb.pcb_fpcpu)
			save_fpu_proc(p);
#endif
		fpuproc = p;
		p->p_addr->u_pcb.pcb_fpcpu = curcpu();
		enable_fpu(p);
		break;

#ifdef ALTIVEC
	case EXC_VEC|EXC_USER:
		/* AltiVec unavailable: same lazy-switch scheme as the FPU. */
		curcpu()->ci_ev_vec.ev_count++;
		if (vecproc) {
			curcpu()->ci_ev_vecsw.ev_count++;
			save_vec(vecproc);
		}
		vecproc = p;
		enable_vec(p);
		break;
#endif

	case EXC_AST|EXC_USER:
		/* Asynchronous software trap. */
		astpending = 0;		/* we are about to do it */
		KERNEL_PROC_LOCK(p);
		uvmexp.softs++;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		/* Check whether we are being preempted. */
		if (want_resched)
			preempt(NULL);
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_ALI|EXC_USER:
		/*
		 * Alignment fault: try to emulate the access; on
		 * failure deliver SIGBUS, on success skip the insn.
		 */
		KERNEL_PROC_LOCK(p);
		curcpu()->ci_ev_ali.ev_count++;
		if (fix_unaligned(p, frame) != 0) {
			curcpu()->ci_ev_ali_fatal.ev_count++;
			if (cpu_printfataltraps) {
				printf("trap: pid %d (%s): user ALI trap @ %#x "
				    "(SSR1=%#x)\n",
				    p->p_pid, p->p_comm, frame->srr0,
				    frame->srr1);
			}
			trapsignal(p, SIGBUS, EXC_ALI);
		} else
			frame->srr0 += 4;
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_PGM|EXC_USER:
		/* XXX temporarily */
		KERNEL_PROC_LOCK(p);
		curcpu()->ci_ev_pgm.ev_count++;
		if (cpu_printfataltraps) {
			printf("trap: pid %d (%s): user PGM trap @ %#x "
			    "(SSR1=%#x)\n",
			    p->p_pid, p->p_comm, frame->srr0, frame->srr1);
		}
		if (frame->srr1 & 0x00020000)	/* Bit 14 is set if trap */
			trapsignal(p, SIGTRAP, EXC_PGM);
		else
			trapsignal(p, SIGILL, EXC_PGM);
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_MCHK: {
		/*
		 * Machine check in the kernel: recoverable only if an
		 * on-fault handler is registered (e.g. badaddr_read);
		 * unwind to it with EFAULT in r3, otherwise panic.
		 */
		faultbuf *fb;

		if ((fb = p->p_addr->u_pcb.pcb_onfault) != NULL) {
			frame->srr0 = (*fb)[0];
			frame->fixreg[1] = (*fb)[1];
			frame->fixreg[2] = (*fb)[2];
			frame->fixreg[3] = EFAULT;
			frame->cr = (*fb)[3];
			memcpy(&frame->fixreg[13], &(*fb)[4],
			    19 * sizeof(register_t));
			return;
		}
		goto brain_damage;
	}

	default:
	brain_damage:
		printf("trap type %x at %x\n", type, frame->srr0);
	brain_damage2:
#ifdef DDBX
		if (kdb_trap(type, frame))
			return;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cnpollc(1);
		cngetc();
		cnpollc(0);
#endif
		panic("trap");
	}

	/* Take pending signals. */
	{
		int sig;

		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If someone stole the fp or vector unit while we were away,
	 * disable it
	 */
	if (p != fpuproc || p->p_addr->u_pcb.pcb_fpcpu != curcpu())
		frame->srr1 &= ~PSL_FP;
#ifdef ALTIVEC
	if (p != vecproc)
		frame->srr1 &= ~PSL_VEC;
#endif

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}

/*
 * child_return:
 *
 *	Return path for a newly forked child (scheduled via cpu_fork --
 *	TODO confirm caller).  Sets up the child's syscall return values
 *	(r3 = 0, r4 = 1, CR error bit clear) and returns to user mode
 *	with FP and AltiVec disabled.
 */
void
child_return(arg)
	void *arg;
{
	struct proc *p = arg;
	struct trapframe *tf = trapframe(p);

	KERNEL_PROC_UNLOCK(p);

	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 1;
	tf->cr &= ~0x10000000;
	tf->srr1 &= ~(PSL_FP|PSL_VEC);	/* Start the child with FP and
					   AltiVec disabled: it does not
					   own either unit yet. */
	p->p_addr->u_pcb.pcb_fpcpu = NULL;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		KERNEL_PROC_LOCK(p);
		ktrsysret(p, SYS_fork, 0, 0);
		KERNEL_PROC_UNLOCK(p);
	}
#endif
	/* Profiling?					XXX */
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
}

/*
 * setusr:
 *
 *	Load segment register USER_SR with the given segment contents,
 *	mapping one 256MB user segment into the kernel's copy window.
 *	The isyncs order the mtsr against surrounding accesses.
 */
static inline void
setusr(content)
	int content;
{
	asm volatile ("isync; mtsr %0,%1; isync"
	    :: "n"(USER_SR), "r"(content));
}

/*
 * copyin:
 *
 *	Copy len bytes from user address udaddr to kernel address kaddr.
 *	Works a segment at a time: each iteration maps the user segment
 *	containing `up' at USER_ADDR via setusr() and copies up to the
 *	end of that segment.  Returns 0 on success or the fault error
 *	from the on-fault handler armed by setfault().
 */
int
copyin(udaddr, kaddr, len)
	const void *udaddr;
	void *kaddr;
	size_t len;
{
	const char *up = udaddr;
	char *kp = kaddr;
	char *p;
	int rv;
	size_t l;
	faultbuf env;

	if ((rv = setfault(env)) != 0) {
		/* Faulted mid-copy: disarm the handler and fail. */
		curpcb->pcb_onfault = 0;
		return rv;
	}
	while (len > 0) {
		/* Address of `up' within the USER_ADDR window. */
		p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
		/* Copy at most to the end of the current segment. */
		l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		setusr(curpcb->pcb_pm->pm_sr[(u_int)up >> ADDR_SR_SHFT]);
		memcpy(kp, p, l);
		up += l;
		kp += l;
		len -= l;
	}
	curpcb->pcb_onfault = 0;
	return 0;
}

/*
 * copyout:
 *
 *	Copy len bytes from kernel address kaddr to user address udaddr.
 *	Mirror image of copyin(); same segment-window technique and the
 *	same return convention.
 */
int
copyout(kaddr, udaddr, len)
	const void *kaddr;
	void *udaddr;
	size_t len;
{
	const char *kp = kaddr;
	char *up = udaddr;
	char *p;
	int rv;
	size_t l;
	faultbuf env;

	if ((rv = setfault(env)) != 0) {
		curpcb->pcb_onfault = 0;
		return rv;
	}
	while (len > 0) {
		p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
		l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		setusr(curpcb->pcb_pm->pm_sr[(u_int)up >> ADDR_SR_SHFT]);
		memcpy(p, kp, l);
		up += l;
		kp += l;
		len -= l;
	}
	curpcb->pcb_onfault = 0;
	return 0;
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(src, dst, len)
	const void *src;
	void *dst;
	size_t len;
{
	faultbuf env, *oldfault;
	int rv;

	oldfault = curpcb->pcb_onfault;
	if ((rv = setfault(env)) != 0) {
		curpcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}

/*
 * badaddr:
 *
 *	Probe addr for size bytes; returns nonzero if the access faults.
 *	Convenience wrapper around badaddr_read() that discards the
 *	value read.
 */
int
badaddr(addr, size)
	void *addr;
	size_t size;
{
	return badaddr_read(addr, size, NULL);
}

/*
 * badaddr_read:
 *
 *	Read size (1, 2 or 4) bytes from addr under an on-fault handler.
 *	Returns 1 if the access faulted (including via machine check),
 *	otherwise 0, storing the value read through rptr if non-NULL.
 */
int
badaddr_read(addr, size, rptr)
	void *addr;
	size_t size;
	int *rptr;
{
	faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting. */
	__asm __volatile ("sync; isync");

	if (setfault(env)) {
		/* The probe faulted: clean up and report failure. */
		curpcb->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		/* XXX size is size_t but the format assumes int. */
		panic("badaddr: invalid size (%d)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	curpcb->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate. Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 *
 * Returns 0 if the access was emulated successfully, -1 otherwise.
 */
static int
fix_unaligned(p, frame)
	struct proc *p;
	struct trapframe *frame;
{
	int indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);

	switch (indicator) {
	case EXC_ALI_DCBZ:
		{
			/*
			 * The DCBZ (Data Cache Block Zero) instruction
			 * gives an alignment fault if used on non-cacheable
			 * memory. We handle the fault mainly for the
			 * case when we are running with the cache disabled
			 * for debugging.
			 */
			static char zeroes[CACHELINESIZE];
			int error;
			error = copyout(zeroes,
			    (void *)(frame->dar & -CACHELINESIZE),
			    CACHELINESIZE);
			if (error)
				return -1;
			return 0;
		}

	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		{
			int reg = EXC_ALI_RST(frame->dsisr);
			double *fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];

			/* Juggle the FPU to ensure that we've initialized
			 * the FPRs, and that their current state is in
			 * the PCB.
			 */
			if (fpuproc != p) {
				if (fpuproc)
					save_fpu(fpuproc);
				enable_fpu(p);
			}
			save_fpu(p);

			if (indicator == EXC_ALI_LFD) {
				if (copyin((void *)frame->dar, fpr,
				    sizeof(double)) != 0)
					return -1;
				/* Reload the updated FPR into the FPU. */
				enable_fpu(p);
			} else {
				if (copyout(fpr, (void *)frame->dar,
				    sizeof(double)) != 0)
					return -1;
			}
			return 0;
		}
		break;	/* NOTREACHED */
	}

	return -1;
}