/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, Ralph Campbell, Sony Corp. and Kazumasa Utashiro
 * of Software Research Associates, Inc.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: trap.c 1.32 91/04/06$
 *
 *	@(#)trap.c	8.1 (Berkeley) 06/16/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/user.h>
#include <sys/buf.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <net/netisr.h>

#include <machine/trap.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/mips_opcode.h>
#include <machine/adrsmap.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include "lp.h"
#include "bm.h"
#include "ms.h"
#include "en.h"
#include <news3400/hbdev/dmac_0448.h>
#include <news3400/sio/scc.h>

struct proc *machFPCurProcPtr;	/* pointer to last proc to use FP */

/* Low-level exception entry points, implemented in locore assembly. */
extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
extern unsigned MachEmulateBranch();

/*
 * Exception dispatch table, indexed by the exception code extracted
 * from the CP0 cause register.  The first 16 entries handle traps
 * taken from kernel mode; the second 16 (offset by T_USER) handle
 * traps taken from user mode.
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
};

/* Printable names for the 16 exception codes, used by trapDump(). */
char *trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};

#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;
	u_int	cause;
	u_int	vadr;
	u_int	pc;
	u_int	ra;
	u_int	code;
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* trp: next slot to fill (circular) */
#endif

/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 *
 * For user-mode traps, posts the appropriate signal (or performs the
 * syscall / fault service) and returns the pc at which to resume.
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occured on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;
	register struct proc *p = curproc;
	u_quad_t sticks;
	vm_prot_t ftype;
	extern unsigned onfault_table[];

#ifdef DEBUG
	/*
	 * Log this trap in the circular history buffer.
	 * NOTE(review): the ((int *)&args)[19] access assumes locore
	 * pushes the full exception frame directly after the declared
	 * arguments, with RA at word 19 -- confirm against locore.s.
	 */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		/* sticks is only needed (and only set) for user-mode traps */
		sticks = p->p_sticks;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (KSEG addresses have the top bit set) */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
			register vm_offset_t pa;

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* mark page dirty in both the pte and the TLB */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= ~PGOFSET;
			MachTLBUpdate(vadr, entry);
			pa = entry & PG_FRAME;
#ifdef ATTR
			pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: ktlbmod: unmanaged page");
			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		register pt_entry_t *pte;
		register unsigned entry;
		register vm_offset_t pa;
		pmap_t pmap = &p->p_vmspace->vm_pmap;

		if (!(pte = pmap_segmap(pmap, vadr)))
			panic("trap: utlbmod: invalid segmap");
		pte += (vadr >> PGSHIFT) & (NPTEPG - 1);
		entry = pte->pt_entry;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (entry & PG_RO) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		/* mark page dirty; TLB entry is tagged with the pmap's tlbpid */
		entry |= PG_M;
		pte->pt_entry = entry;
		vadr = (vadr & ~PGOFSET) |
			(pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
		MachTLBUpdate(vadr, entry);
		pa = entry & PG_FRAME;
#ifdef ATTR
		pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: utlbmod: unmanaged page");
		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* fault while in a copyin/copyout-style routine? */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
			goto err;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (i == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm;
		register vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			/* kernel fault recoverable only via onfault */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef COMPAT_NEWSOS
		extern int nnewssys;
		extern struct sysent newssys[];
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		if ((int)causeReg < 0)	/* syscall was in a branch delay slot */
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
		code = locr0[V0];
#ifdef COMPAT_NEWSOS
		/* NEWS-OS syscall numbers are offset by 1000 */
		if (code >= 1000) {
			code -= 1000;
			systab = newssys;
			numsys = nnewssys;
		}
#endif
		switch (code) {
		case SYS_syscall:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
#ifdef COMPAT_NEWSOS
			if (code >= 1000) {
				code -= 1000;
				systab = newssys;
				numsys = nnewssys;
			}
#endif
			if (code >= numsys)
				callp = &systab[SYS_syscall]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			if (i > 3) {
				/* remaining args are on the user stack */
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;	/* error indicator */
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___syscall:
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_syscall]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			if (code >= numsys)
				callp = &systab[SYS_syscall]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* tag the most recent history entry with the syscall number */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		/* log syscall return; code is negated to distinguish it */
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		switch (i) {
		case 0:
			/* success: return values in V0/V1, A3 clear */
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			/* back up PC so the syscall is re-executed */
			locr0[PC] = pc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif

		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)	/* break was in a branch delay slot */
			va += 4;

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* not our single-step breakpoint: deliver SIGTRAP */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			/* text may be write-protected: open it up briefly */
			vm_offset_t sa, ea;
			int rv;

			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily hand the FPU to this process */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		/*
		 * Hand the machine state to the kernel debugger.
		 * NOTE(review): the ((int *)&args)[N] indices assume the
		 * locore exception frame layout -- verify against locore.s.
		 */
		extern struct pcb kdbpcb;

		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
			p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		psig(i);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((i = CURSIG(p)) != 0)
			psig(i);
	}
	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
	}
	curpri = p->p_pri;
	return (pc);
}

/* set when a badaddr()-style probe takes an asynchronous bus error */
int badaddr_flag;

/*
 * Handle an interrupt.
 * Called from MachKernIntr() or MachUserIntr()
 * Note: curproc might be NULL.
 *
 * Services pending hardware interrupt levels 5 (highest) down to 0,
 * then software clock and network interrupts.  Each level is unmasked
 * down to its own priority via splx() before its handler runs.
 */
interrupt(statusReg, causeReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned pc;		/* program counter where to continue */
{
	register unsigned mask;
	struct clockframe cf;
	int oonfault = ((struct pcb *)UADDR)->pcb_onfault;

#ifdef DEBUG
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = 0;
	trp->pc = pc;
	trp->ra = 0;
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
	if (mask & MACH_INT_MASK_5) {	/* level 5 interrupt */
		splx((MACH_SPL_MASK_8 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		printf("level 5 interrupt: PC %x CR %x SR %x\n",
			pc, causeReg, statusReg);
		causeReg &= ~MACH_INT_MASK_5;
	}
	if (mask & MACH_INT_MASK_4) {	/* level 4 interrupt */
		/*
		 * asynchronous bus error
		 */
		splx((MACH_SPL_MASK_7 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		*(char *)INTCLR0 = INTCLR0_BERR;
		causeReg &= ~MACH_INT_MASK_4;
#define BADADDR 1
		if (oonfault == BADADDR) {	/* XXX */
			/* fault was an expected badaddr() probe */
			badaddr_flag = 1;
		} else {
			printf("level 4 interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
		}
	}
	if (mask & MACH_INT_MASK_3) {	/* level 3 interrupt */
		/*
		 * fp error
		 */
		splx((MACH_SPL_MASK_6 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		if (!USERMODE(statusReg)) {
			/* FP exception in kernel mode should not happen */
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
		causeReg &= ~MACH_INT_MASK_3;
	}
	if (mask & MACH_INT_MASK_2) {	/* level 2 interrupt */
		register int stat;

		splx((MACH_SPL_MASK_5 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		stat = *(volatile u_char *)INTST0;
		if (stat & INTST0_TIMINT) {	/* timer */
			static int led_count = 0;	/* heartbeat LED divider */

			*(volatile u_char *)INTCLR0 = INTCLR0_TIMINT;
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
			/* toggle front-panel LED about once a second */
			if (++led_count > hz) {
				led_count = 0;
				*(volatile u_char *)DEBUG_PORT ^= DP_LED1;
			}
		}
#if NBM > 0
		if (stat & INTST0_KBDINT)	/* keyboard */
			kbm_rint(SCC_KEYBOARD);
#endif
#if NMS > 0
		if (stat & INTST0_MSINT)	/* mouse */
			kbm_rint(SCC_MOUSE);
#endif
		causeReg &= ~MACH_INT_MASK_2;
	}
	if (mask & MACH_INT_MASK_1) {	/* level 1 interrupt */
		splx((MACH_SPL_MASK_4 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		level1_intr();
		causeReg &= ~MACH_INT_MASK_1;
	}
	if (mask & MACH_INT_MASK_0) {	/* level 0 interrupt */
		splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		level0_intr();
		causeReg &= ~MACH_INT_MASK_0;
	}
	splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR);

	if (mask & MACH_SOFT_INT_MASK_0) {
		struct clockframe cf;

		clearsoftclock();
		cnt.v_soft++;
		cf.pc = pc;
		cf.sr = statusReg;
		softclock();
	}
	/* process network interrupt if we trapped or will very soon */
	if ((mask & MACH_SOFT_INT_MASK_1) ||
	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
		clearsoftnet();
		cnt.v_soft++;
#ifdef INET
		if (netisr & (1 << NETISR_ARP)) {
			netisr &= ~(1 << NETISR_ARP);
			arpintr();
		}
		if (netisr & (1 << NETISR_IP)) {
			netisr &= ~(1 << NETISR_IP);
			ipintr();
		}
#endif
#ifdef NS
		if (netisr & (1 << NETISR_NS)) {
			netisr &= ~(1 << NETISR_NS);
			nsintr();
		}
#endif
#ifdef ISO
		if (netisr & (1 << NETISR_ISO)) {
			netisr &= ~(1 << NETISR_ISO);
			clnlintr();
		}
#endif
	}
	/* restore onfault flag */
	((struct pcb *)UADDR)->pcb_onfault = oonfault;
}

/*
 * This is called from MachUserIntr() if astpending is set.
 * This is very similar to the tail of trap().
 */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	int sig;

	cnt.v_soft++;
	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (p->p_flag & SOWEUPC) {
		/* deferred profiling tick */
		p->p_flag &= ~SOWEUPC;
		ADDUPROF(p);
	}
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}
	curpri = p->p_pri;
}

#ifdef DEBUG
/*
 * Print the trap history buffer, newest entry first, then clear it.
 * Entries whose cause field is zero terminate the scan (unused slots).
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* walk backwards through the circular buffer */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf(" RA %x code %d\n", trp->ra, trp->code);
	}
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif

/*
 * Return the resulting PC as if the branch was executed.
 *
 * If allowNonBranch is set, a non-branch instruction simply yields
 * instPC + 4; otherwise it panics.  fpcCSR supplies the FP condition
 * bit for coprocessor-1 branches.
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;	/* saved general register values */
	unsigned instPC;	/* address of the (branch) instruction */
	unsigned fpcCSR;	/* FP control/status register value */
	int allowNonBranch;	/* non-branch instructions are acceptable */
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*(unsigned *)instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* jump target comes from a register */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			/* taken: branch target; not taken: skip delay slot */
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* 26-bit target is word-aligned within the current 256MB region */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			/* branch on FP condition bit true/false */
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}

/*
 * Compute a conditional branch target: the address of the delay slot
 * plus the sign-extended 16-bit immediate shifted left two bits.
 */
unsigned
GetBranchDest(InstPtr)
	InstFmt *InstPtr;
{
	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
}

/*
 * This routine is called by procxmt() to single step one instruction.
 * We do this by storing a break instruction after the current instruction,
 * resuming execution, and then restoring the old instruction.
 *
 * Returns 0 on success, EFAULT if the breakpoint cannot be planted.
 */
cpu_singlestep(p)
	register struct proc *p;
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): the second test below looks redundant -- whenever
	 * md_ss_addr is non-zero the first test already fires, and when it
	 * is zero the second only matches va == 0.  Presumably the intent
	 * is just "breakpoint already set"; confirm before changing.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	/* save the instruction at the target and plant a break there */
	p->p_md.md_ss_addr = va;
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		/* text may be write-protected: open it up briefly */
		vm_offset_t sa, ea;
		int rv;

		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}

/*
 * news3400 - INT0 service routine.
 *
 * INTST0 bit 4: dma
 *	      3: slot #1
 *	      2: slot #3
 *	      1: external #1
 *	      0: external #3
 */

#define LEVEL0_MASK \
	(INTST1_DMA|INTST1_SLOT1|INTST1_SLOT3|INTST1_EXT1|INTST1_EXT3)

level0_intr()
{
	register int stat;

	/* read pending level-0 sources, then acknowledge them */
	stat = *(volatile u_char *)INTST1 & LEVEL0_MASK;
	*(u_char *)INTCLR1 = stat;

	if (stat & INTST1_DMA)
		dma_intr();
	if (stat & INTST1_SLOT1)
		exec_hb_intr2();
#if NEN > 0
	if (stat & INTST1_SLOT3) {
		int s, t;

		s = splimp();
		t = lance_intr();
		(void) splx(s);
		/* not the ethernet: pass it on to the expansion slot */
		if (t == 0)
			exec_hb_intr4();
	}
#endif
#if NLE > 0
	if (stat & INTST1_SLOT3) {
		int s;

		s = splimp();
		leintr(0);
		(void) splx(s);
	}
#endif
	if (stat & INTST1_EXT1)
		print_int_stat("EXT #1");
	if (stat & INTST1_EXT3)
		print_int_stat("EXT #3");
}

/*
 * news3400 - INT1 service routine.
 *
 * INTST0 bit 1: centro fault
 *	      0: centro busy
 * INTST1 bit 7: beep
 *	      6: scc
 *	      5: lance
 */

#define LEVEL1_MASK2	(INTST0_CFLT|INTST0_CBSY)
#define LEVEL1_MASK1	(INTST1_BEEP|INTST1_SCC|INTST1_LANCE)

/*
 * NOTE(review): declared with a pc parameter but called as
 * level1_intr() from interrupt(); harmless under K&R, and pc is unused.
 */
level1_intr(pc)
	unsigned pc;
{
	register int stat;
	register u_int saved_inten1 = *(u_char *)INTEN1;

	*(u_char *)INTEN1 = 0;	/* disable intr: beep, lance, scc */

	stat = *(volatile u_char *)INTST1 & LEVEL1_MASK1;
	*(u_char *)INTCLR1 = stat;

	/* only service sources that were actually enabled */
	stat &= saved_inten1;

	if (stat & INTST1_BEEP) {
		*(volatile u_char *)INTCLR1 = INTCLR1_BEEP;
		print_int_stat("BEEP");
	}
	if (stat & INTST1_SCC) {
		scc_intr();
		/* service a second SCC interrupt that arrived meanwhile */
		if (saved_inten1 & *(u_char *)INTST1 & INTST1_SCC)
			scc_intr();
	}
#if NEN > 0
	if (stat & INTST1_LANCE)
		lance_intr();
#endif
#if NLE > 0
	if (stat & INTST1_LANCE)
		leintr(0);
#endif

	*(u_char *)INTEN1 = saved_inten1;

#if NLP > 0
	/*
	 * The PARK2 cannot find centro interrupt correctly.
	 * We must check it by reading the cause register of cpu
	 * while other interrupts are disabled.
	 */
	{
		register int causereg;
		int s = splhigh();

		causereg = get_causereg();
		(void) splx(s);

		if ((causereg & CAUSE_IP4) == 0)
			return;
	}
#endif

	stat = (int)(*(u_char *)INTST0) & LEVEL1_MASK2;
	*(u_char *)INTCLR0 = stat;

	if (stat & INTST0_CBSY)	/* centro busy */
#if NLP > 0
		lpxint(0);
#else
		printf("stray intr: CBSY\n");
#endif
}

/*
 * DMA interrupt service routine.
 */
dma_intr()
{
	register volatile u_char *gsp = (u_char *)DMAC_GSTAT;
	register u_int gstat = *gsp;
	register int mrqb, i;

	/*
	 * when DMA interrupt occurs there remain some untransferred data.
	 * wait data transfer completion.
	 */
	mrqb = (gstat & (CH0_INT|CH1_INT|CH2_INT|CH3_INT)) << 1;
	if (gstat & mrqb) {
		/*
		 * SHOULD USE DELAY()
		 */
		for (i = 0; i < 50; i++)
			;
		if (*gsp & mrqb)
			printf("dma_intr: MRQ\n");
	}

	/* SCSI Dispatch */
	if (gstat & CH_INT(CH_SCSI))
		scintr();

#include "fd.h"
#if NFD > 0
	/* FDC Interrupt Dispatch */
	if (gstat & CH_INT(CH_FDC))
		fdc_intr(0);
#endif /* NFD > 0 */

#include "sb.h"
#if NSB > 0
	/* Audio Interface Dispatch */
	sbintr(0);
#endif /* NSB > 0 */

	/* Video I/F Dispatch */
	if (gstat & CH_INT(CH_VIDEO))
		;
}

/*
 * SCC vector interrupt service routine.
 *
 * Reads the interrupt vector from the SCC and dispatches to the
 * transmit/status/receive/condition handler it encodes.
 */
scc_intr()
{
	int vec;
	extern int scc_xint(), scc_sint(), scc_rint(), scc_cint();
	static int (*func[])() = {
		scc_xint,
		scc_sint,
		scc_rint,
		scc_cint
	};

	vec = *(volatile u_char *)SCCVECT;
	(*func[(vec & SCC_INT_MASK) >> 1])(vec);
}

/*
 * Diagnostic: print both interrupt status registers, prefixed by msg
 * (or "intr: " when msg is NULL).
 */
print_int_stat(msg)
	char *msg;
{
	int s0 = *(volatile u_char *)INTST0;
	int s1 = *(volatile u_char *)INTST1;

	if (msg)
		printf("%s: ", msg);
	else
		printf("intr: ");
	printf("INTST0=0x%x, INTST1=0x%x.\n", s0, s1);
}

/*
 * Print a stack traceback starting from the current pc/sp.
 */
traceback()
{
	u_int pc, sp;

	getpcsp(&pc, &sp);
	backtr(pc, sp);
}

#define EF_RA		92	/* r31: return address */
#define KERN_REG_SIZE	(18 * 4)
#define STAND_FRAME_SIZE	24
/* NOTE(review): unparenthesized macro body -- safe only because every
 * use below is a bare addition; parenthesize if usage ever changes. */
#define EF_SIZE	STAND_FRAME_SIZE + KERN_REG_SIZE + 12

extern u_int MachKernGenExceptionEnd[];
extern u_int end[];
#define ENDOFTXT	(end + 1)

/* Is pc within the kernel text segment (after uncached->cached mapping)? */
#define VALID_TEXT(pc) \
	((u_int *)MACH_CODE_START <= (u_int *)MACH_UNCACHED_TO_CACHED(pc) && \
	 (u_int *)MACH_UNCACHED_TO_CACHED(pc) <= (u_int *)ENDOFTXT)

/* Is x within the general exception handler's code? */
#define ExceptionHandler(x) \
	((u_int*)MachKernGenException < (u_int*)MACH_UNCACHED_TO_CACHED(x) && \
	 (u_int*)MACH_UNCACHED_TO_CACHED(x) < (u_int*)MachKernGenExceptionEnd)

/*
 * Walk kernel stack frames from (pc, sp), printing each return site.
 * Exception frames are unwound with the fixed EF_RA/EF_SIZE layout;
 * ordinary frames are analyzed by getra().
 */
backtr(pc, sp)
	register u_int *pc;
	register caddr_t sp;
{
	int fsize;
	u_int *getra();
	extern int _gp[];

	printf("start trace back pc=%x, sp=%x, pid=%d[%s]\n",
		pc, sp, curproc->p_pid, curproc->p_comm);

	while (VALID_TEXT(pc)) {
		if (sp >= (caddr_t)KERNELSTACK || sp < (caddr_t)UADDR) {
			printf("stack exhausted (sp=0x%x)\n", sp);
			break;
		}
		if (ExceptionHandler(pc)) {
			pc = (u_int *)(*((u_int *)&sp[EF_RA]));
			sp += EF_SIZE;
			printf("trapped from pc=%x, sp=%x\n", pc, sp);
		} else {
			pc = getra(pc, sp, &fsize);
			sp += fsize;
			printf("called from pc=%x, sp=%x\n", pc, sp);
		}
	}
	printf("trace back END. pid=%d[%s]\n", curproc->p_pid, curproc->p_comm);
}

#define NPCSTOCK	128	/* max branch targets remembered per frame */

/*
 * Determine the return address and frame size of the frame containing
 * pc, by scanning forward through the instruction stream simulating
 * sp-relative loads, the sp-adjusting ADDIU, and unconditional
 * branches, until the frame's JR is found.  Returns 0 (and prints a
 * message) if the scan runs off the end of the text segment.
 */
u_int *
getra(pc, sp, fsize)
	register int *pc;
	register caddr_t sp;
	int *fsize;	/* out: frame size in bytes */
{
	u_int regs[32];
	int *opcs[NPCSTOCK];	/* branch targets already taken */
	register int i, nbpc = 0;
	int printed = 0;
	InstFmt I;

	*fsize = 0;
	for (i = 0; i < 32; i++) regs[i] = 0;
	for (; (u_int*)MACH_UNCACHED_TO_CACHED(pc) < (u_int*)ENDOFTXT; pc++) {
		I.word = *pc;
		switch (I.IType.op) {

		case OP_ADDIU:
			/* sp += fsize */
			if (I.IType.rs == SP && I.IType.rt == SP)
				*fsize = (u_short)I.IType.imm;
			break;

		case OP_LW:
			/* simulate restores of registers from the frame */
			if (I.IType.rs != SP)
				break;
			regs[I.IType.rt] = *(u_int *)&sp[(short)I.IType.imm];
			break;

		case OP_BEQ:
			/* follow only unconditional branches (beq $0,$0) */
			if (I.IType.rs != ZERO || I.IType.rt != ZERO)
				break;
			for (i = 0; i < nbpc; i++)
				if (pc == opcs[i]) {
					/*
					 * Branch constructs infinite loop.
					 */
					if (!printed) {
						printf("branch loop\n");
						printed = 1;
					}
					break;
				}
			if (i == nbpc) {
				opcs[nbpc] = pc;
				nbpc = imin(nbpc + 1, NPCSTOCK);
				pc = pc + (short)I.IType.imm;
			}
			break;

		default:
			break;
		}

		/* a JR in the delay-slot position ends the frame */
		I.word = *(pc - 1);
		if (I.RType.op == OP_SPECIAL && I.RType.func == OP_JR)
			return ((int *)regs[I.RType.rs]);
	}
	printf("pc run out of TEXT\n");
	return (0);
}