/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: trap.c 1.32 91/04/06$
 *
 *	@(#)trap.c	7.6 (Berkeley) 06/20/92
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "kernel.h"
#include "signalvar.h"
#include "user.h"
#include "buf.h"
#ifdef KTRACE
#include "ktrace.h"
#endif
#include "net/netisr.h"

#include "../include/trap.h"
#include "../include/psl.h"
#include "../include/reg.h"
#include "../include/cpu.h"
#include "../include/pte.h"
#include "../include/mips_opcode.h"
#include "clockreg.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

/*
 * This is a kludge to allow X windows to work.
 */
#define X_KLUGE

#ifdef X_KLUGE
/*
 * Static window of PTEs that vmUserMap()/vmUserUnmap() hand out to a
 * single process (the X server) so it can map device memory directly.
 * The TLB-miss path in trap() consults these before calling vm_fault().
 */
#define USER_MAP_ADDR	0x4000
#define NPTES 300
static pt_entry_t UserMapPtes[NPTES];
static unsigned nUserMapPtes;		/* number of UserMapPtes in use */
static pid_t UserMapPid;		/* pid that owns the mapping */
#endif

struct	proc *machFPCurProcPtr;	/* pointer to last proc to use FP */

extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
extern void MemErrorInterrupt();
extern unsigned MachEmulateBranch();

/*
 * Low-level exception vector table, indexed by the cause-register
 * exception code (0..15); the second group of 16 entries is used for
 * exceptions taken from user mode (presumably selected by the locore
 * assembly stubs -- verify against the dispatch code there).
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
};

/*
 * Printable trap names, indexed by cause-register exception code.
 * Used by trapDump() below.
 */
char *trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};

#ifdef DEBUG
#define TRAPSIZE	10
/*
 * Circular trace of the most recent traps/interrupts/syscalls.
 * `trp' always points at the next slot to fill; trapDump() walks it
 * backwards and clears it.
 */
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;
	u_int	cause;
	u_int	vadr;
	u_int	pc;
	u_int	ra;
	u_int	code;
} trapdebug[TRAPSIZE], *trp = trapdebug;
#endif

/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occurred on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;
	register struct proc *p = curproc;
	struct timeval syst;
	vm_prot_t ftype;
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* log this trap in the circular trace buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	/*
	 * NOTE(review): ((int *)&args)[19] reaches past the declared
	 * arguments into the exception frame pushed by the locore stub to
	 * pick up the saved RA -- the index must match the frame layout in
	 * the assembly code; confirm there if the frame ever changes.
	 */
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		/* remember system time at entry for profiling at exit */
		syst = p->p_stime;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* (int)vadr < 0: fault address has the top bit set,
		 * i.e. it lies in the kernel's address range. */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
#ifndef ATTR
			register vm_offset_t pa;
#endif

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* mark the page modified and refresh the TLB entry */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= PG_FRAME;
			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
				entry, MachTLBUpdate(vadr, entry)); /* XXX */
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
#else
			pa = entry & PG_FRAME;
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: kmod");
			PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pmap_hash_t hp;
#ifndef ATTR
		vm_offset_t pa;
#endif
#ifdef DIAGNOSTIC
		extern pmap_hash_t zero_pmap_hash;
		extern pmap_t cur_pmap;

		if (cur_pmap->pm_hash == zero_pmap_hash)
			panic("tlbmod");
#endif
		/* find which of the two hash-bucket PTEs maps this page */
		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
		if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0)
			i = 0;
		else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0)
			i = 1;
		else
			panic("trap: tlb umod not found");
		if (hp->pmh_pte[i].low & PG_RO) {
			/* genuine write-protection fault; let vm_fault decide */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		/* writable page: set the dirty bit and reload the TLB */
		hp->pmh_pte[i].low |= PG_M;
		printf("trap: TLBupdate hi %x lo %x i %x\n",
			hp->pmh_pte[i].high, hp->pmh_pte[i].low,
			MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low)); /* XXX */
#ifdef ATTR
		pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |=
			PMAP_ATTR_MOD;
#else
		pa = hp->pmh_pte[i].low & PG_FRAME;
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: umod");
		PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/*
			 * A copyin/copyout-style routine registered a
			 * recovery address; resume there instead of
			 * panicking.
			 */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map = &vm->vm_map;
		int rv;

#ifdef X_KLUGE
		/* satisfy the fault from the static X-server mapping, if any */
		if (p->p_pid == UserMapPid &&
			(va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
			register pt_entry_t *pte;

			pte = &UserMapPtes[va];
			MachTLBWriteRandom((vadr & PG_FRAME) |
				(vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT),
				pte->pt_entry);
			return (pc);
		}
#endif
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv != KERN_SUCCESS) {
			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
				map, va, ftype, rv, vadr, pc,
				!USERMODE(statusReg) ? ((int *)&args)[19] :
					p->p_md.md_regs[RA]); /* XXX */
			printf("\tpid %d %s PC %x RA %x\n", p->p_pid,
				p->p_comm, p->p_md.md_regs[PC],
				p->p_md.md_regs[RA]); /* XXX */
			trapDump("vm_fault");
		}
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			/* kernel fault: try the registered recovery address */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
		if (vadr == KERNBASE) {
			struct args {
				int	i[1];
			} args;
			int rval[2];

			/*
			 * Assume a signal handler is trying to return
			 * (see sendsig() and sigreturn()). We have to
			 * pop the sigframe struct to get the address of
			 * the sigcontext.
			 */
			args.i[0] = p->p_md.md_regs[SP] + 4 * sizeof(int);
			(void) sigreturn(curproc, &args, rval);
			goto out;
		}
		/* FALLTHROUGH */

	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		if ((int)causeReg < 0)
			/* trap was in a branch delay slot (cause BD bit set) */
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];
		if (code == 0) {			/* indir */
			/*
			 * syscall(2): real syscall number is in A0, so
			 * arguments shift down one register.
			 */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[0];	/* indir (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			if (i > 3) {
				/* remaining arguments live on the user stack */
				i = copyin((caddr_t)(locr0[SP] +
						3 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;	/* error flag */
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		} else {
			if (code >= numsys)
				callp = &systab[0];	/* indir (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				/* remaining arguments live on the user stack */
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;	/* error flag */
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* tag the previous trace entry with the syscall number */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;	/* negative code marks syscall return */
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		if (i == ERESTART)
			/* back up to re-execute the syscall instruction */
			locr0[PC] = pc;
		else if (i != EJUSTRETURN) {
			if (i) {
				locr0[V0] = i;
				locr0[A3] = 1;	/* error flag */
			} else {
				locr0[V0] = rval[0];
				locr0[V1] = rval[1];
				locr0[A3] = 0;
			}
		}
		/* else if (i == EJUSTRETURN) */
			/* nothing to do */
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)
			va += 4;	/* exception was in branch delay slot */

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			/* not our single-step breakpoint: deliver to user */
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			vm_offset_t sa, ea;
			int rv;

			/*
			 * Text page is write protected: temporarily make it
			 * writable, patch, then protect it again.
			 */
			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily hand the FPU to this process */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		extern struct pcb kdbpcb;

		/*
		 * Build a pcb for the kernel debugger from the exception
		 * frame (same locore frame layout as the RA fetch above).
		 */
		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	astpending = 0;
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	if (p->p_stats->p_prof.pr_scale) {
		/* charge profiling time accumulated during this trap */
		int ticks;
		struct timeval *tv = &p->p_stime;

		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks)
			addupc(pc, &p->p_stats->p_prof, ticks);
	}
	curpri = p->p_pri;
	return (pc);
}

#ifdef DS5000
struct intr_tab intr_tab[8];	/* per-slot I/O interrupt dispatch table */
#endif

int temp;		/* XXX ULTRIX compiler bug with -O */

/*
 * Handle an interrupt.
 * Called from MachKernIntr() or MachUserIntr()
 * Note: curproc might be NULL.
 */
interrupt(statusReg, causeReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned pc;		/* program counter where to continue */
{
	register unsigned mask;
	clockframe cf;

#ifdef DEBUG
	/* log this interrupt in the circular trace buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = 0;
	trp->pc = pc;
	trp->ra = 0;
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_intr++;
	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
#ifdef DS3100
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_3) {
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;

		temp = c->regc;	/* clear interrupt bits */
		cf.pc = pc;
		cf.ps = statusReg;
		hardclock(cf);
		causeReg &= ~MACH_INT_MASK_3;	/* reenable clock interrupts */
	}
	/*
	 * Enable hardware interrupts which were enabled but not pending.
	 * We only respond to software interrupts when returning to spl0.
	 */
	splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
	if (mask & MACH_INT_MASK_0)
		siiintr(0);
	if (mask & MACH_INT_MASK_1)
		leintr(0);
	if (mask & MACH_INT_MASK_2)
		dcintr(0);
	if (mask & MACH_INT_MASK_4)
		MemErrorInterrupt();
#endif /* DS3100 */
#ifdef DS5000
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;
		register unsigned csr;
		static int warned = 0;

		/* piggyback a power-supply health check on the clock tick */
		csr = *(unsigned *)MACH_SYS_CSR_ADDR;
		if ((csr & MACH_CSR_PSWARN) && !warned) {
			warned = 1;
			printf("WARNING: power supply is overheating!\n");
		} else if (warned && !(csr & MACH_CSR_PSWARN)) {
			warned = 0;
			printf("WARNING: power supply is OK again\n");
		}

		temp = c->regc;	/* clear interrupt bits */
		cf.pc = pc;
		cf.ps = statusReg;
		hardclock(cf);
		causeReg &= ~MACH_INT_MASK_1;	/* reenable clock interrupts */
	}
	if (mask & MACH_INT_MASK_0) {
		register unsigned csr;
		register unsigned i, m;

		/* I/O slot interrupts: pending & enabled bits from the CSR */
		csr = *(unsigned *)MACH_SYS_CSR_ADDR;
		m = csr & (csr >> MACH_CSR_IOINTEN_SHIFT) & MACH_CSR_IOINT_MASK;
#if 0
		*(unsigned *)MACH_SYS_CSR_ADDR =
			(csr & ~(MACH_CSR_MBZ | 0xFF)) |
			(m << MACH_CSR_IOINTEN_SHIFT);
#endif
		/*
		 * Enable hardware interrupts which were enabled but not
		 * pending.  We only respond to software interrupts when
		 * returning to spl0.
		 */
		splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
			MACH_SR_INT_ENA_CUR);
		for (i = 0; m; i++, m >>= 1) {
			if (!(m & 1))
				continue;
			if (intr_tab[i].func)
				(*intr_tab[i].func)(intr_tab[i].unit);
			else
				printf("spurious interrupt %d\n", i);
		}
#if 0
		*(unsigned *)MACH_SYS_CSR_ADDR =
			csr & ~(MACH_CSR_MBZ | 0xFF);
#endif
	} else {
		/*
		 * Enable hardware interrupts which were enabled but not
		 * pending.  We only respond to software interrupts when
		 * returning to spl0.
		 */
		splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
			MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3)
		MemErrorInterrupt();
#endif /* DS5000 */
	if (mask & MACH_INT_MASK_5) {
		/* FPU interrupt from kernel mode should not happen */
		if (!USERMODE(statusReg)) {
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
	}
	if (mask & MACH_SOFT_INT_MASK_0) {
		clockframe cf;

		clearsoftclock();
		cnt.v_soft++;
		cf.pc = pc;
		cf.ps = statusReg;
		softclock(cf);
	}
	/* process network interrupt if we trapped or will very soon */
	if ((mask & MACH_SOFT_INT_MASK_1) ||
	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
		clearsoftnet();
		cnt.v_soft++;
#ifdef INET
		if (netisr & (1 << NETISR_ARP)) {
			netisr &= ~(1 << NETISR_ARP);
			arpintr();
		}
		if (netisr & (1 << NETISR_IP)) {
			netisr &= ~(1 << NETISR_IP);
			ipintr();
		}
#endif
#ifdef NS
		if (netisr & (1 << NETISR_NS)) {
			netisr &= ~(1 << NETISR_NS);
			nsintr();
		}
#endif
#ifdef ISO
		if (netisr & (1 << NETISR_ISO)) {
			netisr &= ~(1 << NETISR_ISO);
			clnlintr();
		}
#endif
	}
}

/*
 * This is called from MachUserIntr() if astpending is set.
 * This is very similar to the tail of trap().
 */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	register int i;

	cnt.v_soft++;
	astpending = 0;
	/* deliver any pending signals before returning to user mode */
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	curpri = p->p_pri;
}

#ifdef DEBUG
/*
 * Print the trap history buffer, newest entry first, then clear it.
 * Entries with a zero cause register are treated as unused.
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* walk the ring buffer backwards from the current slot */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf(" RA %x code %d\n", trp->ra, trp->code);
	}
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif

#ifdef X_KLUGE
/*
 * This is a kludge to allow X windows to work.
 */
caddr_t
vmUserMap(size, pa)
	int size;
	unsigned pa;
{
	register caddr_t v;
	unsigned off, entry;

	/* only one process at a time may own the kludge mapping */
	if (nUserMapPtes == 0)
		UserMapPid = curproc->p_pid;
	else if (UserMapPid != curproc->p_pid)
		return ((caddr_t)0);
	off = pa & PGOFSET;
	size = btoc(off + size);	/* size is now a page count */
	if (nUserMapPtes + size > NPTES)
		return ((caddr_t)0);
	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
	entry = (pa & 0x9ffff000) | PG_V | PG_M;
	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
		entry |= PG_N;	/* mark device memory non-cacheable */
	while (size > 0) {
		UserMapPtes[nUserMapPtes].pt_entry = entry;
		entry += NBPG;
		nUserMapPtes++;
		size--;
	}
	return (v);
}

/*
 * Tear down the kludge mapping and flush the owner's TLB entries.
 */
vmUserUnmap()
{
	int id;

	nUserMapPtes = 0;
	if (UserMapPid == curproc->p_pid) {
		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
		if (id >= 0)
			MachTLBFlushPID(id);
	}
	UserMapPid = 0;
}
#endif

/*
 *----------------------------------------------------------------------
 *
 * MemErrorInterrupt --
 *
 *	Handle an interrupt for the memory-error control register.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */
static void
MemErrorInterrupt()
{
#ifdef DS3100
	volatile u_short *sysCSRPtr = (u_short *)MACH_SYS_CSR_ADDR;
	u_short csr;

	csr = *sysCSRPtr;

	if (csr & MACH_CSR_MEM_ERR) {
		printf("Memory error at 0x%x\n",
			*(unsigned *)MACH_WRITE_ERROR_ADDR);
		panic("Mem error interrupt");
	}
	/* write back to acknowledge/clear the error condition */
	*sysCSRPtr = (csr & ~MACH_CSR_MBZ) | 0xff;
#endif /* DS3100 */
#ifdef DS5000
	printf("erradr %x\n", *(unsigned *)MACH_ERROR_ADDR);
	*(unsigned *)MACH_ERROR_ADDR = 0;	/* clear the latched error */
	MachEmptyWriteBuffer();
#endif /* DS5000 */
}

/*
 * Return the resulting PC as if the branch was executed.
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;	/* saved register values (indexed by reg #) */
	unsigned instPC;	/* address of the instruction to decode */
	unsigned fpcCSR;	/* FP control/status, for coprocessor branches */
	int allowNonBranch;	/* if 0, panic on a non-branch instruction */
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*instPC, fpcCSR);
#endif

	/*
	 * Untaken branches return instPC + 8 (skip the delay-slot
	 * instruction); non-branches return instPC + 4.
	 */
	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* register jump: target comes from rs */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		/* rt field selects the compare-against-zero variant */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* 26-bit target, within the current 256MB region */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			/* branch on FP condition bit true/false */
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}

/*
 * Compute a conditional branch target: PC of the delay slot (inst + 4)
 * plus the sign-extended 16-bit immediate shifted left two bits.
 */
unsigned
GetBranchDest(InstPtr)
	InstFmt *InstPtr;
{
	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
}

/*
 * This routine is called by procxmt() to single step one instruction.
 * We do this by storing a break instruction after the current instruction,
 * resuming execution, and then restoring the old instruction.
 */
cpu_singlestep(p)
	register struct proc *p;
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): the `md_ss_addr == va' test is subsumed by the
	 * preceding `md_ss_addr' (non-zero) test except when both are zero;
	 * possibly `&&' was intended -- verify before changing.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	p->p_md.md_ss_addr = va;
	/* save the original instruction, then plant the break */
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		vm_offset_t sa, ea;
		int rv;

		/*
		 * Text page is write protected: temporarily make it
		 * writable, patch, then protect it again.
		 */
		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}