/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: trap.c 1.32 91/04/06$
 *
 *	@(#)trap.c	7.8 (Berkeley) 09/13/92
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "kernel.h"
#include "signalvar.h"
#include "syscall.h"
#include "user.h"
#include "buf.h"
#ifdef KTRACE
#include "ktrace.h"
#endif
#include "net/netisr.h"

#include "../include/trap.h"
#include "../include/psl.h"
#include "../include/reg.h"
#include "../include/cpu.h"
#include "../include/pte.h"
#include "../include/mips_opcode.h"
#include "clockreg.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

/*
 * This is a kludge to allow X windows to work.
 */
#define X_KLUGE

#ifdef X_KLUGE
/*
 * A single process (the X server) may be granted a private window of
 * pre-built PTEs starting at USER_MAP_ADDR; TLB misses in that range are
 * satisfied directly from UserMapPtes (see vmUserMap()/vmUserUnmap() and
 * the dofault code in trap()).
 */
#define USER_MAP_ADDR	0x4000
#define NPTES 300
static pt_entry_t UserMapPtes[NPTES];
static unsigned nUserMapPtes;	/* count of valid entries in UserMapPtes */
static pid_t UserMapPid;	/* pid that owns the kludge mapping */
#endif

struct proc *machFPCurProcPtr;	/* pointer to last proc to use FP */

extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
static void MemErrorInterrupt();
extern unsigned MachEmulateBranch();

/*
 * Vector table indexed by the exception code from the CP0 cause register;
 * the second group of 16 entries is used when the exception occurred in
 * user mode (index offset by T_USER).
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
};

/* Printable names for the 16 exception codes, for diagnostics. */
char *trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};

#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;
	u_int	cause;
	u_int	vadr;
	u_int	pc;
	u_int	ra;
	u_int	code;
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* circular; trp = next slot */
#endif

/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 *
 * statusReg/causeReg are the CP0 status and cause registers saved at
 * exception time.  `args' (intentionally undeclared, so K&R int) names
 * the register save area pushed by the assembly stubs; kernel-mode code
 * below peeks saved registers via ((int *)&args)[n].
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occurred on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;		/* signal code passed to trapsignal() */
	register struct proc *p = curproc;
	u_quad_t sticks;		/* only set/used on the user-mode path */
	vm_prot_t ftype;
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* log this trap in the circular history buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		sticks = p->p_sticks;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (sign bit set => kseg) */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
#ifndef ATTR
			register vm_offset_t pa;
#endif

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* mark the page modified and refresh the TLB entry */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= PG_FRAME;
			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
				entry, MachTLBUpdate(vadr, entry)); /* XXX */
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
#else
			pa = entry & PG_FRAME;
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: kmod");
			PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pmap_hash_t hp;
#ifndef ATTR
		vm_offset_t pa;
#endif
#ifdef DIAGNOSTIC
		extern pmap_hash_t zero_pmap_hash;
		extern pmap_t cur_pmap;

		if (cur_pmap->pm_hash == zero_pmap_hash)
			panic("tlbmod");
#endif
		/* find which of the two hash slots maps this page */
		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
		if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0)
			i = 0;
		else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0)
			i = 1;
		else
			panic("trap: tlb umod not found");
		if (hp->pmh_pte[i].low & PG_RO) {
			/* genuine write-protection fault; let VM decide */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		hp->pmh_pte[i].low |= PG_M;
		printf("trap: TLBupdate hi %x lo %x i %x\n",
			hp->pmh_pte[i].high, hp->pmh_pte[i].low,
			MachTLBUpdate(hp->pmh_pte[i].high,
				hp->pmh_pte[i].low)); /* XXX */
#ifdef ATTR
		pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |=
			PMAP_ATTR_MOD;
#else
		pa = hp->pmh_pte[i].low & PG_FRAME;
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: umod");
		PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* copyin()/copyout() style recovery via onfault */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/* check for fuswintr() or suswintr() getting a page fault */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map = &vm->vm_map;
		int rv;

#ifdef X_KLUGE
		/* satisfy faults in the X-server window from UserMapPtes */
		if (p->p_pid == UserMapPid &&
			(va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
			register pt_entry_t *pte;

			pte = &UserMapPtes[va];
			MachTLBWriteRandom((vadr & PG_FRAME) |
				(vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT),
				pte->pt_entry);
			return (pc);
		}
#endif
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv != KERN_SUCCESS) {
			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
				map, va, ftype, rv, vadr, pc,
				!USERMODE(statusReg) ? ((int *)&args)[19] :
					p->p_md.md_regs[RA]); /* XXX */
			printf("\tpid %d %s PC %x RA %x\n", p->p_pid,
				p->p_comm, p->p_md.md_regs[PC],
				p->p_md.md_regs[RA]); /* XXX */
			trapDump("vm_fault");
		}
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
		if (vadr == KERNBASE) {
			struct args {
				int	i[1];
			} args;
			int rval[2];

			/*
			 * Assume a signal handler is trying to return
			 * (see sendsig() and sigreturn()). We have to
			 * pop the sigframe struct to get the address of
			 * the sigcontext.
			 */
			args.i[0] = p->p_md.md_regs[SP] + 4 * sizeof(int);
			(void) sigreturn(curproc, &args, rval);
			goto out;
		}
		/* FALLTHROUGH */

	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/*
		 * Compute next PC after syscall instruction.  The sign bit
		 * of the cause register is the branch-delay (BD) bit: if
		 * set, the syscall sits in a branch delay slot and the
		 * branch must be emulated to find the next PC.
		 */
		if ((int)causeReg < 0)
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];	/* syscall number in v0 */
		switch (code) {
		case SYS_indir:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			/* remaining arguments are on the user stack */
			if (i > 3) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;	/* error flag */
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___indir:
			/*
			 * Like indir, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* back-annotate the history entry made on entry above */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		/* set up return values per the MIPS syscall convention */
		switch (i) {
		case 0:
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			locr0[PC] = pc;	/* re-execute syscall instruction */
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)	/* BD bit: break is in delay slot */
			va += 4;

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			/* text is write-protected; open it up temporarily */
			vm_offset_t sa, ea;
			int rv;

			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily hand the FPU over to this process */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		extern struct pcb kdbpcb;

		/* hand the register state to the kernel debugger */
		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		psig(i);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((i = CURSIG(p)) != 0)
			psig(i);
	}

	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL)
		addupc_task(p, pc, (int)(p->p_sticks - sticks));

	curpri = p->p_pri;
	return (pc);
}

#ifdef DS5000
struct intr_tab intr_tab[8];	/* per-slot I/O interrupt dispatch table */
#endif

int temp;		/* XXX ULTRIX compiler bug with -O */

/*
 * Handle an interrupt.
 * Called from MachKernIntr() or MachUserIntr()
 * Note: curproc might be NULL.
 */
interrupt(statusReg, causeReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned pc;		/* program counter where to continue */
{
	register unsigned mask;
	struct clockframe cf;

#ifdef DEBUG
	/* log this interrupt in the circular trap history buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = 0;
	trp->pc = pc;
	trp->ra = 0;
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_intr++;
	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
#ifdef DS3100
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_3) {
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;

		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_3;	/* reenable clock interrupts */
	}
	/*
	 * Enable hardware interrupts which were enabled but not pending.
	 * We only respond to software interrupts when returning to spl0.
	 */
	splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
	if (mask & MACH_INT_MASK_0)
		siiintr(0);		/* SCSI */
	if (mask & MACH_INT_MASK_1)
		leintr(0);		/* ethernet */
	if (mask & MACH_INT_MASK_2)
		dcintr(0);		/* serial lines */
	if (mask & MACH_INT_MASK_4)
		MemErrorInterrupt();
#endif /* DS3100 */
#ifdef DS5000
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;
		register unsigned csr;
		static int warned = 0;

		/* piggyback a power-supply temperature check on the clock */
		csr = *(unsigned *)MACH_SYS_CSR_ADDR;
		if ((csr & MACH_CSR_PSWARN) && !warned) {
			warned = 1;
			printf("WARNING: power supply is overheating!\n");
		} else if (warned && !(csr & MACH_CSR_PSWARN)) {
			warned = 0;
			printf("WARNING: power supply is OK again\n");
		}

		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;	/* reenable clock interrupts */
	}
	if (mask & MACH_INT_MASK_0) {
		register unsigned csr;
		register unsigned i, m;

		/* dispatch each pending, enabled I/O slot interrupt */
		csr = *(unsigned *)MACH_SYS_CSR_ADDR;
		m = csr & (csr >> MACH_CSR_IOINTEN_SHIFT) & MACH_CSR_IOINT_MASK;
#if 0
		*(unsigned *)MACH_SYS_CSR_ADDR =
			(csr & ~(MACH_CSR_MBZ | 0xFF)) |
			(m << MACH_CSR_IOINTEN_SHIFT);
#endif
		/*
		 * Enable hardware interrupts which were enabled but not
		 * pending. We only respond to software interrupts when
		 * returning to spl0.
		 */
		splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
			MACH_SR_INT_ENA_CUR);
		for (i = 0; m; i++, m >>= 1) {
			if (!(m & 1))
				continue;
			if (intr_tab[i].func)
				(*intr_tab[i].func)(intr_tab[i].unit);
			else
				printf("spurious interrupt %d\n", i);
		}
#if 0
		*(unsigned *)MACH_SYS_CSR_ADDR =
			csr & ~(MACH_CSR_MBZ | 0xFF);
#endif
	} else {
		/*
		 * Enable hardware interrupts which were enabled but not
		 * pending.  We only respond to software interrupts when
		 * returning to spl0.
		 */
		splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
			MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3)
		MemErrorInterrupt();
#endif /* DS5000 */
	if (mask & MACH_INT_MASK_5) {
		/* FPU interrupt: only meaningful from user mode */
		if (!USERMODE(statusReg)) {
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
	}
	if (mask & MACH_SOFT_INT_MASK_0) {
		clearsoftclock();
		cnt.v_soft++;
		softclock();
	}
	/* process network interrupt if we trapped or will very soon */
	if ((mask & MACH_SOFT_INT_MASK_1) ||
	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
		clearsoftnet();
		cnt.v_soft++;
#ifdef INET
		if (netisr & (1 << NETISR_ARP)) {
			netisr &= ~(1 << NETISR_ARP);
			arpintr();
		}
		if (netisr & (1 << NETISR_IP)) {
			netisr &= ~(1 << NETISR_IP);
			ipintr();
		}
#endif
#ifdef NS
		if (netisr & (1 << NETISR_NS)) {
			netisr &= ~(1 << NETISR_NS);
			nsintr();
		}
#endif
#ifdef ISO
		if (netisr & (1 << NETISR_ISO)) {
			netisr &= ~(1 << NETISR_ISO);
			clnlintr();
		}
#endif
	}
}

/*
 * This is called from MachUserIntr() if astpending is set.
 * This is very similar to the tail of trap().
 */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	int sig;

	cnt.v_soft++;
	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	/* post any profiling tick that could not be charged at clock time */
	if (p->p_flag & SOWEUPC) {
		p->p_flag &= ~SOWEUPC;
		ADDUPROF(p);
	}
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}
	curpri = p->p_pri;
}

#ifdef DEBUG
/*
 * Print the trap history buffer, most recent entry first, then clear it.
 * Stops early at the first never-used (cause == 0) slot.
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* walk backwards through the circular buffer */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf(" RA %x code %d\n", trp->ra, trp->code);
	}
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif

#ifdef X_KLUGE
/*
 * This is a kludge to allow X windows to work.
 */
caddr_t
vmUserMap(size, pa)
	int size;		/* number of bytes to map */
	unsigned pa;		/* physical (device) address to map */
{
	register caddr_t v;
	unsigned off, entry;

	/* first caller claims the window; it stays owned by that pid */
	if (nUserMapPtes == 0)
		UserMapPid = curproc->p_pid;
	else if (UserMapPid != curproc->p_pid)
		return ((caddr_t)0);
	off = pa & PGOFSET;
	size = btoc(off + size);	/* now a page count */
	if (nUserMapPtes + size > NPTES)
		return ((caddr_t)0);
	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
	entry = (pa & 0x9ffff000) | PG_V | PG_M;
	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
		entry |= PG_N;		/* mark uncached */
	while (size > 0) {
		UserMapPtes[nUserMapPtes].pt_entry = entry;
		entry += NBPG;
		nUserMapPtes++;
		size--;
	}
	return (v);
}

/*
 * Tear down the X kludge mapping and flush the owner's TLB entries.
 */
vmUserUnmap()
{
	int id;

	nUserMapPtes = 0;
	if (UserMapPid == curproc->p_pid) {
		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
		if (id >= 0)
			MachTLBFlushPID(id);
	}
	UserMapPid = 0;
}
#endif

/*
 * MemErrorInterrupt --
 *
 *	Handle a memory-error interrupt reported through the system
 *	control/status register.  On the DS3100, a write error panics;
 *	otherwise the error bits are cleared.  On the DS5000 the error
 *	address register is printed and reset.
 */
static void
MemErrorInterrupt()
{
#ifdef DS3100
	volatile u_short *sysCSRPtr = (u_short *)MACH_SYS_CSR_ADDR;
	u_short csr;

	csr = *sysCSRPtr;

	if (csr & MACH_CSR_MEM_ERR) {
		printf("Memory error at 0x%x\n",
			*(unsigned *)MACH_WRITE_ERROR_ADDR);
		panic("Mem error interrupt");
	}
	*sysCSRPtr = (csr & ~MACH_CSR_MBZ) | 0xff;	/* clear error bits */
#endif /* DS3100 */
#ifdef DS5000
	printf("erradr %x\n", *(unsigned *)MACH_ERROR_ADDR);
	*(unsigned *)MACH_ERROR_ADDR = 0;
	MachEmptyWriteBuffer();
#endif /* DS5000 */
}

/*
 * Return the resulting PC as if the branch was executed.
1049 */ 1050 unsigned 1051 MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch) 1052 unsigned *regsPtr; 1053 unsigned instPC; 1054 unsigned fpcCSR; 1055 int allowNonBranch; 1056 { 1057 InstFmt inst; 1058 unsigned retAddr; 1059 int condition; 1060 extern unsigned GetBranchDest(); 1061 1062 #if 0 1063 printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC, 1064 *instPC, fpcCSR); 1065 #endif 1066 1067 inst = *(InstFmt *)instPC; 1068 switch ((int)inst.JType.op) { 1069 case OP_SPECIAL: 1070 switch ((int)inst.RType.func) { 1071 case OP_JR: 1072 case OP_JALR: 1073 retAddr = regsPtr[inst.RType.rs]; 1074 break; 1075 1076 default: 1077 if (!allowNonBranch) 1078 panic("MachEmulateBranch: Non-branch"); 1079 retAddr = instPC + 4; 1080 break; 1081 } 1082 break; 1083 1084 case OP_BCOND: 1085 switch ((int)inst.IType.rt) { 1086 case OP_BLTZ: 1087 case OP_BLTZAL: 1088 if ((int)(regsPtr[inst.RType.rs]) < 0) 1089 retAddr = GetBranchDest((InstFmt *)instPC); 1090 else 1091 retAddr = instPC + 8; 1092 break; 1093 1094 case OP_BGEZAL: 1095 case OP_BGEZ: 1096 if ((int)(regsPtr[inst.RType.rs]) >= 0) 1097 retAddr = GetBranchDest((InstFmt *)instPC); 1098 else 1099 retAddr = instPC + 8; 1100 break; 1101 1102 default: 1103 panic("MachEmulateBranch: Bad branch cond"); 1104 } 1105 break; 1106 1107 case OP_J: 1108 case OP_JAL: 1109 retAddr = (inst.JType.target << 2) | 1110 ((unsigned)instPC & 0xF0000000); 1111 break; 1112 1113 case OP_BEQ: 1114 if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt]) 1115 retAddr = GetBranchDest((InstFmt *)instPC); 1116 else 1117 retAddr = instPC + 8; 1118 break; 1119 1120 case OP_BNE: 1121 if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt]) 1122 retAddr = GetBranchDest((InstFmt *)instPC); 1123 else 1124 retAddr = instPC + 8; 1125 break; 1126 1127 case OP_BLEZ: 1128 if ((int)(regsPtr[inst.RType.rs]) <= 0) 1129 retAddr = GetBranchDest((InstFmt *)instPC); 1130 else 1131 retAddr = instPC + 8; 1132 break; 1133 1134 case OP_BGTZ: 1135 if 
((int)(regsPtr[inst.RType.rs]) > 0) 1136 retAddr = GetBranchDest((InstFmt *)instPC); 1137 else 1138 retAddr = instPC + 8; 1139 break; 1140 1141 case OP_COP1: 1142 switch (inst.RType.rs) { 1143 case OP_BCx: 1144 case OP_BCy: 1145 if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE) 1146 condition = fpcCSR & MACH_FPC_COND_BIT; 1147 else 1148 condition = !(fpcCSR & MACH_FPC_COND_BIT); 1149 if (condition) 1150 retAddr = GetBranchDest((InstFmt *)instPC); 1151 else 1152 retAddr = instPC + 8; 1153 break; 1154 1155 default: 1156 if (!allowNonBranch) 1157 panic("MachEmulateBranch: Bad coproc branch instruction"); 1158 retAddr = instPC + 4; 1159 } 1160 break; 1161 1162 default: 1163 if (!allowNonBranch) 1164 panic("MachEmulateBranch: Non-branch instruction"); 1165 retAddr = instPC + 4; 1166 } 1167 #if 0 1168 printf("Target addr=%x\n", retAddr); 1169 #endif 1170 return (retAddr); 1171 } 1172 1173 unsigned 1174 GetBranchDest(InstPtr) 1175 InstFmt *InstPtr; 1176 { 1177 return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2)); 1178 } 1179 1180 /* 1181 * This routine is called by procxmt() to single step one instruction. 1182 * We do this by storing a break instruction after the current instruction, 1183 * resuming execution, and then restoring the old instruction. 
 */
cpu_singlestep(p)
	register struct proc *p;
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;	/* saved user registers */
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): the second disjunct (md_ss_addr == va) is subsumed
	 * by the first (md_ss_addr non-zero) and so can never affect the
	 * result -- possibly a leftover from an intended different test;
	 * verify against history before changing.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	/* save the original instruction and plant the break */
	p->p_md.md_ss_addr = va;
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		/* text is write-protected; open it up temporarily */
		vm_offset_t sa, ea;
		int rv;

		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}