1 /* 2 * Copyright (c) 1988 University of Utah. 3 * Copyright (c) 1992 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * the Systems Programming Group of the University of Utah Computer 8 * Science Department, Ralph Campbell, Sony Corp. and Kazumasa Utashiro 9 * of Software Research Associates, Inc. 10 * 11 * %sccs.include.redist.c% 12 * 13 * from: Utah $Hdr: trap.c 1.32 91/04/06$ 14 * 15 * @(#)trap.c 7.3 (Berkeley) 07/28/92 16 */ 17 18 #include "../include/fix_machine_type.h" 19 #include "param.h" 20 #include "systm.h" 21 #include "proc.h" 22 #include "kernel.h" 23 #include "signalvar.h" 24 #include "user.h" 25 #include "buf.h" 26 #ifdef KTRACE 27 #include "ktrace.h" 28 #endif 29 #include "net/netisr.h" 30 31 #include "../include/trap.h" 32 #include "../include/psl.h" 33 #include "../include/reg.h" 34 #include "../include/cpu.h" 35 #include "../include/pte.h" 36 #include "../include/mips_opcode.h" 37 #include "../include/adrsmap.h" 38 39 #include "vm/vm.h" 40 #include "vm/vm_kern.h" 41 #include "vm/vm_page.h" 42 43 #include "lp.h" 44 #include "bm.h" 45 #include "ms.h" 46 #include "en.h" 47 #include "../hbdev/dmac_0448.h" 48 #include "../sio/scc.h" 49 50 /* 51 * This is a kludge to allow X windows to work. 52 */ 53 #undef X_KLUGE 54 55 #ifdef X_KLUGE 56 #define USER_MAP_ADDR 0x4000 57 #define NPTES 300 58 static pt_entry_t UserMapPtes[NPTES]; 59 static unsigned nUserMapPtes; 60 static pid_t UserMapPid; 61 #endif 62 63 struct proc *machFPCurProcPtr; /* pointer to last proc to use FP */ 64 65 extern void MachKernGenException(); 66 extern void MachUserGenException(); 67 extern void MachKernIntr(); 68 extern void MachUserIntr(); 69 extern void MachTLBModException(); 70 extern void MachTLBMissException(); 71 static void MemErrorInterrupt(); 72 extern unsigned MachEmulateBranch(); 73 74 void (*machExceptionTable[])() = { 75 /* 76 * The kernel exception handlers. 
77 */ 78 MachKernIntr, /* external interrupt */ 79 MachKernGenException, /* TLB modification */ 80 MachTLBMissException, /* TLB miss (load or instr. fetch) */ 81 MachTLBMissException, /* TLB miss (store) */ 82 MachKernGenException, /* address error (load or I-fetch) */ 83 MachKernGenException, /* address error (store) */ 84 MachKernGenException, /* bus error (I-fetch) */ 85 MachKernGenException, /* bus error (load or store) */ 86 MachKernGenException, /* system call */ 87 MachKernGenException, /* breakpoint */ 88 MachKernGenException, /* reserved instruction */ 89 MachKernGenException, /* coprocessor unusable */ 90 MachKernGenException, /* arithmetic overflow */ 91 MachKernGenException, /* reserved */ 92 MachKernGenException, /* reserved */ 93 MachKernGenException, /* reserved */ 94 /* 95 * The user exception handlers. 96 */ 97 MachUserIntr, 98 MachUserGenException, 99 MachUserGenException, 100 MachUserGenException, 101 MachUserGenException, 102 MachUserGenException, 103 MachUserGenException, 104 MachUserGenException, 105 MachUserGenException, 106 MachUserGenException, 107 MachUserGenException, 108 MachUserGenException, 109 MachUserGenException, 110 MachUserGenException, 111 MachUserGenException, 112 MachUserGenException, 113 }; 114 115 char *trap_type[] = { 116 "external interrupt", 117 "TLB modification", 118 "TLB miss (load or instr. 
fetch)", 119 "TLB miss (store)", 120 "address error (load or I-fetch)", 121 "address error (store)", 122 "bus error (I-fetch)", 123 "bus error (load or store)", 124 "system call", 125 "breakpoint", 126 "reserved instruction", 127 "coprocessor unusable", 128 "arithmetic overflow", 129 "reserved 13", 130 "reserved 14", 131 "reserved 15", 132 }; 133 134 #ifdef DEBUG 135 #define TRAPSIZE 10 136 struct trapdebug { /* trap history buffer for debugging */ 137 u_int status; 138 u_int cause; 139 u_int vadr; 140 u_int pc; 141 u_int ra; 142 u_int code; 143 } trapdebug[TRAPSIZE], *trp = trapdebug; 144 #endif 145 146 /* 147 * Handle an exception. 148 * Called from MachKernGenException() or MachUserGenException() 149 * when a processor trap occurs. 150 * In the case of a kernel trap, we return the pc where to resume if 151 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc. 152 */ 153 unsigned 154 trap(statusReg, causeReg, vadr, pc, args) 155 unsigned statusReg; /* status register at time of the exception */ 156 unsigned causeReg; /* cause register at time of exception */ 157 unsigned vadr; /* address (if any) the fault occured on */ 158 unsigned pc; /* program counter where to continue */ 159 { 160 register int type, i; 161 unsigned ucode = 0; 162 register struct proc *p = curproc; 163 u_quad_t sticks; 164 vm_prot_t ftype; 165 extern unsigned onfault_table[]; 166 167 #ifdef DEBUG 168 trp->status = statusReg; 169 trp->cause = causeReg; 170 trp->vadr = vadr; 171 trp->pc = pc; 172 trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] : 173 p->p_md.md_regs[RA]; 174 trp->code = 0; 175 if (++trp == &trapdebug[TRAPSIZE]) 176 trp = trapdebug; 177 #endif 178 179 cnt.v_trap++; 180 type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT; 181 if (USERMODE(statusReg)) { 182 type |= T_USER; 183 sticks = p->p_sticks; 184 } 185 186 /* 187 * Enable hardware interrupts if they were on before. 188 * We only respond to software interrupts when returning to user mode. 
189 */ 190 if (statusReg & MACH_SR_INT_ENA_PREV) 191 splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR); 192 193 switch (type) { 194 case T_TLB_MOD: 195 /* check for kernel address */ 196 if ((int)vadr < 0) { 197 register pt_entry_t *pte; 198 register unsigned entry; 199 #ifndef ATTR 200 register vm_offset_t pa; 201 #endif 202 203 pte = kvtopte(vadr); 204 entry = pte->pt_entry; 205 if (entry & PG_RO) { 206 /* write to read only page in the kernel */ 207 ftype = VM_PROT_WRITE; 208 goto kernel_fault; 209 } 210 entry |= PG_M; 211 pte->pt_entry = entry; 212 vadr &= PG_FRAME; 213 printf("trap: TLBupdate hi %x lo %x i %x\n", vadr, 214 entry, MachTLBUpdate(vadr, entry)); /* XXX */ 215 #ifdef ATTR 216 pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD; 217 #else 218 pa = entry & PG_FRAME; 219 if (!IS_VM_PHYSADDR(pa)) 220 panic("trap: kmod"); 221 PHYS_TO_VM_PAGE(pa)->clean = FALSE; 222 #endif 223 return (pc); 224 } 225 /* FALLTHROUGH */ 226 227 case T_TLB_MOD+T_USER: 228 { 229 pmap_hash_t hp; 230 #ifndef ATTR 231 vm_offset_t pa; 232 #endif 233 #ifdef DIAGNOSTIC 234 extern pmap_hash_t zero_pmap_hash; 235 extern pmap_t cur_pmap; 236 237 if (cur_pmap->pm_hash == zero_pmap_hash) 238 panic("tlbmod"); 239 #endif 240 hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)]; 241 if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0) 242 i = 0; 243 else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0) 244 i = 1; 245 else 246 panic("trap: tlb umod not found"); 247 if (hp->pmh_pte[i].low & PG_RO) { 248 ftype = VM_PROT_WRITE; 249 goto dofault; 250 } 251 hp->pmh_pte[i].low |= PG_M; 252 printf("trap: TLBupdate hi %x lo %x i %x\n", 253 hp->pmh_pte[i].high, hp->pmh_pte[i].low, 254 MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low)); /* XXX */ 255 #ifdef ATTR 256 pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |= 257 PMAP_ATTR_MOD; 258 #else 259 pa = hp->pmh_pte[i].low & PG_FRAME; 260 if (!IS_VM_PHYSADDR(pa)) 261 panic("trap: umod"); 262 PHYS_TO_VM_PAGE(pa)->clean = 
FALSE; 263 #endif 264 if (!USERMODE(statusReg)) 265 return (pc); 266 goto out; 267 } 268 269 case T_TLB_LD_MISS: 270 case T_TLB_ST_MISS: 271 ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ; 272 /* check for kernel address */ 273 if ((int)vadr < 0) { 274 register vm_offset_t va; 275 int rv; 276 277 kernel_fault: 278 va = trunc_page((vm_offset_t)vadr); 279 rv = vm_fault(kernel_map, va, ftype, FALSE); 280 if (rv == KERN_SUCCESS) 281 return (pc); 282 if (i = ((struct pcb *)UADDR)->pcb_onfault) { 283 ((struct pcb *)UADDR)->pcb_onfault = 0; 284 return (onfault_table[i]); 285 } 286 goto err; 287 } 288 /* check for fuswintr() or suswintr() getting a page fault */ 289 if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 4) 290 return (onfault_table[i]); 291 goto dofault; 292 293 case T_TLB_LD_MISS+T_USER: 294 ftype = VM_PROT_READ; 295 goto dofault; 296 297 case T_TLB_ST_MISS+T_USER: 298 ftype = VM_PROT_WRITE; 299 dofault: 300 { 301 register vm_offset_t va; 302 register struct vmspace *vm = p->p_vmspace; 303 register vm_map_t map = &vm->vm_map; 304 int rv; 305 306 #ifdef X_KLUGE 307 if (p->p_pid == UserMapPid && 308 (va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) { 309 register pt_entry_t *pte; 310 311 pte = &UserMapPtes[va]; 312 MachTLBWriteRandom((vadr & PG_FRAME) | 313 (vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT), 314 pte->pt_entry); 315 return (pc); 316 } 317 #endif 318 va = trunc_page((vm_offset_t)vadr); 319 rv = vm_fault(map, va, ftype, FALSE); 320 if (rv != KERN_SUCCESS) { 321 printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n", 322 map, va, ftype, rv, vadr, pc, 323 !USERMODE(statusReg) ? ((int *)&args)[19] : 324 p->p_md.md_regs[RA]); /* XXX */ 325 printf("\tpid %d %s PC %x RA %x\n", p->p_pid, 326 p->p_comm, p->p_md.md_regs[PC], 327 p->p_md.md_regs[RA]); /* XXX */ 328 #ifdef DEBUG 329 trapDump("vm_fault"); 330 #endif 331 } 332 /* 333 * If this was a stack access we keep track of the maximum 334 * accessed stack size. 
Also, if vm_fault gets a protection 335 * failure it is due to accessing the stack region outside 336 * the current limit and we need to reflect that as an access 337 * error. 338 */ 339 if ((caddr_t)va >= vm->vm_maxsaddr) { 340 if (rv == KERN_SUCCESS) { 341 unsigned nss; 342 343 nss = clrnd(btoc(USRSTACK-(unsigned)va)); 344 if (nss > vm->vm_ssize) 345 vm->vm_ssize = nss; 346 } else if (rv == KERN_PROTECTION_FAILURE) 347 rv = KERN_INVALID_ADDRESS; 348 } 349 if (rv == KERN_SUCCESS) { 350 if (!USERMODE(statusReg)) 351 return (pc); 352 goto out; 353 } 354 if (!USERMODE(statusReg)) { 355 if (i = ((struct pcb *)UADDR)->pcb_onfault) { 356 ((struct pcb *)UADDR)->pcb_onfault = 0; 357 return (onfault_table[i]); 358 } 359 goto err; 360 } 361 ucode = vadr; 362 i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV; 363 break; 364 } 365 366 case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */ 367 if (vadr == KERNBASE) { 368 struct args { 369 int i[1]; 370 } args; 371 int rval[2]; 372 373 /* 374 * Assume a signal handler is trying to return 375 * (see sendsig() and sigreturn()). We have to 376 * pop the sigframe struct to get the address of 377 * the sigcontext. 
378 */ 379 args.i[0] = p->p_md.md_regs[SP] + 4 * sizeof(int); 380 (void) sigreturn(curproc, &args, rval); 381 goto out; 382 } 383 /* FALLTHROUGH */ 384 385 case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */ 386 case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to cpu */ 387 case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to cpu */ 388 i = SIGSEGV; 389 break; 390 391 case T_SYSCALL+T_USER: 392 { 393 register int *locr0 = p->p_md.md_regs; 394 register struct sysent *callp; 395 unsigned int code; 396 int numsys; 397 struct args { 398 int i[8]; 399 } args; 400 int rval[2]; 401 struct sysent *systab; 402 extern int nsysent; 403 404 cnt.v_syscall++; 405 /* compute next PC after syscall instruction */ 406 if ((int)causeReg < 0) 407 locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0); 408 else 409 locr0[PC] += 4; 410 systab = sysent; 411 numsys = nsysent; 412 code = locr0[V0]; 413 #ifdef COMPAT_NEWSOS 414 if (code >= 1000) 415 code -= 1000; /* too easy */ 416 #endif 417 if (code == 0) { /* indir */ 418 code = locr0[A0]; 419 #ifdef COMPAT_NEWSOS 420 if (code >= 1000) 421 code -= 1000; /* too easy */ 422 #endif 423 if (code >= numsys) 424 callp = &systab[0]; /* indir (illegal) */ 425 else 426 callp = &systab[code]; 427 i = callp->sy_narg; 428 args.i[0] = locr0[A1]; 429 args.i[1] = locr0[A2]; 430 args.i[2] = locr0[A3]; 431 if (i > 3) { 432 i = copyin((caddr_t)(locr0[SP] + 433 3 * sizeof(int)), 434 (caddr_t)&args.i[3], 435 (u_int)(i - 3) * sizeof(int)); 436 if (i) { 437 locr0[V0] = i; 438 locr0[A3] = 1; 439 #ifdef KTRACE 440 if (KTRPOINT(p, KTR_SYSCALL)) 441 ktrsyscall(p->p_tracep, code, 442 callp->sy_narg, args.i); 443 #endif 444 goto done; 445 } 446 } 447 } else { 448 if (code >= numsys) 449 callp = &systab[0]; /* indir (illegal) */ 450 else 451 callp = &systab[code]; 452 i = callp->sy_narg; 453 args.i[0] = locr0[A0]; 454 args.i[1] = locr0[A1]; 455 args.i[2] = locr0[A2]; 456 args.i[3] = locr0[A3]; 457 if (i > 4) { 458 i = copyin((caddr_t)(locr0[SP] + 459 4 * sizeof(int)), 460 
(caddr_t)&args.i[4], 461 (u_int)(i - 4) * sizeof(int)); 462 if (i) { 463 locr0[V0] = i; 464 locr0[A3] = 1; 465 #ifdef KTRACE 466 if (KTRPOINT(p, KTR_SYSCALL)) 467 ktrsyscall(p->p_tracep, code, 468 callp->sy_narg, args.i); 469 #endif 470 goto done; 471 } 472 } 473 } 474 #ifdef KTRACE 475 if (KTRPOINT(p, KTR_SYSCALL)) 476 ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i); 477 #endif 478 rval[0] = 0; 479 rval[1] = locr0[V1]; 480 #ifdef DEBUG 481 if (trp == trapdebug) 482 trapdebug[TRAPSIZE - 1].code = code; 483 else 484 trp[-1].code = code; 485 #endif 486 #ifdef COMPAT_NEWSOS 487 /* 151 = setenvp, 152 = sysnews, 162 = getdomainname KU:XXX */ 488 if (code == 151 || code == 152 || code == 162) 489 i = 0; 490 else 491 #endif 492 i = (*callp->sy_call)(p, &args, rval); 493 if(i==EINVAL) 494 printf("EINVAL: pid=%d, code=%d\n", p->p_pid, code); 495 /* 496 * Reinitialize proc pointer `p' as it may be different 497 * if this is a child returning from fork syscall. 498 */ 499 p = curproc; 500 locr0 = p->p_md.md_regs; 501 #ifdef DEBUG 502 { int s; 503 s = splhigh(); 504 trp->status = statusReg; 505 trp->cause = causeReg; 506 trp->vadr = locr0[SP]; 507 trp->pc = locr0[PC]; 508 trp->ra = locr0[RA]; 509 trp->code = -code; 510 if (++trp == &trapdebug[TRAPSIZE]) 511 trp = trapdebug; 512 splx(s); 513 } 514 #endif 515 if (i == ERESTART) 516 locr0[PC] = pc; 517 else if (i != EJUSTRETURN) { 518 if (i) { 519 locr0[V0] = i; 520 locr0[A3] = 1; 521 } else { 522 locr0[V0] = rval[0]; 523 locr0[V1] = rval[1]; 524 locr0[A3] = 0; 525 } 526 } 527 /* else if (i == EJUSTRETURN) */ 528 /* nothing to do */ 529 done: 530 #ifdef KTRACE 531 if (KTRPOINT(p, KTR_SYSRET)) 532 ktrsysret(p->p_tracep, code, i, rval[0]); 533 #endif 534 535 goto out; 536 } 537 538 case T_BREAK+T_USER: 539 { 540 register unsigned va, instr; 541 542 /* compute address of break instruction */ 543 va = pc; 544 if ((int)causeReg < 0) 545 va += 4; 546 547 /* read break instruction */ 548 instr = fuiword((caddr_t)va); 549 #ifdef 
KADB 550 if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP) 551 goto err; 552 #endif 553 if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) { 554 i = SIGTRAP; 555 break; 556 } 557 558 /* restore original instruction and clear BP */ 559 i = suiword((caddr_t)va, p->p_md.md_ss_instr); 560 if (i < 0) { 561 vm_offset_t sa, ea; 562 int rv; 563 564 sa = trunc_page((vm_offset_t)va); 565 ea = round_page((vm_offset_t)va+sizeof(int)-1); 566 rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea, 567 VM_PROT_DEFAULT, FALSE); 568 if (rv == KERN_SUCCESS) { 569 i = suiword((caddr_t)va, p->p_md.md_ss_instr); 570 (void) vm_map_protect(&p->p_vmspace->vm_map, 571 sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, 572 FALSE); 573 } 574 } 575 if (i < 0) { 576 i = SIGTRAP; 577 break; 578 } 579 p->p_md.md_ss_addr = 0; 580 goto out; 581 } 582 583 case T_RES_INST+T_USER: 584 i = SIGILL; 585 break; 586 587 case T_COP_UNUSABLE+T_USER: 588 if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) { 589 i = SIGILL; /* only FPU instructions allowed */ 590 break; 591 } 592 MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs); 593 machFPCurProcPtr = p; 594 p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT; 595 p->p_md.md_flags |= MDP_FPUSED; 596 goto out; 597 598 case T_OVFLOW+T_USER: 599 i = SIGFPE; 600 break; 601 602 case T_ADDR_ERR_LD: /* misaligned access */ 603 case T_ADDR_ERR_ST: /* misaligned access */ 604 case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */ 605 if (i = ((struct pcb *)UADDR)->pcb_onfault) { 606 ((struct pcb *)UADDR)->pcb_onfault = 0; 607 return (onfault_table[i]); 608 } 609 /* FALLTHROUGH */ 610 611 default: 612 err: 613 #ifdef KADB 614 { 615 extern struct pcb kdbpcb; 616 617 if (USERMODE(statusReg)) 618 kdbpcb = p->p_addr->u_pcb; 619 else { 620 kdbpcb.pcb_regs[ZERO] = 0; 621 kdbpcb.pcb_regs[AST] = ((int *)&args)[2]; 622 kdbpcb.pcb_regs[V0] = ((int *)&args)[3]; 623 kdbpcb.pcb_regs[V1] = ((int *)&args)[4]; 624 kdbpcb.pcb_regs[A0] = ((int *)&args)[5]; 625 kdbpcb.pcb_regs[A1] = ((int *)&args)[6]; 
626 kdbpcb.pcb_regs[A2] = ((int *)&args)[7]; 627 kdbpcb.pcb_regs[A3] = ((int *)&args)[8]; 628 kdbpcb.pcb_regs[T0] = ((int *)&args)[9]; 629 kdbpcb.pcb_regs[T1] = ((int *)&args)[10]; 630 kdbpcb.pcb_regs[T2] = ((int *)&args)[11]; 631 kdbpcb.pcb_regs[T3] = ((int *)&args)[12]; 632 kdbpcb.pcb_regs[T4] = ((int *)&args)[13]; 633 kdbpcb.pcb_regs[T5] = ((int *)&args)[14]; 634 kdbpcb.pcb_regs[T6] = ((int *)&args)[15]; 635 kdbpcb.pcb_regs[T7] = ((int *)&args)[16]; 636 kdbpcb.pcb_regs[T8] = ((int *)&args)[17]; 637 kdbpcb.pcb_regs[T9] = ((int *)&args)[18]; 638 kdbpcb.pcb_regs[RA] = ((int *)&args)[19]; 639 kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21]; 640 kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22]; 641 kdbpcb.pcb_regs[PC] = pc; 642 kdbpcb.pcb_regs[SR] = statusReg; 643 bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int)); 644 } 645 if (kdb(causeReg, vadr, p, !USERMODE(statusReg))) 646 return (kdbpcb.pcb_regs[PC]); 647 } 648 #else 649 #ifdef DEBUG 650 trapDump("trap"); 651 #endif 652 #endif 653 panic("trap"); 654 } 655 printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid, 656 p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */ 657 trapsignal(p, i, ucode); 658 out: 659 /* 660 * Note: we should only get here if returning to user mode. 661 */ 662 /* take pending signals */ 663 while ((i = CURSIG(p)) != 0) 664 psig(i); 665 p->p_pri = p->p_usrpri; 666 astpending = 0; 667 if (want_resched) { 668 int s; 669 670 /* 671 * Since we are curproc, clock will normally just change 672 * our priority without moving us from one queue to another 673 * (since the running process is not on a queue.) 674 * If that happened after we setrq ourselves but before we 675 * swtch()'ed, we might not be on the queue indicated by 676 * our priority. 677 */ 678 s = splstatclock(); 679 setrq(p); 680 p->p_stats->p_ru.ru_nivcsw++; 681 swtch(); 682 splx(s); 683 while ((i = CURSIG(p)) != 0) 684 psig(i); 685 } 686 /* 687 * If profiling, charge system time to the trapped pc. 
688 */ 689 if (p->p_flag & SPROFIL) 690 addupc_task(p, pc, (int)(p->p_sticks - sticks)); 691 curpri = p->p_pri; 692 return (pc); 693 } 694 695 /* 696 * Handle an interrupt. 697 * Called from MachKernIntr() or MachUserIntr() 698 * Note: curproc might be NULL. 699 */ 700 interrupt(statusReg, causeReg, pc) 701 unsigned statusReg; /* status register at time of the exception */ 702 unsigned causeReg; /* cause register at time of exception */ 703 unsigned pc; /* program counter where to continue */ 704 { 705 register unsigned mask; 706 struct clockframe cf; 707 708 #ifdef DEBUG 709 trp->status = statusReg; 710 trp->cause = causeReg; 711 trp->vadr = 0; 712 trp->pc = pc; 713 trp->ra = 0; 714 trp->code = 0; 715 if (++trp == &trapdebug[TRAPSIZE]) 716 trp = trapdebug; 717 #endif 718 719 mask = causeReg & statusReg; /* pending interrupts & enable mask */ 720 #ifndef NOPRIORITY 721 if (mask & MACH_INT_MASK_5) { /* level 5 interrupt */ 722 splx((MACH_SPL_MASK_8 & ~causeReg) | MACH_SR_INT_ENA_CUR); 723 printf("level 5 interrupt: PC %x CR %x SR %x\n", 724 pc, causeReg, statusReg); 725 causeReg &= ~MACH_INT_MASK_5; 726 } 727 if (mask & MACH_INT_MASK_4) { /* level 4 interrupt */ 728 /* 729 * asynchronous bus error 730 */ 731 splx((MACH_SPL_MASK_7 & ~causeReg) | MACH_SR_INT_ENA_CUR); 732 printf("level 4 interrupt: PC %x CR %x SR %x\n", 733 pc, causeReg, statusReg); 734 *(char *)INTCLR0 = INTCLR0_BERR; 735 causeReg &= ~MACH_INT_MASK_4; 736 } 737 if (mask & MACH_INT_MASK_3) { /* level 3 interrupt */ 738 /* 739 * fp error 740 */ 741 splx((MACH_SPL_MASK_6 & ~causeReg) | MACH_SR_INT_ENA_CUR); 742 if (!USERMODE(statusReg)) { 743 #ifdef DEBUG 744 trapDump("fpintr"); 745 #else 746 printf("FPU interrupt: PC %x CR %x SR %x\n", 747 pc, causeReg, statusReg); 748 #endif 749 } else 750 MachFPInterrupt(statusReg, causeReg, pc); 751 causeReg &= ~MACH_INT_MASK_3; 752 } 753 if (mask & MACH_INT_MASK_2) { /* level 2 interrupt */ 754 register int stat; 755 756 splx((MACH_SPL_MASK_5 & ~causeReg) | 
MACH_SR_INT_ENA_CUR); 757 stat = *(volatile u_char *)INTST0; 758 if (stat & INTST0_TIMINT) { /* timer */ 759 static int led_count = 0; 760 761 *(volatile u_char *)INTCLR0 = INTCLR0_TIMINT; 762 cf.pc = pc; 763 cf.sr = statusReg; 764 hardclock(&cf); 765 if (++led_count > hz) { 766 led_count = 0; 767 *(volatile u_char *)DEBUG_PORT ^= DP_LED1; 768 } 769 } 770 #if NBM > 0 771 if (stat & INTST0_KBDINT) /* keyboard */ 772 kbm_rint(SCC_KEYBOARD); 773 #endif 774 #if NMS > 0 775 if (stat & INTST0_MSINT) /* mouse */ 776 kbm_rint(SCC_MOUSE); 777 #endif 778 causeReg &= ~MACH_INT_MASK_2; 779 } 780 if (mask & MACH_INT_MASK_1) { /* level 1 interrupt */ 781 splx((MACH_SPL_MASK_4 & ~causeReg) | MACH_SR_INT_ENA_CUR); 782 level1_intr(); 783 causeReg &= ~MACH_INT_MASK_1; 784 } 785 if (mask & MACH_INT_MASK_0) { /* level 0 interrupt */ 786 splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR); 787 level0_intr(); 788 causeReg &= ~MACH_INT_MASK_0; 789 } 790 splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR); 791 #else /* NOPRIORITY */ 792 /* handle clock interrupts ASAP */ 793 if (mask & MACH_INT_MASK_2) { /* level 2 interrupt */ 794 register int stat; 795 796 stat = *(volatile u_char *)INTST0; 797 if (stat & INTST0_TIMINT) { /* timer */ 798 static int led_count = 0; 799 800 *(volatile u_char *)INTCLR0 = INTCLR0_TIMINT; 801 cf.pc = pc; 802 cf.sr = statusReg; 803 hardclock(&cf); 804 if (++led_count > hz) { 805 led_count = 0; 806 *(volatile u_char *)DEBUG_PORT ^= DP_LED1; 807 } 808 } 809 #if NBM > 0 810 if (stat & INTST0_KBDINT) /* keyboard */ 811 kbm_rint(SCC_KEYBOARD); 812 #endif 813 #if NMS > 0 814 if (stat & INTST0_MSINT) /* mouse */ 815 kbm_rint(SCC_MOUSE); 816 #endif 817 causeReg &= ~MACH_INT_MASK_2; /* reenable clock interrupts */ 818 } 819 /* 820 * Enable hardware interrupts which were enabled but not pending. 821 * We only respond to software interrupts when returning to spl0. 
822 */ 823 splx((statusReg & ~causeReg & MACH_HARD_INT_MASK)|MACH_SR_INT_ENA_CUR); 824 825 if (mask & MACH_INT_MASK_5) { /* level 5 interrupt */ 826 printf("level 5 interrupt: PC %x CR %x SR %x\n", 827 pc, causeReg, statusReg); 828 ; 829 } 830 if (mask & MACH_INT_MASK_4) { /* level 4 interrupt */ 831 /* 832 * asynchronous bus error 833 */ 834 printf("level 4 interrupt: PC %x CR %x SR %x\n", 835 pc, causeReg, statusReg); 836 *(char *)INTCLR0 = INTCLR0_BERR; 837 } 838 if (mask & MACH_INT_MASK_3) { /* level 3 interrupt */ 839 /* 840 * fp error 841 */ 842 if (!USERMODE(statusReg)) { 843 #ifdef DEBUG 844 trapDump("fpintr"); 845 #else 846 printf("FPU interrupt: PC %x CR %x SR %x\n", 847 pc, causeReg, statusReg); 848 #endif 849 } else 850 MachFPInterrupt(statusReg, causeReg, pc); 851 } 852 if (mask & MACH_INT_MASK_1) /* level 1 interrupt */ 853 level1_intr(); 854 if (mask & MACH_INT_MASK_0) /* level 0 interrupt */ 855 level0_intr(); 856 #endif /* NOPRIORITY */ 857 if (mask & MACH_SOFT_INT_MASK_0) { 858 struct clockframe cf; 859 860 clearsoftclock(); 861 cnt.v_soft++; 862 cf.pc = pc; 863 cf.sr = statusReg; 864 softclock(); 865 } 866 /* process network interrupt if we trapped or will very soon */ 867 if ((mask & MACH_SOFT_INT_MASK_1) || 868 netisr && (statusReg & MACH_SOFT_INT_MASK_1)) { 869 clearsoftnet(); 870 cnt.v_soft++; 871 #ifdef INET 872 if (netisr & (1 << NETISR_ARP)) { 873 netisr &= ~(1 << NETISR_ARP); 874 arpintr(); 875 } 876 if (netisr & (1 << NETISR_IP)) { 877 netisr &= ~(1 << NETISR_IP); 878 ipintr(); 879 } 880 #endif 881 #ifdef NS 882 if (netisr & (1 << NETISR_NS)) { 883 netisr &= ~(1 << NETISR_NS); 884 nsintr(); 885 } 886 #endif 887 #ifdef ISO 888 if (netisr & (1 << NETISR_ISO)) { 889 netisr &= ~(1 << NETISR_ISO); 890 clnlintr(); 891 } 892 #endif 893 } 894 } 895 896 /* 897 * This is called from MachUserIntr() if astpending is set. 898 * This is very similar to the tail of trap(). 
899 */ 900 softintr(statusReg, pc) 901 unsigned statusReg; /* status register at time of the exception */ 902 unsigned pc; /* program counter where to continue */ 903 { 904 register struct proc *p = curproc; 905 int sig; 906 907 cnt.v_soft++; 908 /* take pending signals */ 909 while ((sig = CURSIG(p)) != 0) 910 psig(sig); 911 p->p_pri = p->p_usrpri; 912 astpending = 0; 913 if (p->p_flag & SOWEUPC) { 914 p->p_flag &= ~SOWEUPC; 915 ADDUPROF(p); 916 } 917 if (want_resched) { 918 int s; 919 920 /* 921 * Since we are curproc, clock will normally just change 922 * our priority without moving us from one queue to another 923 * (since the running process is not on a queue.) 924 * If that happened after we setrq ourselves but before we 925 * swtch()'ed, we might not be on the queue indicated by 926 * our priority. 927 */ 928 s = splstatclock(); 929 setrq(p); 930 p->p_stats->p_ru.ru_nivcsw++; 931 swtch(); 932 splx(s); 933 while ((sig = CURSIG(p)) != 0) 934 psig(sig); 935 } 936 curpri = p->p_pri; 937 } 938 939 #ifdef DEBUG 940 trapDump(msg) 941 char *msg; 942 { 943 register int i; 944 int s; 945 946 s = splhigh(); 947 printf("trapDump(%s)\n", msg); 948 for (i = 0; i < TRAPSIZE; i++) { 949 if (trp == trapdebug) 950 trp = &trapdebug[TRAPSIZE - 1]; 951 else 952 trp--; 953 if (trp->cause == 0) 954 break; 955 printf("%s: ADR %x PC %x CR %x SR %x\n", 956 trap_type[(trp->cause & MACH_CR_EXC_CODE) >> 957 MACH_CR_EXC_CODE_SHIFT], 958 trp->vadr, trp->pc, trp->cause, trp->status); 959 printf(" RA %x code %d\n", trp-> ra, trp->code); 960 } 961 bzero(trapdebug, sizeof(trapdebug)); 962 trp = trapdebug; 963 splx(s); 964 } 965 #endif 966 967 #ifdef X_KLUGE 968 /* 969 * This is a kludge to allow X windows to work. 
970 */ 971 caddr_t 972 vmUserMap(size, pa) 973 int size; 974 unsigned pa; 975 { 976 register caddr_t v; 977 unsigned off, entry; 978 979 if (nUserMapPtes == 0) 980 UserMapPid = curproc->p_pid; 981 else if (UserMapPid != curproc->p_pid) 982 return ((caddr_t)0); 983 off = pa & PGOFSET; 984 size = btoc(off + size); 985 if (nUserMapPtes + size > NPTES) 986 return ((caddr_t)0); 987 v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off); 988 entry = (pa & 0x9ffff000) | PG_V | PG_M; 989 if (pa >= MACH_UNCACHED_MEMORY_ADDR) 990 entry |= PG_N; 991 while (size > 0) { 992 UserMapPtes[nUserMapPtes].pt_entry = entry; 993 entry += NBPG; 994 nUserMapPtes++; 995 size--; 996 } 997 return (v); 998 } 999 1000 vmUserUnmap() 1001 { 1002 int id; 1003 1004 nUserMapPtes = 0; 1005 if (UserMapPid == curproc->p_pid) { 1006 id = curproc->p_vmspace->vm_pmap.pm_tlbpid; 1007 if (id >= 0) 1008 MachTLBFlushPID(id); 1009 } 1010 UserMapPid = 0; 1011 } 1012 #endif 1013 1014 /* 1015 *---------------------------------------------------------------------- 1016 * 1017 * MemErrorInterrupt -- 1018 * 1019 * Handler an interrupt for the control register. 1020 * 1021 * Results: 1022 * None. 1023 * 1024 * Side effects: 1025 * None. 1026 * 1027 *---------------------------------------------------------------------- 1028 */ 1029 #ifdef NOTDEF 1030 static void 1031 MemErrorInterrupt() 1032 { 1033 1034 } 1035 #endif 1036 1037 /* 1038 * Return the resulting PC as if the branch was executed. 
1039 */ 1040 unsigned 1041 MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch) 1042 unsigned *regsPtr; 1043 unsigned instPC; 1044 unsigned fpcCSR; 1045 int allowNonBranch; 1046 { 1047 InstFmt inst; 1048 unsigned retAddr; 1049 int condition; 1050 extern unsigned GetBranchDest(); 1051 1052 #if 0 1053 printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC, 1054 *(unsigned *)instPC, fpcCSR); 1055 #endif 1056 1057 inst = *(InstFmt *)instPC; 1058 switch ((int)inst.JType.op) { 1059 case OP_SPECIAL: 1060 switch ((int)inst.RType.func) { 1061 case OP_JR: 1062 case OP_JALR: 1063 retAddr = regsPtr[inst.RType.rs]; 1064 break; 1065 1066 default: 1067 if (!allowNonBranch) 1068 panic("MachEmulateBranch: Non-branch"); 1069 retAddr = instPC + 4; 1070 break; 1071 } 1072 break; 1073 1074 case OP_BCOND: 1075 switch ((int)inst.IType.rt) { 1076 case OP_BLTZ: 1077 case OP_BLTZAL: 1078 if ((int)(regsPtr[inst.RType.rs]) < 0) 1079 retAddr = GetBranchDest((InstFmt *)instPC); 1080 else 1081 retAddr = instPC + 8; 1082 break; 1083 1084 case OP_BGEZAL: 1085 case OP_BGEZ: 1086 if ((int)(regsPtr[inst.RType.rs]) >= 0) 1087 retAddr = GetBranchDest((InstFmt *)instPC); 1088 else 1089 retAddr = instPC + 8; 1090 break; 1091 1092 default: 1093 panic("MachEmulateBranch: Bad branch cond"); 1094 } 1095 break; 1096 1097 case OP_J: 1098 case OP_JAL: 1099 retAddr = (inst.JType.target << 2) | 1100 ((unsigned)instPC & 0xF0000000); 1101 break; 1102 1103 case OP_BEQ: 1104 if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt]) 1105 retAddr = GetBranchDest((InstFmt *)instPC); 1106 else 1107 retAddr = instPC + 8; 1108 break; 1109 1110 case OP_BNE: 1111 if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt]) 1112 retAddr = GetBranchDest((InstFmt *)instPC); 1113 else 1114 retAddr = instPC + 8; 1115 break; 1116 1117 case OP_BLEZ: 1118 if ((int)(regsPtr[inst.RType.rs]) <= 0) 1119 retAddr = GetBranchDest((InstFmt *)instPC); 1120 else 1121 retAddr = instPC + 8; 1122 break; 1123 1124 case OP_BGTZ: 1125 if 
((int)(regsPtr[inst.RType.rs]) > 0) 1126 retAddr = GetBranchDest((InstFmt *)instPC); 1127 else 1128 retAddr = instPC + 8; 1129 break; 1130 1131 case OP_COP1: 1132 switch (inst.RType.rs) { 1133 case OP_BCx: 1134 case OP_BCy: 1135 if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE) 1136 condition = fpcCSR & MACH_FPC_COND_BIT; 1137 else 1138 condition = !(fpcCSR & MACH_FPC_COND_BIT); 1139 if (condition) 1140 retAddr = GetBranchDest((InstFmt *)instPC); 1141 else 1142 retAddr = instPC + 8; 1143 break; 1144 1145 default: 1146 if (!allowNonBranch) 1147 panic("MachEmulateBranch: Bad coproc branch instruction"); 1148 retAddr = instPC + 4; 1149 } 1150 break; 1151 1152 default: 1153 if (!allowNonBranch) 1154 panic("MachEmulateBranch: Non-branch instruction"); 1155 retAddr = instPC + 4; 1156 } 1157 #if 0 1158 printf("Target addr=%x\n", retAddr); 1159 #endif 1160 return (retAddr); 1161 } 1162 1163 unsigned 1164 GetBranchDest(InstPtr) 1165 InstFmt *InstPtr; 1166 { 1167 return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2)); 1168 } 1169 1170 /* 1171 * This routine is called by procxmt() to single step one instruction. 1172 * We do this by storing a break instruction after the current instruction, 1173 * resuming execution, and then restoring the old instruction. 
1174 */ 1175 cpu_singlestep(p) 1176 register struct proc *p; 1177 { 1178 register unsigned va; 1179 register int *locr0 = p->p_md.md_regs; 1180 int i; 1181 1182 /* compute next address after current location */ 1183 va = MachEmulateBranch(locr0, locr0[PC], 0, 1); 1184 if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va || 1185 !useracc((caddr_t)va, 4, B_READ)) { 1186 printf("SS %s (%d): breakpoint already set at %x (va %x)\n", 1187 p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */ 1188 return (EFAULT); 1189 } 1190 p->p_md.md_ss_addr = va; 1191 p->p_md.md_ss_instr = fuiword((caddr_t)va); 1192 i = suiword((caddr_t)va, MACH_BREAK_SSTEP); 1193 if (i < 0) { 1194 vm_offset_t sa, ea; 1195 int rv; 1196 1197 sa = trunc_page((vm_offset_t)va); 1198 ea = round_page((vm_offset_t)va+sizeof(int)-1); 1199 rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea, 1200 VM_PROT_DEFAULT, FALSE); 1201 if (rv == KERN_SUCCESS) { 1202 i = suiword((caddr_t)va, MACH_BREAK_SSTEP); 1203 (void) vm_map_protect(&p->p_vmspace->vm_map, 1204 sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE); 1205 } 1206 } 1207 if (i < 0) 1208 return (EFAULT); 1209 printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n", 1210 p->p_comm, p->p_pid, p->p_md.md_ss_addr, 1211 p->p_md.md_ss_instr, locr0[PC]); /* XXX */ 1212 return (0); 1213 } 1214 1215 /* 1216 * news3400 - INT0 service routine. 
 *
 * Status bits serviced here (NOTE(review): the code reads INTST1;
 * the original comment said INTST0):
 * INTST1 bit 4: dma
 *	      3: slot #1
 *	      2: slot #3
 *	      1: external #1
 *	      0: external #3
 */

#define LEVEL0_MASK	\
	(INTST1_DMA|INTST1_SLOT1|INTST1_SLOT3|INTST1_EXT1|INTST1_EXT3)

/*
 * Level-0 interrupt dispatcher: read the pending status bits,
 * acknowledge them, then call each source's handler.
 */
level0_intr()
{
	register int stat;

	stat = *(volatile u_char *)INTST1 & LEVEL0_MASK;
	*(u_char *)INTCLR1 = stat;	/* acknowledge all bits we will handle */

	if (stat & INTST1_DMA)
		dma_intr();
	if (stat & INTST1_SLOT1)
		exec_hb_intr2();
#if NEN > 0
	if (stat & INTST1_SLOT3) {
		int s, t;

		/*
		 * Slot #3: try the lance first; if it did not claim the
		 * interrupt (returned 0), pass it on to the expansion slot.
		 */
		s = splimp();
		t = lance_intr();
		(void) splx(s);
		if (t == 0)
			exec_hb_intr4();
	}
#endif
#if NLE > 0
	if (stat & INTST1_SLOT3) {
		int s;

		s = splimp();
		leintr(0);
		(void) splx(s);
	}
#endif
	if (stat & INTST1_EXT1)
		print_int_stat("EXT #1");	/* no handler: just log it */
	if (stat & INTST1_EXT3)
		print_int_stat("EXT #3");	/* no handler: just log it */
}

/*
 * news3400 - INT1 service routine.
 *
 * INTST0 bit 1: centro fault
 *	      0: centro busy
 * INTST1 bit 7: beep
 *	      6: scc
 *	      5: lance
 */

#define LEVEL1_MASK2	(INTST0_CFLT|INTST0_CBSY)
#define LEVEL1_MASK1	(INTST1_BEEP|INTST1_SCC|INTST1_LANCE)

/*
 * Level-1 interrupt dispatcher (beep, scc, lance, centronics).
 * The pc argument (interrupted PC) is not used here.
 */
level1_intr(pc)
	unsigned pc;
{
	register int stat;
	register u_int saved_inten1 = *(u_char *)INTEN1;

	*(u_char *)INTEN1 = 0;	/* disable intr: beep, lance, scc */

	stat = *(volatile u_char *)INTST1 & LEVEL1_MASK1;
	*(u_char *)INTCLR1 = stat;	/* acknowledge */

	/* Only service sources that were actually enabled. */
	stat &= saved_inten1;

	if (stat & INTST1_BEEP) {
		*(volatile u_char *)INTCLR1 = INTCLR1_BEEP;
		print_int_stat("BEEP");	/* no handler: just log it */
	}
	if (stat & INTST1_SCC) {
		scc_intr();
		/* Another SCC interrupt may have been raised meanwhile. */
		if (saved_inten1 & *(u_char *)INTST1 & INTST1_SCC)
			scc_intr();
	}
#if NEN > 0
	if (stat & INTST1_LANCE)
		lance_intr();
#endif
#if NLE > 0
	if (stat & INTST1_LANCE)
		leintr(0);
#endif

	*(u_char *)INTEN1 = saved_inten1;	/* restore interrupt enables */

#if NLP > 0
	/*
	 * The PARK2 cannot find centro interrupt correctly.
	 * We must check it by reading the cause register of cpu
	 * while other interrupts are disabled.
	 */
	{
		register int causereg;
		int s = splhigh();

		causereg = get_causereg();
		(void) splx(s);

		/* No centronics interrupt actually pending; done. */
		if ((causereg & CAUSE_IP4) == 0)
			return;
	}
#endif

	/* Centronics (printer) status: read, acknowledge, dispatch. */
	stat = (int)(*(u_char *)INTST0) & LEVEL1_MASK2;
	*(u_char *)INTCLR0 = stat;

	if (stat & INTST0_CBSY)	/* centro busy */
#if NLP > 0
		lpxint(0);
#else
		printf("stray intr: CBSY\n");
#endif
}

/*
 * DMA interrupt service routine: dispatch the DMAC channel handlers
 * (SCSI, FDC, audio, video) according to the global status register.
 */
dma_intr()
{
	register volatile u_char *gsp = (u_char *)DMAC_GSTAT;
	register u_int gstat = *gsp;
	register int mrqb, i;

	/*
	 * When a DMA interrupt occurs there may remain some untransferred
	 * data; wait for the transfer to complete.  The mask of pending
	 * channels is shifted up one bit to form the corresponding
	 * request (MRQ) bits to poll.
	 */
	mrqb = (gstat & (CH0_INT|CH1_INT|CH2_INT|CH3_INT)) << 1;
	if (gstat & mrqb) {
		/*
		 * SHOULD USE DELAY()
		 */
		for (i = 0; i < 50; i++)
			;
		if (*gsp & mrqb)
			printf("dma_intr: MRQ\n");
	}

	/* SCSI Dispatch */
	if (gstat & CH_INT(CH_SCSI))
		scintr();

#include "fd.h"
#if NFD > 0
	/* FDC Interrupt Dispatch */
	if (gstat & CH_INT(CH_FDC))
		fdc_intr(0);
#endif /* NFD > 0 */

#include "sb.h"
#if NSB > 0
	/* Audio Interface Dispatch */
	sbintr(0);
#endif /* NSB > 0 */

	/* Video I/F Dispatch: nothing to do for this channel. */
	if (gstat & CH_INT(CH_VIDEO))
		;
}

/*
 * SCC vector interrupt service routine.
1388 */ 1389 scc_intr() 1390 { 1391 int vec; 1392 extern int scc_xint(), scc_sint(), scc_rint(), scc_cint(); 1393 static int (*func[])() = { 1394 scc_xint, 1395 scc_sint, 1396 scc_rint, 1397 scc_cint 1398 }; 1399 1400 vec = *(volatile u_char *)SCCVECT; 1401 (*func[(vec & SCC_INT_MASK) >> 1])(vec); 1402 } 1403 1404 print_int_stat(msg) 1405 char *msg; 1406 { 1407 int s0 = *(volatile u_char *)INTST0; 1408 int s1 = *(volatile u_char *)INTST1; 1409 1410 if (msg) 1411 printf("%s: ", msg); 1412 else 1413 printf("intr: "); 1414 printf("INTST0=0x%x, INTST1=0x%x.\n", s0, s1); 1415 } 1416