1 /* 2 * Copyright (c) 1988 University of Utah. 3 * Copyright (c) 1992 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * the Systems Programming Group of the University of Utah Computer 8 * Science Department, Ralph Campbell, Sony Corp. and Kazumasa Utashiro 9 * of Software Research Associates, Inc. 10 * 11 * %sccs.include.redist.c% 12 * 13 * from: Utah $Hdr: trap.c 1.32 91/04/06$ 14 * 15 * @(#)trap.c 7.6 (Berkeley) 01/20/93 16 */ 17 18 #include <machine/fix_machine_type.h> 19 #include <sys/param.h> 20 #include <sys/systm.h> 21 #include <sys/proc.h> 22 #include <sys/kernel.h> 23 #include <sys/signalvar.h> 24 #include <sys/syscall.h> 25 #include <sys/user.h> 26 #include <sys/buf.h> 27 #ifdef KTRACE 28 #include <sys/ktrace.h> 29 #endif 30 #include <net/netisr.h> 31 32 #include <machine/trap.h> 33 #include <machine/psl.h> 34 #include <machine/reg.h> 35 #include <machine/cpu.h> 36 #include <machine/pte.h> 37 #include <machine/mips_opcode.h> 38 #include <machine/adrsmap.h> 39 40 #include <vm/vm.h> 41 #include <vm/vm_kern.h> 42 #include <vm/vm_page.h> 43 44 #include "lp.h" 45 #include "bm.h" 46 #include "ms.h" 47 #include "en.h" 48 #include <news3400/hbdev/dmac_0448.h> 49 #include <news3400/sio/scc.h> 50 51 /* 52 * This is a kludge to allow X windows to work. 53 */ 54 #undef X_KLUGE 55 56 #ifdef X_KLUGE 57 #define USER_MAP_ADDR 0x4000 58 #define NPTES 300 59 static pt_entry_t UserMapPtes[NPTES]; 60 static unsigned nUserMapPtes; 61 static pid_t UserMapPid; 62 #endif 63 64 struct proc *machFPCurProcPtr; /* pointer to last proc to use FP */ 65 66 extern void MachKernGenException(); 67 extern void MachUserGenException(); 68 extern void MachKernIntr(); 69 extern void MachUserIntr(); 70 extern void MachTLBModException(); 71 extern void MachTLBMissException(); 72 extern unsigned MachEmulateBranch(); 73 74 void (*machExceptionTable[])() = { 75 /* 76 * The kernel exception handlers. 
77 */ 78 MachKernIntr, /* external interrupt */ 79 MachKernGenException, /* TLB modification */ 80 MachTLBMissException, /* TLB miss (load or instr. fetch) */ 81 MachTLBMissException, /* TLB miss (store) */ 82 MachKernGenException, /* address error (load or I-fetch) */ 83 MachKernGenException, /* address error (store) */ 84 MachKernGenException, /* bus error (I-fetch) */ 85 MachKernGenException, /* bus error (load or store) */ 86 MachKernGenException, /* system call */ 87 MachKernGenException, /* breakpoint */ 88 MachKernGenException, /* reserved instruction */ 89 MachKernGenException, /* coprocessor unusable */ 90 MachKernGenException, /* arithmetic overflow */ 91 MachKernGenException, /* reserved */ 92 MachKernGenException, /* reserved */ 93 MachKernGenException, /* reserved */ 94 /* 95 * The user exception handlers. 96 */ 97 MachUserIntr, 98 MachUserGenException, 99 MachUserGenException, 100 MachUserGenException, 101 MachUserGenException, 102 MachUserGenException, 103 MachUserGenException, 104 MachUserGenException, 105 MachUserGenException, 106 MachUserGenException, 107 MachUserGenException, 108 MachUserGenException, 109 MachUserGenException, 110 MachUserGenException, 111 MachUserGenException, 112 MachUserGenException, 113 }; 114 115 char *trap_type[] = { 116 "external interrupt", 117 "TLB modification", 118 "TLB miss (load or instr. 
fetch)", 119 "TLB miss (store)", 120 "address error (load or I-fetch)", 121 "address error (store)", 122 "bus error (I-fetch)", 123 "bus error (load or store)", 124 "system call", 125 "breakpoint", 126 "reserved instruction", 127 "coprocessor unusable", 128 "arithmetic overflow", 129 "reserved 13", 130 "reserved 14", 131 "reserved 15", 132 }; 133 134 #ifdef DEBUG 135 #define TRAPSIZE 10 136 struct trapdebug { /* trap history buffer for debugging */ 137 u_int status; 138 u_int cause; 139 u_int vadr; 140 u_int pc; 141 u_int ra; 142 u_int code; 143 } trapdebug[TRAPSIZE], *trp = trapdebug; 144 #endif 145 146 /* 147 * Handle an exception. 148 * Called from MachKernGenException() or MachUserGenException() 149 * when a processor trap occurs. 150 * In the case of a kernel trap, we return the pc where to resume if 151 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc. 152 */ 153 unsigned 154 trap(statusReg, causeReg, vadr, pc, args) 155 unsigned statusReg; /* status register at time of the exception */ 156 unsigned causeReg; /* cause register at time of exception */ 157 unsigned vadr; /* address (if any) the fault occured on */ 158 unsigned pc; /* program counter where to continue */ 159 { 160 register int type, i; 161 unsigned ucode = 0; 162 register struct proc *p = curproc; 163 u_quad_t sticks; 164 vm_prot_t ftype; 165 extern unsigned onfault_table[]; 166 167 #ifdef DEBUG 168 trp->status = statusReg; 169 trp->cause = causeReg; 170 trp->vadr = vadr; 171 trp->pc = pc; 172 trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] : 173 p->p_md.md_regs[RA]; 174 trp->code = 0; 175 if (++trp == &trapdebug[TRAPSIZE]) 176 trp = trapdebug; 177 #endif 178 179 cnt.v_trap++; 180 type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT; 181 if (USERMODE(statusReg)) { 182 type |= T_USER; 183 sticks = p->p_sticks; 184 } 185 186 /* 187 * Enable hardware interrupts if they were on before. 188 * We only respond to software interrupts when returning to user mode. 
189 */ 190 if (statusReg & MACH_SR_INT_ENA_PREV) 191 splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR); 192 193 switch (type) { 194 case T_TLB_MOD: 195 /* check for kernel address */ 196 if ((int)vadr < 0) { 197 register pt_entry_t *pte; 198 register unsigned entry; 199 #ifndef ATTR 200 register vm_offset_t pa; 201 #endif 202 203 pte = kvtopte(vadr); 204 entry = pte->pt_entry; 205 if (entry & PG_RO) { 206 /* write to read only page in the kernel */ 207 ftype = VM_PROT_WRITE; 208 goto kernel_fault; 209 } 210 entry |= PG_M; 211 pte->pt_entry = entry; 212 vadr &= PG_FRAME; 213 printf("trap: TLBupdate hi %x lo %x i %x\n", vadr, 214 entry, MachTLBUpdate(vadr, entry)); /* XXX */ 215 #ifdef ATTR 216 pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD; 217 #else 218 pa = entry & PG_FRAME; 219 if (!IS_VM_PHYSADDR(pa)) 220 panic("trap: kmod"); 221 PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN; 222 #endif 223 return (pc); 224 } 225 /* FALLTHROUGH */ 226 227 case T_TLB_MOD+T_USER: 228 { 229 pmap_hash_t hp; 230 #ifndef ATTR 231 vm_offset_t pa; 232 #endif 233 #ifdef DIAGNOSTIC 234 extern pmap_hash_t zero_pmap_hash; 235 extern pmap_t cur_pmap; 236 237 if (cur_pmap->pm_hash == zero_pmap_hash || 238 cur_pmap->pm_hash == (pmap_hash_t)0) 239 panic("tlbmod"); 240 #endif 241 hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)]; 242 if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0) 243 i = 0; 244 else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0) 245 i = 1; 246 else 247 panic("trap: tlb umod not found"); 248 if (hp->pmh_pte[i].low & PG_RO) { 249 ftype = VM_PROT_WRITE; 250 goto dofault; 251 } 252 hp->pmh_pte[i].low |= PG_M; 253 printf("trap: TLBupdate hi %x lo %x i %x\n", 254 hp->pmh_pte[i].high, hp->pmh_pte[i].low, 255 MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low)); /* XXX */ 256 #ifdef ATTR 257 pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |= 258 PMAP_ATTR_MOD; 259 #else 260 pa = hp->pmh_pte[i].low & PG_FRAME; 261 if (!IS_VM_PHYSADDR(pa)) 262 
panic("trap: umod"); 263 PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN; 264 #endif 265 if (!USERMODE(statusReg)) 266 return (pc); 267 goto out; 268 } 269 270 case T_TLB_LD_MISS: 271 case T_TLB_ST_MISS: 272 ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ; 273 /* check for kernel address */ 274 if ((int)vadr < 0) { 275 register vm_offset_t va; 276 int rv; 277 278 kernel_fault: 279 va = trunc_page((vm_offset_t)vadr); 280 rv = vm_fault(kernel_map, va, ftype, FALSE); 281 if (rv == KERN_SUCCESS) 282 return (pc); 283 if (i = ((struct pcb *)UADDR)->pcb_onfault) { 284 ((struct pcb *)UADDR)->pcb_onfault = 0; 285 return (onfault_table[i]); 286 } 287 goto err; 288 } 289 /* 290 * It is an error for the kernel to access user space except 291 * through the copyin/copyout routines. 292 */ 293 if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0) 294 goto err; 295 /* check for fuswintr() or suswintr() getting a page fault */ 296 if (i == 4) 297 return (onfault_table[i]); 298 goto dofault; 299 300 case T_TLB_LD_MISS+T_USER: 301 ftype = VM_PROT_READ; 302 goto dofault; 303 304 case T_TLB_ST_MISS+T_USER: 305 ftype = VM_PROT_WRITE; 306 dofault: 307 { 308 register vm_offset_t va; 309 register struct vmspace *vm = p->p_vmspace; 310 register vm_map_t map = &vm->vm_map; 311 int rv; 312 313 #ifdef X_KLUGE 314 if (p->p_pid == UserMapPid && 315 (va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) { 316 register pt_entry_t *pte; 317 318 pte = &UserMapPtes[va]; 319 MachTLBWriteRandom((vadr & PG_FRAME) | 320 (vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT), 321 pte->pt_entry); 322 return (pc); 323 } 324 #endif 325 va = trunc_page((vm_offset_t)vadr); 326 rv = vm_fault(map, va, ftype, FALSE); 327 if (rv != KERN_SUCCESS) { 328 printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n", 329 map, va, ftype, rv, vadr, pc, 330 !USERMODE(statusReg) ? 
((int *)&args)[19] : 331 p->p_md.md_regs[RA]); /* XXX */ 332 printf("\tpid %d %s PC %x RA %x\n", p->p_pid, 333 p->p_comm, p->p_md.md_regs[PC], 334 p->p_md.md_regs[RA]); /* XXX */ 335 #ifdef DEBUG 336 trapDump("vm_fault"); 337 #endif 338 } 339 /* 340 * If this was a stack access we keep track of the maximum 341 * accessed stack size. Also, if vm_fault gets a protection 342 * failure it is due to accessing the stack region outside 343 * the current limit and we need to reflect that as an access 344 * error. 345 */ 346 if ((caddr_t)va >= vm->vm_maxsaddr) { 347 if (rv == KERN_SUCCESS) { 348 unsigned nss; 349 350 nss = clrnd(btoc(USRSTACK-(unsigned)va)); 351 if (nss > vm->vm_ssize) 352 vm->vm_ssize = nss; 353 } else if (rv == KERN_PROTECTION_FAILURE) 354 rv = KERN_INVALID_ADDRESS; 355 } 356 if (rv == KERN_SUCCESS) { 357 if (!USERMODE(statusReg)) 358 return (pc); 359 goto out; 360 } 361 if (!USERMODE(statusReg)) { 362 if (i = ((struct pcb *)UADDR)->pcb_onfault) { 363 ((struct pcb *)UADDR)->pcb_onfault = 0; 364 return (onfault_table[i]); 365 } 366 goto err; 367 } 368 ucode = vadr; 369 i = (rv == KERN_PROTECTION_FAILURE) ? 
SIGBUS : SIGSEGV; 370 break; 371 } 372 373 case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */ 374 case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */ 375 case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to cpu */ 376 case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to cpu */ 377 i = SIGSEGV; 378 break; 379 380 case T_SYSCALL+T_USER: 381 { 382 register int *locr0 = p->p_md.md_regs; 383 register struct sysent *callp; 384 unsigned int code; 385 int numsys; 386 struct args { 387 int i[8]; 388 } args; 389 int rval[2]; 390 struct sysent *systab; 391 extern int nsysent; 392 393 cnt.v_syscall++; 394 /* compute next PC after syscall instruction */ 395 if ((int)causeReg < 0) 396 locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0); 397 else 398 locr0[PC] += 4; 399 systab = sysent; 400 numsys = nsysent; 401 code = locr0[V0]; 402 #ifdef COMPAT_NEWSOS 403 if (code >= 1000) 404 code -= 1000; /* too easy */ 405 #endif 406 switch (code) { 407 case SYS_indir: 408 /* 409 * Code is first argument, followed by actual args. 410 */ 411 code = locr0[A0]; 412 #ifdef COMPAT_NEWSOS 413 if (code >= 1000) 414 code -= 1000; /* too easy */ 415 #endif 416 if (code >= numsys) 417 callp = &systab[SYS_indir]; /* (illegal) */ 418 else 419 callp = &systab[code]; 420 i = callp->sy_narg; 421 args.i[0] = locr0[A1]; 422 args.i[1] = locr0[A2]; 423 args.i[2] = locr0[A3]; 424 if (i > 3) { 425 i = copyin((caddr_t)(locr0[SP] + 426 4 * sizeof(int)), 427 (caddr_t)&args.i[3], 428 (u_int)(i - 3) * sizeof(int)); 429 if (i) { 430 locr0[V0] = i; 431 locr0[A3] = 1; 432 #ifdef KTRACE 433 if (KTRPOINT(p, KTR_SYSCALL)) 434 ktrsyscall(p->p_tracep, code, 435 callp->sy_narg, args.i); 436 #endif 437 goto done; 438 } 439 } 440 break; 441 442 case SYS___indir: 443 /* 444 * Like indir, but code is a quad, so as to maintain 445 * quad alignment for the rest of the arguments. 
446 */ 447 code = locr0[A0 + _QUAD_LOWWORD]; 448 if (code >= numsys) 449 callp = &systab[SYS_indir]; /* (illegal) */ 450 else 451 callp = &systab[code]; 452 i = callp->sy_narg; 453 args.i[0] = locr0[A2]; 454 args.i[1] = locr0[A3]; 455 if (i > 2) { 456 i = copyin((caddr_t)(locr0[SP] + 457 4 * sizeof(int)), 458 (caddr_t)&args.i[2], 459 (u_int)(i - 2) * sizeof(int)); 460 if (i) { 461 locr0[V0] = i; 462 locr0[A3] = 1; 463 #ifdef KTRACE 464 if (KTRPOINT(p, KTR_SYSCALL)) 465 ktrsyscall(p->p_tracep, code, 466 callp->sy_narg, args.i); 467 #endif 468 goto done; 469 } 470 } 471 break; 472 473 default: 474 if (code >= numsys) 475 callp = &systab[SYS_indir]; /* (illegal) */ 476 else 477 callp = &systab[code]; 478 i = callp->sy_narg; 479 args.i[0] = locr0[A0]; 480 args.i[1] = locr0[A1]; 481 args.i[2] = locr0[A2]; 482 args.i[3] = locr0[A3]; 483 if (i > 4) { 484 i = copyin((caddr_t)(locr0[SP] + 485 4 * sizeof(int)), 486 (caddr_t)&args.i[4], 487 (u_int)(i - 4) * sizeof(int)); 488 if (i) { 489 locr0[V0] = i; 490 locr0[A3] = 1; 491 #ifdef KTRACE 492 if (KTRPOINT(p, KTR_SYSCALL)) 493 ktrsyscall(p->p_tracep, code, 494 callp->sy_narg, args.i); 495 #endif 496 goto done; 497 } 498 } 499 } 500 #ifdef KTRACE 501 if (KTRPOINT(p, KTR_SYSCALL)) 502 ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i); 503 #endif 504 rval[0] = 0; 505 rval[1] = locr0[V1]; 506 #ifdef DEBUG 507 if (trp == trapdebug) 508 trapdebug[TRAPSIZE - 1].code = code; 509 else 510 trp[-1].code = code; 511 #endif 512 #ifdef COMPAT_NEWSOS 513 /* 151 = setenvp, 152 = sysnews, 162 = getdomainname KU:XXX */ 514 if (code == 151 || code == 152 || code == 162) 515 i = 0; 516 else 517 #endif 518 i = (*callp->sy_call)(p, &args, rval); 519 if(i==EINVAL) 520 printf("EINVAL: pid=%d, code=%d\n", p->p_pid, code); 521 /* 522 * Reinitialize proc pointer `p' as it may be different 523 * if this is a child returning from fork syscall. 
524 */ 525 p = curproc; 526 locr0 = p->p_md.md_regs; 527 #ifdef DEBUG 528 { int s; 529 s = splhigh(); 530 trp->status = statusReg; 531 trp->cause = causeReg; 532 trp->vadr = locr0[SP]; 533 trp->pc = locr0[PC]; 534 trp->ra = locr0[RA]; 535 trp->code = -code; 536 if (++trp == &trapdebug[TRAPSIZE]) 537 trp = trapdebug; 538 splx(s); 539 } 540 #endif 541 switch (i) { 542 case 0: 543 locr0[V0] = rval[0]; 544 locr0[V1] = rval[1]; 545 locr0[A3] = 0; 546 break; 547 548 case ERESTART: 549 locr0[PC] = pc; 550 break; 551 552 case EJUSTRETURN: 553 break; /* nothing to do */ 554 555 default: 556 locr0[V0] = i; 557 locr0[A3] = 1; 558 } 559 done: 560 #ifdef KTRACE 561 if (KTRPOINT(p, KTR_SYSRET)) 562 ktrsysret(p->p_tracep, code, i, rval[0]); 563 #endif 564 565 goto out; 566 } 567 568 case T_BREAK+T_USER: 569 { 570 register unsigned va, instr; 571 572 /* compute address of break instruction */ 573 va = pc; 574 if ((int)causeReg < 0) 575 va += 4; 576 577 /* read break instruction */ 578 instr = fuiword((caddr_t)va); 579 #ifdef KADB 580 if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP) 581 goto err; 582 #endif 583 if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) { 584 i = SIGTRAP; 585 break; 586 } 587 588 /* restore original instruction and clear BP */ 589 i = suiword((caddr_t)va, p->p_md.md_ss_instr); 590 if (i < 0) { 591 vm_offset_t sa, ea; 592 int rv; 593 594 sa = trunc_page((vm_offset_t)va); 595 ea = round_page((vm_offset_t)va+sizeof(int)-1); 596 rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea, 597 VM_PROT_DEFAULT, FALSE); 598 if (rv == KERN_SUCCESS) { 599 i = suiword((caddr_t)va, p->p_md.md_ss_instr); 600 (void) vm_map_protect(&p->p_vmspace->vm_map, 601 sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, 602 FALSE); 603 } 604 } 605 if (i < 0) { 606 i = SIGTRAP; 607 break; 608 } 609 p->p_md.md_ss_addr = 0; 610 goto out; 611 } 612 613 case T_RES_INST+T_USER: 614 i = SIGILL; 615 break; 616 617 case T_COP_UNUSABLE+T_USER: 618 if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) { 
619 i = SIGILL; /* only FPU instructions allowed */ 620 break; 621 } 622 MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs); 623 machFPCurProcPtr = p; 624 p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT; 625 p->p_md.md_flags |= MDP_FPUSED; 626 goto out; 627 628 case T_OVFLOW+T_USER: 629 i = SIGFPE; 630 break; 631 632 case T_ADDR_ERR_LD: /* misaligned access */ 633 case T_ADDR_ERR_ST: /* misaligned access */ 634 case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */ 635 if (i = ((struct pcb *)UADDR)->pcb_onfault) { 636 ((struct pcb *)UADDR)->pcb_onfault = 0; 637 return (onfault_table[i]); 638 } 639 /* FALLTHROUGH */ 640 641 default: 642 err: 643 #ifdef KADB 644 { 645 extern struct pcb kdbpcb; 646 647 if (USERMODE(statusReg)) 648 kdbpcb = p->p_addr->u_pcb; 649 else { 650 kdbpcb.pcb_regs[ZERO] = 0; 651 kdbpcb.pcb_regs[AST] = ((int *)&args)[2]; 652 kdbpcb.pcb_regs[V0] = ((int *)&args)[3]; 653 kdbpcb.pcb_regs[V1] = ((int *)&args)[4]; 654 kdbpcb.pcb_regs[A0] = ((int *)&args)[5]; 655 kdbpcb.pcb_regs[A1] = ((int *)&args)[6]; 656 kdbpcb.pcb_regs[A2] = ((int *)&args)[7]; 657 kdbpcb.pcb_regs[A3] = ((int *)&args)[8]; 658 kdbpcb.pcb_regs[T0] = ((int *)&args)[9]; 659 kdbpcb.pcb_regs[T1] = ((int *)&args)[10]; 660 kdbpcb.pcb_regs[T2] = ((int *)&args)[11]; 661 kdbpcb.pcb_regs[T3] = ((int *)&args)[12]; 662 kdbpcb.pcb_regs[T4] = ((int *)&args)[13]; 663 kdbpcb.pcb_regs[T5] = ((int *)&args)[14]; 664 kdbpcb.pcb_regs[T6] = ((int *)&args)[15]; 665 kdbpcb.pcb_regs[T7] = ((int *)&args)[16]; 666 kdbpcb.pcb_regs[T8] = ((int *)&args)[17]; 667 kdbpcb.pcb_regs[T9] = ((int *)&args)[18]; 668 kdbpcb.pcb_regs[RA] = ((int *)&args)[19]; 669 kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21]; 670 kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22]; 671 kdbpcb.pcb_regs[PC] = pc; 672 kdbpcb.pcb_regs[SR] = statusReg; 673 bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int)); 674 } 675 if (kdb(causeReg, vadr, p, !USERMODE(statusReg))) 676 return (kdbpcb.pcb_regs[PC]); 677 } 678 #else 679 #ifdef DEBUG 680 trapDump("trap"); 681 
#endif 682 #endif 683 panic("trap"); 684 } 685 printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid, 686 p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */ 687 trapsignal(p, i, ucode); 688 out: 689 /* 690 * Note: we should only get here if returning to user mode. 691 */ 692 /* take pending signals */ 693 while ((i = CURSIG(p)) != 0) 694 psig(i); 695 p->p_pri = p->p_usrpri; 696 astpending = 0; 697 if (want_resched) { 698 int s; 699 700 /* 701 * Since we are curproc, clock will normally just change 702 * our priority without moving us from one queue to another 703 * (since the running process is not on a queue.) 704 * If that happened after we setrq ourselves but before we 705 * swtch()'ed, we might not be on the queue indicated by 706 * our priority. 707 */ 708 s = splstatclock(); 709 setrq(p); 710 p->p_stats->p_ru.ru_nivcsw++; 711 swtch(); 712 splx(s); 713 while ((i = CURSIG(p)) != 0) 714 psig(i); 715 } 716 /* 717 * If profiling, charge system time to the trapped pc. 718 */ 719 if (p->p_flag & SPROFIL) { 720 extern int psratio; 721 722 addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio); 723 } 724 curpri = p->p_pri; 725 return (pc); 726 } 727 728 /* 729 * Handle an interrupt. 730 * Called from MachKernIntr() or MachUserIntr() 731 * Note: curproc might be NULL. 
 */
interrupt(statusReg, causeReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned pc;		/* program counter where to continue */
{
	register unsigned mask;
	struct clockframe cf;

#ifdef DEBUG
	/* Record this interrupt in the circular trap history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = 0;
	trp->pc = pc;
	trp->ra = 0;
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
#ifndef NOPRIORITY
	/*
	 * Prioritized dispatch: service each pending level from highest
	 * (5) down to lowest (0).  Before handling a level, splx() raises
	 * the CPU mask so only strictly higher levels can preempt, and the
	 * serviced bit is cleared from causeReg so the final splx() below
	 * does not re-dispatch it.
	 */
	if (mask & MACH_INT_MASK_5) {		/* level 5 interrupt */
		splx((MACH_SPL_MASK_8 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		printf("level 5 interrupt: PC %x CR %x SR %x\n",
			pc, causeReg, statusReg);
		causeReg &= ~MACH_INT_MASK_5;
	}
	if (mask & MACH_INT_MASK_4) {		/* level 4 interrupt */
		/*
		 * asynchronous bus error
		 */
		splx((MACH_SPL_MASK_7 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		printf("level 4 interrupt: PC %x CR %x SR %x\n",
			pc, causeReg, statusReg);
		*(char *)INTCLR0 = INTCLR0_BERR;	/* ack the bus error */
		causeReg &= ~MACH_INT_MASK_4;
	}
	if (mask & MACH_INT_MASK_3) {		/* level 3 interrupt */
		/*
		 * fp error
		 */
		splx((MACH_SPL_MASK_6 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		if (!USERMODE(statusReg)) {
			/* FP interrupt from kernel mode: just report it. */
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
		causeReg &= ~MACH_INT_MASK_3;
	}
	if (mask & MACH_INT_MASK_2) {		/* level 2 interrupt */
		register int stat;

		splx((MACH_SPL_MASK_5 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		stat = *(volatile u_char *)INTST0;
		if (stat & INTST0_TIMINT) {	/* timer */
			static int led_count = 0;

			*(volatile u_char *)INTCLR0 = INTCLR0_TIMINT;
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
			/* Blink the front-panel LED roughly once per second. */
			if (++led_count > hz) {
				led_count = 0;
				*(volatile u_char *)DEBUG_PORT ^= DP_LED1;
			}
		}
#if NBM > 0
		if (stat & INTST0_KBDINT)	/* keyboard */
			kbm_rint(SCC_KEYBOARD);
#endif
#if NMS > 0
		if (stat & INTST0_MSINT)	/* mouse */
			kbm_rint(SCC_MOUSE);
#endif
		causeReg &= ~MACH_INT_MASK_2;
	}
	if (mask & MACH_INT_MASK_1) {		/* level 1 interrupt */
		splx((MACH_SPL_MASK_4 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		level1_intr();
		causeReg &= ~MACH_INT_MASK_1;
	}
	if (mask & MACH_INT_MASK_0) {		/* level 0 interrupt */
		splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		level0_intr();
		causeReg &= ~MACH_INT_MASK_0;
	}
	splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR);
#else /* NOPRIORITY */
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_2) {		/* level 2 interrupt */
		register int stat;

		stat = *(volatile u_char *)INTST0;
		if (stat & INTST0_TIMINT) {	/* timer */
			static int led_count = 0;

			*(volatile u_char *)INTCLR0 = INTCLR0_TIMINT;
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
			/* Blink the front-panel LED roughly once per second. */
			if (++led_count > hz) {
				led_count = 0;
				*(volatile u_char *)DEBUG_PORT ^= DP_LED1;
			}
		}
#if NBM > 0
		if (stat & INTST0_KBDINT)	/* keyboard */
			kbm_rint(SCC_KEYBOARD);
#endif
#if NMS > 0
		if (stat & INTST0_MSINT)	/* mouse */
			kbm_rint(SCC_MOUSE);
#endif
		causeReg &= ~MACH_INT_MASK_2;	/* reenable clock interrupts */
	}
	/*
	 * Enable hardware interrupts which were enabled but not pending.
	 * We only respond to software interrupts when returning to spl0.
	 */
	splx((statusReg & ~causeReg & MACH_HARD_INT_MASK)|MACH_SR_INT_ENA_CUR);

	if (mask & MACH_INT_MASK_5) {		/* level 5 interrupt */
		printf("level 5 interrupt: PC %x CR %x SR %x\n",
			pc, causeReg, statusReg);
		;
	}
	if (mask & MACH_INT_MASK_4) {		/* level 4 interrupt */
		/*
		 * asynchronous bus error
		 */
		printf("level 4 interrupt: PC %x CR %x SR %x\n",
			pc, causeReg, statusReg);
		*(char *)INTCLR0 = INTCLR0_BERR;	/* ack the bus error */
	}
	if (mask & MACH_INT_MASK_3) {		/* level 3 interrupt */
		/*
		 * fp error
		 */
		if (!USERMODE(statusReg)) {
			/* FP interrupt from kernel mode: just report it. */
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
	}
	if (mask & MACH_INT_MASK_1)		/* level 1 interrupt */
		level1_intr();
	if (mask & MACH_INT_MASK_0)		/* level 0 interrupt */
		level0_intr();
#endif /* NOPRIORITY */
	if (mask & MACH_SOFT_INT_MASK_0) {
		/*
		 * Software clock interrupt.  NOTE(review): this local cf
		 * shadows the outer one and is filled in but never passed
		 * to softclock(), which is called with no arguments here --
		 * the assignments appear to be vestigial; confirm against
		 * the softclock() prototype in use.
		 */
		struct clockframe cf;

		clearsoftclock();
		cnt.v_soft++;
		cf.pc = pc;
		cf.sr = statusReg;
		softclock();
	}
	/* process network interrupt if we trapped or will very soon */
	if ((mask & MACH_SOFT_INT_MASK_1) ||
	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
		clearsoftnet();
		cnt.v_soft++;
#ifdef INET
		if (netisr & (1 << NETISR_ARP)) {
			netisr &= ~(1 << NETISR_ARP);
			arpintr();
		}
		if (netisr & (1 << NETISR_IP)) {
			netisr &= ~(1 << NETISR_IP);
			ipintr();
		}
#endif
#ifdef NS
		if (netisr & (1 << NETISR_NS)) {
			netisr &= ~(1 << NETISR_NS);
			nsintr();
		}
#endif
#ifdef ISO
		if (netisr & (1 << NETISR_ISO)) {
			netisr &= ~(1 << NETISR_ISO);
			clnlintr();
		}
#endif
	}
}

/*
 * This is called from MachUserIntr() if astpending is set.
 * This is very similar to the tail of trap().
 */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	int sig;

	cnt.v_soft++;
	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (p->p_flag & SOWEUPC) {
		/* Post deferred profiling tick charged by the clock. */
		p->p_flag &= ~SOWEUPC;
		ADDUPROF(p);
	}
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		/* take signals that may have arrived while switched out */
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}
	curpri = p->p_pri;
}

#ifdef DEBUG
/*
 * Dump the trap history buffer to the console, newest entry first,
 * stopping at the first unused (cause == 0) slot, then clear the buffer.
 * Runs at splhigh() so entries are not overwritten while printing.
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* walk backwards through the circular buffer */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf("   RA %x code %d\n", trp-> ra, trp->code);
	}
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif

#ifdef X_KLUGE
/*
 * This is a kludge to allow X windows to work.
 */
/*
 * Map `size' bytes of physical memory starting at `pa' into the single
 * X-server process's address space at a fixed virtual window.
 * Only one process (UserMapPid) may hold mappings at a time; returns the
 * virtual address, or NULL if another process owns the window or the
 * static PTE pool is exhausted.
 */
caddr_t
vmUserMap(size, pa)
	int size;
	unsigned pa;
{
	register caddr_t v;
	unsigned off, entry;

	if (nUserMapPtes == 0)
		UserMapPid = curproc->p_pid;	/* first mapping claims the window */
	else if (UserMapPid != curproc->p_pid)
		return ((caddr_t)0);
	off = pa & PGOFSET;
	size = btoc(off + size);		/* size is now a page count */
	if (nUserMapPtes + size > NPTES)
		return ((caddr_t)0);
	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
	entry = (pa & 0x9ffff000) | PG_V | PG_M;
	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
		entry |= PG_N;			/* mark device memory uncached */
	while (size > 0) {
		UserMapPtes[nUserMapPtes].pt_entry = entry;
		entry += NBPG;
		nUserMapPtes++;
		size--;
	}
	return (v);
}

/*
 * Release all X-kludge mappings and, if the owner is still current,
 * flush its TLB entries so the stale translations disappear.
 */
vmUserUnmap()
{
	int id;

	nUserMapPtes = 0;
	if (UserMapPid == curproc->p_pid) {
		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
		if (id >= 0)
			MachTLBFlushPID(id);
	}
	UserMapPid = 0;
}
#endif

/*
 * Return the resulting PC as if the branch was executed.
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;	/* saved integer register file */
	unsigned instPC;	/* address of the instruction to decode */
	unsigned fpcCSR;	/* FP control/status, for coprocessor branches */
	int allowNonBranch;	/* if 0, panic on a non-branch instruction */
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*(unsigned *)instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* register jump: target comes from rs */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;	/* fall through to next insn */
			break;
		}
		break;

	case OP_BCOND:
		/* conditional branches on rs vs. zero; rt selects the test */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;	/* skip delay slot */
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* absolute jump within the current 256MB region of the PC */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		/* coprocessor 1 (FPU) branch on the FP condition bit */
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}

/*
 * Compute a conditional branch's target: the address of the delay slot
 * (branch PC + 4) plus the sign-extended 16-bit offset scaled by 4.
 */
unsigned
GetBranchDest(InstPtr)
	InstFmt *InstPtr;
{
	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
}

/*
 * This routine is called by procxmt() to single step one instruction.
 * We do this by storing a break instruction after the current instruction,
 * resuming execution, and then restoring the old instruction.
 */
cpu_singlestep(p)
	register struct proc *p;
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * Refuse if a single-step breakpoint is already installed, or the
	 * target is not readable user memory.  NOTE(review): when
	 * md_ss_addr is 0 the second test reduces to rejecting va == 0;
	 * it may have been intended as an equality guard against an
	 * existing breakpoint at va -- confirm against procxmt() usage.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	/* save the original instruction, then plant the break */
	p->p_md.md_ss_addr = va;
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		/*
		 * Text is normally write-protected: temporarily grant
		 * write access, retry the store, then restore r-x.
		 */
		vm_offset_t sa, ea;
		int rv;

		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}

/*
 * news3400 - INT0 service routine.
1227 * 1228 * INTST0 bit 4: dma 1229 * 3: slot #1 1230 * 2: slot #3 1231 * 1: external #1 1232 * 0: external #3 1233 */ 1234 1235 #define LEVEL0_MASK \ 1236 (INTST1_DMA|INTST1_SLOT1|INTST1_SLOT3|INTST1_EXT1|INTST1_EXT3) 1237 1238 level0_intr() 1239 { 1240 register int stat; 1241 1242 stat = *(volatile u_char *)INTST1 & LEVEL0_MASK; 1243 *(u_char *)INTCLR1 = stat; 1244 1245 if (stat & INTST1_DMA) 1246 dma_intr(); 1247 if (stat & INTST1_SLOT1) 1248 exec_hb_intr2(); 1249 #if NEN > 0 1250 if (stat & INTST1_SLOT3) { 1251 int s, t; 1252 1253 s = splimp(); 1254 t = lance_intr(); 1255 (void) splx(s); 1256 if (t == 0) 1257 exec_hb_intr4(); 1258 } 1259 #endif 1260 #if NLE > 0 1261 if (stat & INTST1_SLOT3) { 1262 int s; 1263 1264 s = splimp(); 1265 leintr(0); 1266 (void) splx(s); 1267 } 1268 #endif 1269 if (stat & INTST1_EXT1) 1270 print_int_stat("EXT #1"); 1271 if (stat & INTST1_EXT3) 1272 print_int_stat("EXT #3"); 1273 } 1274 1275 /* 1276 * news3400 - INT1 service routine. 1277 * 1278 * INTST0 bit 1: centro fault 1279 * 0: centro busy 1280 * INTST1 bit 7: beep 1281 * 6: scc 1282 * 5: lance 1283 */ 1284 1285 #define LEVEL1_MASK2 (INTST0_CFLT|INTST0_CBSY) 1286 #define LEVEL1_MASK1 (INTST1_BEEP|INTST1_SCC|INTST1_LANCE) 1287 1288 level1_intr(pc) 1289 unsigned pc; 1290 { 1291 register int stat; 1292 register u_int saved_inten1 = *(u_char *)INTEN1; 1293 1294 *(u_char *)INTEN1 = 0; /* disable intr: beep, lance, scc */ 1295 1296 stat = *(volatile u_char *)INTST1 & LEVEL1_MASK1; 1297 *(u_char *)INTCLR1 = stat; 1298 1299 stat &= saved_inten1; 1300 1301 if (stat & INTST1_BEEP) { 1302 *(volatile u_char *)INTCLR1 = INTCLR1_BEEP; 1303 print_int_stat("BEEP"); 1304 } 1305 if (stat & INTST1_SCC) { 1306 scc_intr(); 1307 if (saved_inten1 & *(u_char *)INTST1 & INTST1_SCC) 1308 scc_intr(); 1309 } 1310 #if NEN > 0 1311 if (stat & INTST1_LANCE) 1312 lance_intr(); 1313 #endif 1314 #if NLE > 0 1315 if (stat & INTST1_LANCE) 1316 leintr(0); 1317 #endif 1318 1319 *(u_char *)INTEN1 = saved_inten1; 1320 
1321 #if NLP > 0 1322 /* 1323 * The PARK2 cannot find centro interrupt correctly. 1324 * We must check it by reading the cause register of cpu 1325 * while other interrupts are disabled. 1326 */ 1327 { 1328 register int causereg; 1329 int s = splhigh(); 1330 1331 causereg = get_causereg(); 1332 (void) splx(s); 1333 1334 if ((causereg & CAUSE_IP4) == 0) 1335 return; 1336 } 1337 #endif 1338 1339 stat = (int)(*(u_char *)INTST0) & LEVEL1_MASK2; 1340 *(u_char *)INTCLR0 = stat; 1341 1342 if (stat & INTST0_CBSY) /* centro busy */ 1343 #if NLP > 0 1344 lpxint(0); 1345 #else 1346 printf("stray intr: CBSY\n"); 1347 #endif 1348 } 1349 1350 /* 1351 * DMA interrupt service routine. 1352 */ 1353 dma_intr() 1354 { 1355 register volatile u_char *gsp = (u_char *)DMAC_GSTAT; 1356 register u_int gstat = *gsp; 1357 register int mrqb, i; 1358 1359 /* 1360 * when DMA intrrupt occurs there remain some untransferred data. 1361 * wait data transfer completion. 1362 */ 1363 mrqb = (gstat & (CH0_INT|CH1_INT|CH2_INT|CH3_INT)) << 1; 1364 if (gstat & mrqb) { 1365 /* 1366 * SHOULD USE DELAY() 1367 */ 1368 for (i = 0; i < 50; i++) 1369 ; 1370 if (*gsp & mrqb) 1371 printf("dma_intr: MRQ\n"); 1372 } 1373 1374 /* SCSI Dispatch */ 1375 if (gstat & CH_INT(CH_SCSI)) 1376 scintr(); 1377 1378 #include "fd.h" 1379 #if NFD > 0 1380 /* FDC Interrupt Dispatch */ 1381 if (gstat & CH_INT(CH_FDC)) 1382 fdc_intr(0); 1383 #endif /* NFD > 0 */ 1384 1385 #include "sb.h" 1386 #if NSB > 0 1387 /* Audio Interface Dispatch */ 1388 sbintr(0); 1389 #endif /* NSB > 0 */ 1390 1391 /* Video I/F Dispatch */ 1392 if (gstat & CH_INT(CH_VIDEO)) 1393 ; 1394 } 1395 1396 /* 1397 * SCC vector interrupt service routine. 
1398 */ 1399 scc_intr() 1400 { 1401 int vec; 1402 extern int scc_xint(), scc_sint(), scc_rint(), scc_cint(); 1403 static int (*func[])() = { 1404 scc_xint, 1405 scc_sint, 1406 scc_rint, 1407 scc_cint 1408 }; 1409 1410 vec = *(volatile u_char *)SCCVECT; 1411 (*func[(vec & SCC_INT_MASK) >> 1])(vec); 1412 } 1413 1414 print_int_stat(msg) 1415 char *msg; 1416 { 1417 int s0 = *(volatile u_char *)INTST0; 1418 int s1 = *(volatile u_char *)INTST1; 1419 1420 if (msg) 1421 printf("%s: ", msg); 1422 else 1423 printf("intr: "); 1424 printf("INTST0=0x%x, INTST1=0x%x.\n", s0, s1); 1425 } 1426