/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, Ralph Campbell, Sony Corp. and Kazumasa Utashiro
 * of Software Research Associates, Inc.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: trap.c 1.32 91/04/06$
 *
 *	@(#)trap.c	7.9 (Berkeley) 05/13/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/user.h>
#include <sys/buf.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <net/netisr.h>

#include <machine/trap.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/mips_opcode.h>
#include <machine/adrsmap.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include "lp.h"
#include "bm.h"
#include "ms.h"
#include "en.h"
#include <news3400/hbdev/dmac_0448.h>
#include <news3400/sio/scc.h>

struct proc *machFPCurProcPtr;		/* pointer to last proc to use FP */

extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
extern unsigned MachEmulateBranch();

/*
 * First-level exception dispatch table, indexed by the exception code
 * from the CP0 cause register; the second half (offset by 16) is used
 * when the exception occurred in user mode.  Entry order must match the
 * hardware exception codes exactly.
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
};

/* printable names for the 16 exception codes, same order as above */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};

#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* CP0 status register at trap time */
	u_int	cause;		/* CP0 cause register at trap time */
	u_int	vadr;		/* faulting virtual address, if any */
	u_int	pc;		/* program counter at trap time */
	u_int	ra;		/* return address register */
	u_int	code;		/* syscall code or 0 */
} trapdebug[TRAPSIZE], *trp = trapdebug;
#endif

/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 *
 * Dispatches on the exception code (T_USER is or'ed in for traps taken
 * in user mode): TLB mod/miss faults go to the VM system, syscalls are
 * decoded and dispatched through sysent, breakpoints handle single-step
 * restore, and anything unexpected panics.  The "out:" tail runs only
 * when returning to user mode: it posts signals, handles rescheduling,
 * and charges profiling time.
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occurred on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;
	register struct proc *p = curproc;
	u_quad_t sticks;
	vm_prot_t ftype;
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* log this trap into the debugging history ring buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		/* snapshot system time for profiling at the "out:" tail */
		sticks = p->p_sticks;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (kseg addresses are negative) */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
			register vm_offset_t pa;

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* set the modified bit in PTE and TLB, note the
			 * page dirty in the VM system */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= ~PGOFSET;
			printf("trap: ktlbmod: TLBupdate hi %x lo %x i %x\n",
				vadr, entry,
				MachTLBUpdate(vadr, entry)); /* XXX */
			pa = entry & PG_FRAME;
#ifdef ATTR
			pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: ktlbmod: unmanaged page");
			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		register pt_entry_t *pte;
		register unsigned entry;
		register vm_offset_t pa;
		pmap_t pmap = &p->p_vmspace->vm_pmap;

		if (!(pte = pmap_segmap(pmap, vadr)))
			panic("trap: utlbmod: invalid segmap");
		pte += (vadr >> PGSHIFT) & (NPTEPG - 1);
		entry = pte->pt_entry;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (entry & PG_RO) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		/* mark page modified; TLB entries are tagged with the
		 * pmap's tlbpid so include it in the hi word */
		entry |= PG_M;
		pte->pt_entry = entry;
		vadr = (vadr & ~PGOFSET) |
			(pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
		printf("trap: utlbmod: TLBupdate hi %x lo %x i %x\n",
			vadr, entry, MachTLBUpdate(vadr, entry)); /* XXX */
		pa = entry & PG_FRAME;
#ifdef ATTR
		pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: utlbmod: unmanaged page");
		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* a copyin/copyout style routine registered a
			 * recovery pc; resume there instead of panicking */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
			goto err;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (i == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm;
		register vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/* unresolved user fault: deliver SIGBUS/SIGSEGV with the
		 * faulting address as the signal code */
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef COMPAT_NEWSOS
		extern int nnewssys;
		extern struct sysent newssys[];
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction;
		 * cause register sign bit set means the syscall was in a
		 * branch delay slot, so the branch must be emulated */
		if ((int)causeReg < 0)
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
		code = locr0[V0];
#ifdef COMPAT_NEWSOS
		/* NEWS-OS syscalls are numbered from 1000 and dispatched
		 * through their own table */
		if (code >= 1000) {
			code -= 1000;
			systab = newssys;
			numsys = nnewssys;
		}
#endif
		switch (code) {
		case SYS_indir:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
#ifdef COMPAT_NEWSOS
			if (code >= 1000) {
				code -= 1000;
				systab = newssys;
				numsys = nnewssys;
			}
#endif
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			/* remaining args come from the user stack, past the
			 * 4-word register-save area */
			if (i > 3) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___indir:
			/*
			 * Like indir, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* patch the syscall code into the trap record made above */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		/* return syscall status in V0/V1; A3 flags an error */
		switch (i) {
		case 0:
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			/* back up so the syscall is re-executed */
			locr0[PC] = pc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif

		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)
			va += 4;

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* not our single-step breakpoint: just deliver SIGTRAP */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			vm_offset_t sa, ea;
			int rv;

			/* text is write-protected: open it up, rewrite the
			 * saved instruction, then re-protect */
			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily hand the FPU to this process */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		extern struct pcb kdbpcb;

		/* build a register snapshot for the kernel debugger; for
		 * kernel traps the registers are pulled from the exception
		 * frame reachable through &args */
		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
			p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	printf("trap: pid %d '%s' sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		psig(i);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((i = CURSIG(p)) != 0)
			psig(i);
	}
	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
	}
	curpri = p->p_pri;
	return (pc);
}

/* set when a level-4 (bus error) interrupt arrives during a badaddr probe */
int badaddr_flag;

/*
 * Handle an interrupt.
 * Called from MachKernIntr() or MachUserIntr()
 * Note: curproc might be NULL.
 *
 * Services pending hardware interrupt levels from highest (5) to lowest
 * (0), dropping spl to mask only the remaining pending levels before each
 * handler, then runs soft clock and soft network interrupts.  The
 * pcb_onfault slot is saved on entry and restored on exit so a fault
 * recovery in progress is not lost across the interrupt.
 */
interrupt(statusReg, causeReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned pc;		/* program counter where to continue */
{
	register unsigned mask;
	struct clockframe cf;
	int oonfault = ((struct pcb *)UADDR)->pcb_onfault;

#ifdef DEBUG
	/* log this interrupt into the trap history ring buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = 0;
	trp->pc = pc;
	trp->ra = 0;
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
	if (mask & MACH_INT_MASK_5) {	/* level 5 interrupt */
		splx((MACH_SPL_MASK_8 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		printf("level 5 interrupt: PC %x CR %x SR %x\n",
			pc, causeReg, statusReg);
		causeReg &= ~MACH_INT_MASK_5;
	}
	if (mask & MACH_INT_MASK_4) {	/* level 4 interrupt */
		/*
		 * asynchronous bus error
		 */
		splx((MACH_SPL_MASK_7 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		*(char *)INTCLR0 = INTCLR0_BERR;
		causeReg &= ~MACH_INT_MASK_4;
#define BADADDR	1
		/* a badaddr() probe registered onfault index 1: report the
		 * bus error through badaddr_flag instead of the console */
		if (oonfault == BADADDR) {	/* XXX */
			badaddr_flag = 1;
		} else {
			printf("level 4 interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
		}
	}
	if (mask & MACH_INT_MASK_3) {	/* level 3 interrupt */
		/*
		 * fp error
		 */
		splx((MACH_SPL_MASK_6 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		if (!USERMODE(statusReg)) {
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
		causeReg &= ~MACH_INT_MASK_3;
	}
	if (mask & MACH_INT_MASK_2) {	/* level 2 interrupt */
		register int stat;

		splx((MACH_SPL_MASK_5 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		stat = *(volatile u_char *)INTST0;
		if (stat & INTST0_TIMINT) {	/* timer */
			static int led_count = 0;

			*(volatile u_char *)INTCLR0 = INTCLR0_TIMINT;
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
			/* blink the front panel LED about once a second */
			if (++led_count > hz) {
				led_count = 0;
				*(volatile u_char *)DEBUG_PORT ^= DP_LED1;
			}
		}
#if NBM > 0
		if (stat & INTST0_KBDINT)	/* keyboard */
			kbm_rint(SCC_KEYBOARD);
#endif
#if NMS > 0
		if (stat & INTST0_MSINT)	/* mouse */
			kbm_rint(SCC_MOUSE);
#endif
		causeReg &= ~MACH_INT_MASK_2;
	}
	if (mask & MACH_INT_MASK_1) {	/* level 1 interrupt */
		splx((MACH_SPL_MASK_4 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		level1_intr();
		causeReg &= ~MACH_INT_MASK_1;
	}
	if (mask & MACH_INT_MASK_0) {	/* level 0 interrupt */
		splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR);
		level0_intr();
		causeReg &= ~MACH_INT_MASK_0;
	}
	splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR);

	if (mask & MACH_SOFT_INT_MASK_0) {
		/* NOTE(review): this cf shadows the outer one, is filled in
		 * but never passed to softclock() -- looks vestigial */
		struct clockframe cf;

		clearsoftclock();
		cnt.v_soft++;
		cf.pc = pc;
		cf.sr = statusReg;
		softclock();
	}
	/* process network interrupt if we trapped or will very soon */
	if ((mask & MACH_SOFT_INT_MASK_1) ||
	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
		clearsoftnet();
		cnt.v_soft++;
#ifdef INET
		if (netisr & (1 << NETISR_ARP)) {
			netisr &= ~(1 << NETISR_ARP);
			arpintr();
		}
		if (netisr & (1 << NETISR_IP)) {
			netisr &= ~(1 << NETISR_IP);
			ipintr();
		}
#endif
#ifdef NS
		if (netisr & (1 << NETISR_NS)) {
			netisr &= ~(1 << NETISR_NS);
			nsintr();
		}
#endif
#ifdef ISO
		if (netisr & (1 << NETISR_ISO)) {
			netisr &= ~(1 << NETISR_ISO);
			clnlintr();
		}
#endif
	}
	/* restore onfault flag */
	((struct pcb *)UADDR)->pcb_onfault = oonfault;
}

/*
 * This is called from MachUserIntr() if astpending is set.
 * This is very similar to the tail of trap().
 * Posts pending signals, handles deferred profiling ticks (SOWEUPC),
 * and reschedules if want_resched was set while we were in user mode.
 */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	int sig;

	cnt.v_soft++;
	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (p->p_flag & SOWEUPC) {
		/* collect a profiling tick postponed by statclock */
		p->p_flag &= ~SOWEUPC;
		ADDUPROF(p);
	}
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}
	curpri = p->p_pri;
}

#ifdef DEBUG
/*
 * Dump the trap history ring buffer to the console, newest entry first,
 * then clear it.  Runs at splhigh so the buffer is stable while printing.
 */
trapDump(msg)
	char *msg;	/* tag printed in the dump header */
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* walk backwards through the circular buffer */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf(" RA %x code %d\n", trp->ra, trp->code);
	}
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif

/*
 * Return the resulting PC as if the branch was executed.
 *
 * Decodes the instruction at instPC and computes the address of the next
 * instruction to execute, evaluating branch conditions against the saved
 * register file (and fpcCSR for coprocessor-1 branches).  If the
 * instruction is not a branch, returns instPC + 4 when allowNonBranch is
 * set, otherwise panics.
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;	/* saved general registers, indexed by regno */
	unsigned instPC;	/* address of the instruction to decode */
	unsigned fpcCSR;	/* FP control/status, for coproc branches */
	int allowNonBranch;	/* non-branch instructions are acceptable */
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*(unsigned *)instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* register-indirect jump: target is in rs */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				/* skip the branch delay slot too */
				retAddr = instPC + 8;
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* 26-bit target, within the current 256MB segment */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			/* branch on FP condition bit true or false */
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}

/*
 * Compute a conditional branch target: the sign-extended 16-bit
 * displacement (in words) is taken relative to the delay-slot address.
 */
unsigned
GetBranchDest(InstPtr)
	InstFmt *InstPtr;	/* address of the branch instruction */
{
	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
}

/*
 * This routine is called by procxmt() to single step one instruction.
 * We do this by storing a break instruction after the current instruction,
 * resuming execution, and then restoring the old instruction.
 *
 * Returns 0 on success, EFAULT if a breakpoint is already planted or the
 * target address is not accessible/writable.
 */
cpu_singlestep(p)
	register struct proc *p;	/* process being single-stepped */
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/* NOTE(review): the `md_ss_addr == va' test is redundant -- it can
	 * only be true when md_ss_addr is nonzero, in which case the first
	 * test already fired; presumably harmless, verify intent */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	/* save the original instruction, then plant the break */
	p->p_md.md_ss_addr = va;
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		vm_offset_t sa, ea;
		int rv;

		/* text is write-protected: temporarily make it writable,
		 * plant the break, then restore read/execute protection */
		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}

/*
 * news3400 - INT0 service routine.
 *
 * INTST0 bit 4:	dma
 *	      3:	slot #1
 *	      2:	slot #3
 *	      1:	external #1
 *	      0:	external #3
 */

#define LEVEL0_MASK	\
	(INTST1_DMA|INTST1_SLOT1|INTST1_SLOT3|INTST1_EXT1|INTST1_EXT3)

/*
 * Level-0 interrupt dispatcher: acknowledge the pending INTST1 sources,
 * then call the DMA, expansion-slot, and external-line handlers.
 */
level0_intr()
{
	register int stat;

	stat = *(volatile u_char *)INTST1 & LEVEL0_MASK;
	*(u_char *)INTCLR1 = stat;

	if (stat & INTST1_DMA)
		dma_intr();
	if (stat & INTST1_SLOT1)
		exec_hb_intr2();
#if NEN > 0
	if (stat & INTST1_SLOT3) {
		int s, t;

		/* give the lance driver first shot; if it claims nothing,
		 * fall through to the generic slot-4 handler */
		s = splimp();
		t = lance_intr();
		(void) splx(s);
		if (t == 0)
			exec_hb_intr4();
	}
#endif
#if NLE > 0
	if (stat & INTST1_SLOT3) {
		int s;

		s = splimp();
		leintr(0);
		(void) splx(s);
	}
#endif
	if (stat & INTST1_EXT1)
		print_int_stat("EXT #1");
	if (stat & INTST1_EXT3)
		print_int_stat("EXT #3");
}

/*
 * news3400 - INT1 service routine.
 *
 * INTST0 bit 1:	centro fault
 *	      0:	centro busy
 * INTST1 bit 7:	beep
 *	      6:	scc
 *	      5:	lance
 */

#define LEVEL1_MASK2	(INTST0_CFLT|INTST0_CBSY)
#define LEVEL1_MASK1	(INTST1_BEEP|INTST1_SCC|INTST1_LANCE)

/*
 * Level-1 interrupt dispatcher: beep, SCC serial, lance ethernet, and
 * (last, with special care) the Centronics printer port.
 * NOTE(review): the `pc' parameter is never referenced and interrupt()
 * calls level1_intr() with no arguments -- looks vestigial.
 */
level1_intr(pc)
	unsigned pc;
{
	register int stat;
	register u_int saved_inten1 = *(u_char *)INTEN1;

	*(u_char *)INTEN1 = 0;	/* disable intr: beep, lance, scc */

	stat = *(volatile u_char *)INTST1 & LEVEL1_MASK1;
	*(u_char *)INTCLR1 = stat;

	/* service only sources that were actually enabled */
	stat &= saved_inten1;

	if (stat & INTST1_BEEP) {
		*(volatile u_char *)INTCLR1 = INTCLR1_BEEP;
		print_int_stat("BEEP");
	}
	if (stat & INTST1_SCC) {
		scc_intr();
		/* another SCC interrupt may have arrived while servicing */
		if (saved_inten1 & *(u_char *)INTST1 & INTST1_SCC)
			scc_intr();
	}
#if NEN > 0
	if (stat & INTST1_LANCE)
		lance_intr();
#endif
#if NLE > 0
	if (stat & INTST1_LANCE)
		leintr(0);
#endif

	*(u_char *)INTEN1 = saved_inten1;

#if NLP > 0
	/*
	 * The PARK2 cannot find centro interrupt correctly.
	 * We must check it by reading the cause register of cpu
	 * while other interrupts are disabled.
	 */
	{
		register int causereg;
		int s = splhigh();

		causereg = get_causereg();
		(void) splx(s);

		if ((causereg & CAUSE_IP4) == 0)
			return;
	}
#endif

	stat = (int)(*(u_char *)INTST0) & LEVEL1_MASK2;
	*(u_char *)INTCLR0 = stat;

	if (stat & INTST0_CBSY)		/* centro busy */
#if NLP > 0
		lpxint(0);
#else
		printf("stray intr: CBSY\n");
#endif
}

/*
 * DMA interrupt service routine.
 * Waits out any in-flight transfer, then dispatches to the SCSI, floppy,
 * audio, and video handlers according to the DMAC global status.
 */
dma_intr()
{
	register volatile u_char *gsp = (u_char *)DMAC_GSTAT;
	register u_int gstat = *gsp;
	register int mrqb, i;

	/*
	 * when DMA interrupt occurs there remain some untransferred data.
	 * wait data transfer completion.
	 */
	mrqb = (gstat & (CH0_INT|CH1_INT|CH2_INT|CH3_INT)) << 1;
	if (gstat & mrqb) {
		/*
		 * SHOULD USE DELAY()
		 */
		for (i = 0; i < 50; i++)
			;
		if (*gsp & mrqb)
			printf("dma_intr: MRQ\n");
	}

	/* SCSI Dispatch */
	if (gstat & CH_INT(CH_SCSI))
		scintr();

#include "fd.h"
#if NFD > 0
	/* FDC Interrupt Dispatch */
	if (gstat & CH_INT(CH_FDC))
		fdc_intr(0);
#endif /* NFD > 0 */

#include "sb.h"
#if NSB > 0
	/* Audio Interface Dispatch */
	sbintr(0);
#endif /* NSB > 0 */

	/* Video I/F Dispatch */
	if (gstat & CH_INT(CH_VIDEO))
		;
}

/*
 * SCC vector interrupt service routine.
 * Reads the interrupt vector from the SCC and dispatches to the
 * transmit/status/receive/condition handler selected by its low bits.
 */
scc_intr()
{
	int vec;
	extern int scc_xint(), scc_sint(), scc_rint(), scc_cint();
	static int (*func[])() = {
		scc_xint,
		scc_sint,
		scc_rint,
		scc_cint
	};

	vec = *(volatile u_char *)SCCVECT;
	(*func[(vec & SCC_INT_MASK) >> 1])(vec);
}

/*
 * Print the current interrupt status registers, prefixed with msg
 * (or "intr: " when msg is NULL).  Diagnostic helper for stray
 * interrupts.
 */
print_int_stat(msg)
	char *msg;	/* prefix tag, may be NULL */
{
	int s0 = *(volatile u_char *)INTST0;
	int s1 = *(volatile u_char *)INTST1;

	if (msg)
		printf("%s: ", msg);
	else
		printf("intr: ");
	printf("INTST0=0x%x, INTST1=0x%x.\n", s0, s1);
}

/*
 * Print a stack trace starting from the current pc/sp.
 */
traceback()
{
	u_int pc, sp;

	getpcsp(&pc, &sp);
	backtr(pc, sp);
}

/* offsets/sizes of the kernel exception frame used when unwinding */
#define EF_RA		92	/* r31: return address */
#define KERN_REG_SIZE	(18 * 4)
#define STAND_FRAME_SIZE	24
#define EF_SIZE		STAND_FRAME_SIZE + KERN_REG_SIZE + 12

extern u_int MachKernGenExceptionEnd[];
extern u_int end[];
#define ENDOFTXT	(end + 1)

/* pc lies within kernel text (compared in cached address space) */
#define VALID_TEXT(pc) \
	((u_int *)MACH_CODE_START <= (u_int *)MACH_UNCACHED_TO_CACHED(pc) && \
	 (u_int *)MACH_UNCACHED_TO_CACHED(pc) <= (u_int *)ENDOFTXT)

/* pc lies within the kernel exception handler stub */
#define ExceptionHandler(x) \
	((u_int*)MachKernGenException < (u_int*)MACH_UNCACHED_TO_CACHED(x) && \
	 (u_int*)MACH_UNCACHED_TO_CACHED(x) < (u_int*)MachKernGenExceptionEnd)

/*
 * Walk the kernel stack from (pc, sp), printing one line per frame.
 * Exception frames are unwound with the fixed EF_RA/EF_SIZE layout;
 * ordinary frames are unwound by scanning the code with getra().
 */
backtr(pc, sp)
	register u_int *pc;	/* starting program counter */
	register caddr_t sp;	/* starting stack pointer */
{
	int fsize;
	u_int *getra();
	extern int _gp[];

	printf("start trace back pc=%x, sp=%x, pid=%d[%s]\n",
		pc, sp, curproc->p_pid, curproc->p_comm);

	while (VALID_TEXT(pc)) {
		/* stay within the kernel stack region */
		if (sp >= (caddr_t)KERNELSTACK || sp < (caddr_t)UADDR) {
			printf("stack exhausted (sp=0x%x)\n", sp);
			break;
		}
		if (ExceptionHandler(pc)) {
			pc = (u_int *)(*((u_int *)&sp[EF_RA]));
			sp += EF_SIZE;
			printf("trapped from pc=%x, sp=%x\n", pc, sp);
		} else {
			pc = getra(pc, sp, &fsize);
			sp += fsize;
			printf("called from pc=%x, sp=%x\n", pc, sp);
		}
	}
	printf("trace back END. pid=%d[%s]\n", curproc->p_pid, curproc->p_comm);
}

/* maximum number of already-taken branch pcs remembered by getra() */
#define NPCSTOCK	128

/*
 * Scan forward from pc simulating just enough of the instruction stream
 * (stack adjust, loads from the frame, unconditional branches) to find
 * the function epilogue's `jr' and recover the caller's return address.
 * Also reports the frame size through *fsize.  Returns 0 if the scan
 * runs off the end of kernel text.
 */
u_int *
getra(pc, sp, fsize)
	register int *pc;	/* pc within the function to unwind */
	register caddr_t sp;	/* frame's stack pointer */
	int *fsize;		/* out: frame size in bytes */
{
	u_int regs[32];
	int *opcs[NPCSTOCK];
	register int i, nbpc = 0;
	int printed = 0;
	InstFmt I;

	*fsize = 0;
	for (i = 0; i < 32; i++) regs[i] = 0;
	for (; (u_int*)MACH_UNCACHED_TO_CACHED(pc) < (u_int*)ENDOFTXT; pc++) {
		I.word = *pc;
		switch (I.IType.op) {

		case OP_ADDIU:
			/* sp += fsize */
			if (I.IType.rs == SP && I.IType.rt == SP)
				*fsize = (u_short)I.IType.imm;
			break;

		case OP_LW:
			/* track register restores from the stack frame */
			if (I.IType.rs != SP)
				break;
			regs[I.IType.rt] = *(u_int *)&sp[(short)I.IType.imm];
			break;

		case OP_BEQ:
			/* follow unconditional branches (beq $0,$0),
			 * remembering taken pcs to detect loops */
			if (I.IType.rs != ZERO || I.IType.rt != ZERO)
				break;
			for (i = 0; i < nbpc; i++)
				if (pc == opcs[i]) {
					/*
					 * Branch constructs infinite loop.
					 */
					if (!printed) {
						printf("branch loop\n");
						printed = 1;
					}
					break;
				}
			if (i == nbpc) {
				opcs[nbpc] = pc;
				nbpc = imin(nbpc + 1, NPCSTOCK);
				pc = pc + (short)I.IType.imm;
			}
			break;

		default:
			break;
		}

		/* epilogue found: previous instruction was `jr rs' */
		I.word = *(pc - 1);
		if (I.RType.op == OP_SPECIAL && I.RType.func == OP_JR)
			return ((int *)regs[I.RType.rs]);
	}
	printf("pc run out of TEXT\n");
	return (0);
}