/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: trap.c 1.32 91/04/06$
 *
 *	@(#)trap.c	7.6 (Berkeley) 06/20/92
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "kernel.h"
#include "signalvar.h"
#include "user.h"
#include "buf.h"
#ifdef KTRACE
#include "ktrace.h"
#endif
#include "net/netisr.h"

#include "../include/trap.h"
#include "../include/psl.h"
#include "../include/reg.h"
#include "../include/cpu.h"
#include "../include/pte.h"
#include "../include/mips_opcode.h"
#include "clockreg.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

/*
 * This is a kludge to allow X windows to work.
 * A fixed pool of PTEs (UserMapPtes) is handed out to a single process
 * (UserMapPid) by vmUserMap()/vmUserUnmap() below, and the TLB-miss
 * fault path in trap() consults it before calling vm_fault().
 */
#define X_KLUGE

#ifdef X_KLUGE
#define USER_MAP_ADDR	0x4000		/* user VA where the mapping starts */
#define NPTES 300			/* size of the fixed PTE pool */
static pt_entry_t UserMapPtes[NPTES];
static unsigned nUserMapPtes;		/* number of PTEs handed out so far */
static pid_t UserMapPid;		/* pid that owns the mapping (one only) */
#endif

struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */

extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
extern void MemErrorInterrupt();

extern unsigned MachEmulateBranch();

/*
 * Exception dispatch table, indexed by the cause-register exception code.
 * The first 16 entries handle exceptions taken in kernel mode, the second
 * 16 the same exception codes taken in user mode.
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,			/* external interrupt */
	MachUserGenException,		/* TLB modification */
	MachUserGenException,		/* TLB miss (load or instr. fetch) */
	MachUserGenException,		/* TLB miss (store) */
	MachUserGenException,		/* address error (load or I-fetch) */
	MachUserGenException,		/* address error (store) */
	MachUserGenException,		/* bus error (I-fetch) */
	MachUserGenException,		/* bus error (load or store) */
	MachUserGenException,		/* system call */
	MachUserGenException,		/* breakpoint */
	MachUserGenException,		/* reserved instruction */
	MachUserGenException,		/* coprocessor unusable */
	MachUserGenException,		/* arithmetic overflow */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
};

/* Printable names for the 16 exception codes (used by trapDump()). */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};

#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* status register at time of trap */
	u_int	cause;		/* cause register at time of trap */
	u_int	vadr;		/* faulting virtual address (if any) */
	u_int	pc;		/* program counter at time of trap */
	u_int	ra;		/* return-address register at time of trap */
	u_int	code;		/* syscall code (negated) or 0 */
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* trp: next slot (circular) */
#endif

/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occurred on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;	/* extra signal code passed to trapsignal() */
	register struct proc *p = curproc;
	struct timeval syst;	/* system time at trap entry (profiling) */
	vm_prot_t ftype;	/* fault type for vm_fault() */
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* Record this trap in the circular trap history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	/*
	 * NOTE(review): for kernel traps the saved RA is fished out of the
	 * exception frame via `args' at a hard-coded offset (19 words in) --
	 * this must match the frame layout in locore; verify if that changes.
	 */
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;		/* select user half of the switch */
		syst = p->p_stime;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* Sign bit set => kernel-space (KSEG) address. */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
#ifndef ATTR
			register vm_offset_t pa;
#endif

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* Mark the page dirty and refresh the TLB entry. */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= PG_FRAME;
			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
				entry, MachTLBUpdate(vadr, entry)); /* XXX */
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] |=
				PMAP_ATTR_MOD;
#else
			pa = entry & PG_FRAME;
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: kmod");
			/* Record the modification in the vm_page. */
			PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pmap_hash_t hp;
#ifndef ATTR
		vm_offset_t pa;
#endif
#ifdef DIAGNOSTIC
		extern pmap_hash_t zero_pmap_hash;
		extern pmap_t cur_pmap;

		if (cur_pmap->pm_hash == zero_pmap_hash)
			panic("tlbmod");
#endif
		/* Look the address up in the two-way pmap hash bucket. */
		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
		if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0)
			i = 0;
		else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0)
			i = 1;
		else
			panic("trap: tlb umod not found");
		if (hp->pmh_pte[i].low & PG_RO) {
			/* Write to a read-only user page: take a real fault. */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		hp->pmh_pte[i].low |= PG_M;
		printf("trap: TLBupdate hi %x lo %x i %x\n",
			hp->pmh_pte[i].high, hp->pmh_pte[i].low,
			MachTLBUpdate(hp->pmh_pte[i].high,
				hp->pmh_pte[i].low)); /* XXX */
#ifdef ATTR
		pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |=
			PMAP_ATTR_MOD;
#else
		pa = hp->pmh_pte[i].low & PG_FRAME;
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: umod");
		PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
		/* Reached via fallthrough for kernel T_TLB_MOD as well. */
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* Sign bit set => kernel-space (KSEG) address. */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/*
			 * Copyin/copyout-style faults: resume at the
			 * recorded onfault handler instead of panicking.
			 */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/* Kernel-mode miss on a user address (e.g. copyin). */
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map = &vm->vm_map;
		int rv;

#ifdef X_KLUGE
		/* Serve the X-server's fixed mapping from UserMapPtes. */
		if (p->p_pid == UserMapPid &&
			(va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
			register pt_entry_t *pte;

			pte = &UserMapPtes[va];
			MachTLBWriteRandom((vadr & PG_FRAME) |
				(vm->vm_pmap.pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT),
				pte->pt_entry);
			return (pc);
		}
#endif
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv != KERN_SUCCESS) {
			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
				map, va, ftype, rv, vadr, pc,
				!USERMODE(statusReg) ? ((int *)&args)[19] :
					p->p_md.md_regs[RA]); /* XXX */
			printf("\tpid %d %s PC %x RA %x\n", p->p_pid,
				p->p_comm, p->p_md.md_regs[PC],
				p->p_md.md_regs[RA]); /* XXX */
			trapDump("vm_fault");
		}
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			/* Kernel fault on a user address: honor onfault. */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
		if (vadr == KERNBASE) {
			struct args {
				int	i[1];
			} args;
			int rval[2];

			/*
			 * Assume a signal handler is trying to return
			 * (see sendsig() and sigreturn()). We have to
			 * pop the sigframe struct to get the address of
			 * the sigcontext.
			 */
			args.i[0] = p->p_md.md_regs[SP] + 4 * sizeof(int);
			(void) sigreturn(curproc, &args, rval);
			goto out;
		}
		/* FALLTHROUGH */

	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/*
		 * Compute next PC after syscall instruction.
		 * Cause-register sign bit set means the exception was taken
		 * in a branch delay slot, so the next PC must be computed
		 * by emulating the branch.
		 */
		if ((int)causeReg < 0)
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];
		if (code == 0) {			/* indir */
			/* Real syscall number is in A0; args shift by one. */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[0];	/* indir (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			if (i > 3) {
				/* Remaining args come from the user stack. */
				i = copyin((caddr_t)(locr0[SP] +
						3 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;	/* error flag */
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg,
							args.i);
#endif
					goto done;
				}
			}
		} else {
			if (code >= numsys)
				callp = &systab[0];	/* indir (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				/* Remaining args come from the user stack. */
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;	/* error flag */
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg,
							args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* Tag the trap-history entry for this trap with the code. */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		/* Log syscall return in the trap history (code negated). */
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		if (i == ERESTART)
			locr0[PC] = pc;		/* re-execute the syscall */
		else if (i != EJUSTRETURN) {
			if (i) {
				locr0[V0] = i;
				locr0[A3] = 1;	/* error flag for libc stub */
			} else {
				locr0[V0] = rval[0];
				locr0[V1] = rval[1];
				locr0[A3] = 0;
			}
		}
		/* else if (i == EJUSTRETURN) */
			/* nothing to do */
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif

		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)
			va += 4;	/* exception was in a delay slot */

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;	/* let the kernel debugger have it */
#endif
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP  */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			vm_offset_t sa, ea;
			int rv;

			/*
			 * Text is normally write-protected; temporarily
			 * make the page writable to restore the instruction.
			 */
			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		/* Only coprocessor 1 (the FPU) may be used from user mode. */
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* Lazily hand the FPU to this process. */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		extern struct pcb kdbpcb;

		/*
		 * Build a pcb snapshot for the kernel debugger.  For kernel
		 * traps the registers are pulled out of the exception frame
		 * via `args' at fixed word offsets (must match locore).
		 */
		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0],
				33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	/* Deliver the signal chosen above to the current process. */
	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	astpending = 0;
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	/* Charge profiling time for the system time consumed by the trap. */
	if (p->p_stats->p_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &p->p_stime;

		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks)
			addupc(pc, &p->p_stats->p_prof, ticks);
	}
	curpri = p->p_pri;
	return (pc);
}

#ifdef DS5000
struct intr_tab intr_tab[8];	/* per-slot I/O interrupt dispatch table */
#endif

int temp;		/* XXX ULTRIX compiler bug with -O */

/*
 * Handle an interrupt.
 * Called from MachKernIntr() or MachUserIntr()
 * Note: curproc might be NULL.
 */
interrupt(statusReg, causeReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned pc;		/* program counter where to continue */
{
	register unsigned mask;
	clockframe cf;

#ifdef DEBUG
	/* Record this interrupt in the circular trap history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = 0;
	trp->pc = pc;
	trp->ra = 0;
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_intr++;
	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
#ifdef DS3100
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_3) {
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;

		temp = c->regc;	/* clear interrupt bits */
		cf.pc = pc;
		cf.ps = statusReg;
		hardclock(cf);
		causeReg &= ~MACH_INT_MASK_3;	/* reenable clock interrupts */
	}
	/*
	 * Enable hardware interrupts which were enabled but not pending.
	 * We only respond to software interrupts when returning to spl0.
	 */
	splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
	if (mask & MACH_INT_MASK_0)
		siiintr(0);		/* SCSI */
	if (mask & MACH_INT_MASK_1)
		leintr(0);		/* ethernet */
	if (mask & MACH_INT_MASK_2)
		dcintr(0);		/* serial lines */
	if (mask & MACH_INT_MASK_4)
		MemErrorInterrupt();
#endif /* DS3100 */
#ifdef DS5000
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;
		register unsigned csr;
		static int warned = 0;

		csr = *(unsigned *)MACH_SYS_CSR_ADDR;
		if ((csr & MACH_CSR_PSWARN) && !warned) {
			warned = 1;
			printf("WARNING: power supply is overheating!\n");
		} else if (warned && !(csr & MACH_CSR_PSWARN)) {
			warned = 0;
			printf("WARNING: power supply is OK again\n");
		}
		temp = c->regc;	/* clear interrupt bits */
		cf.pc = pc;
		cf.ps = statusReg;
		hardclock(cf);
		causeReg &= ~MACH_INT_MASK_1;	/* reenable clock interrupts */
	}
	if (mask & MACH_INT_MASK_0) {
		register unsigned csr;
		register unsigned i, m;

		/* Find I/O interrupts that are both asserted and enabled. */
		csr = *(unsigned *)MACH_SYS_CSR_ADDR;
		m = csr & (csr >> MACH_CSR_IOINTEN_SHIFT) &
			MACH_CSR_IOINT_MASK;
#if 0
		*(unsigned *)MACH_SYS_CSR_ADDR =
			(csr & ~(MACH_CSR_MBZ | 0xFF)) |
			(m << MACH_CSR_IOINTEN_SHIFT);
#endif
		/*
		 * Enable hardware interrupts which were enabled but not
		 * pending. We only respond to software interrupts when
		 * returning to spl0.
		 */
		splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
			MACH_SR_INT_ENA_CUR);
		/* Dispatch each pending I/O interrupt via intr_tab. */
		for (i = 0; m; i++, m >>= 1) {
			if (!(m & 1))
				continue;
			if (intr_tab[i].func)
				(*intr_tab[i].func)(intr_tab[i].unit);
			else
				printf("spurious interrupt %d\n", i);
		}
#if 0
		*(unsigned *)MACH_SYS_CSR_ADDR =
			csr & ~(MACH_CSR_MBZ | 0xFF);
#endif
	} else {
		/*
		 * Enable hardware interrupts which were enabled but not
		 * pending. We only respond to software interrupts when
		 * returning to spl0.
		 */
		splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
			MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3)
		MemErrorInterrupt();
#endif /* DS5000 */
	if (mask & MACH_INT_MASK_5) {
		/* FPU interrupt; only meaningful when taken from user mode. */
		if (!USERMODE(statusReg)) {
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
	}
	if (mask & MACH_SOFT_INT_MASK_0) {
		clockframe cf;

		clearsoftclock();
		cnt.v_soft++;
		cf.pc = pc;
		cf.ps = statusReg;
		softclock(cf);
	}
	/* process network interrupt if we trapped or will very soon */
	if ((mask & MACH_SOFT_INT_MASK_1) ||
	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
		clearsoftnet();
		cnt.v_soft++;
#ifdef INET
		if (netisr & (1 << NETISR_ARP)) {
			netisr &= ~(1 << NETISR_ARP);
			arpintr();
		}
		if (netisr & (1 << NETISR_IP)) {
			netisr &= ~(1 << NETISR_IP);
			ipintr();
		}
#endif
#ifdef NS
		if (netisr & (1 << NETISR_NS)) {
			netisr &= ~(1 << NETISR_NS);
			nsintr();
		}
#endif
#ifdef ISO
		if (netisr & (1 << NETISR_ISO)) {
			netisr &= ~(1 << NETISR_ISO);
			clnlintr();
		}
#endif
	}
}

/*
 * This is called from MachUserIntr() if astpending is set.
 * This is very similar to the tail of trap().
 * Delivers pending signals and honors reschedule requests before
 * returning to user mode.
 */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	register int i;

	cnt.v_soft++;
	astpending = 0;
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	curpri = p->p_pri;
}

#ifdef DEBUG
/*
 * Print the trap history buffer (most recent entry first), then clear it.
 * `msg' identifies the caller in the output.
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* Walk backwards through the circular buffer. */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf(" RA %x code %d\n", trp->ra, trp->code);
	}
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif

#ifdef X_KLUGE
/*
 * This is a kludge to allow X windows to work.
 * Map `size' bytes of physical memory starting at `pa' into the calling
 * process at USER_MAP_ADDR, using the fixed UserMapPtes pool.  Only one
 * process at a time may hold such mappings.  Returns the mapped virtual
 * address, or NULL if the pool is owned by another process or exhausted.
 */
caddr_t
vmUserMap(size, pa)
	int size;
	unsigned pa;
{
	register caddr_t v;
	unsigned off, entry;

	if (nUserMapPtes == 0)
		UserMapPid = curproc->p_pid;	/* first caller claims pool */
	else if (UserMapPid != curproc->p_pid)
		return ((caddr_t)0);
	off = pa & PGOFSET;
	size = btoc(off + size);	/* now a page count */
	if (nUserMapPtes + size > NPTES)
		return ((caddr_t)0);
	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
	entry = (pa & 0x9ffff000) | PG_V | PG_M;
	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
		entry |= PG_N;		/* mark mapping non-cacheable */
	while (size > 0) {
		UserMapPtes[nUserMapPtes].pt_entry = entry;
		entry += NBPG;
		nUserMapPtes++;
		size--;
	}
	return (v);
}

/*
 * Release all X-kludge mappings and flush the owner's TLB entries.
 */
vmUserUnmap()
{
	int id;

	nUserMapPtes = 0;
	if (UserMapPid == curproc->p_pid) {
		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
		if (id >= 0)
			MachTLBFlushPID(id);
	}
	UserMapPid = 0;
}
#endif

/*
 *----------------------------------------------------------------------
 *
 * MemErrorInterrupt --
 *
 *	Handle an interrupt from the system control register
 *	(memory/write-buffer error).
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Clears the error condition in the hardware; panics on a
 *	DS3100 memory error.
 *
 *----------------------------------------------------------------------
 */
static void
MemErrorInterrupt()
{
#ifdef DS3100
	volatile u_short *sysCSRPtr = (u_short *)MACH_SYS_CSR_ADDR;
	u_short csr;

	csr = *sysCSRPtr;

	if (csr & MACH_CSR_MEM_ERR) {
		printf("Memory error at 0x%x\n",
			*(unsigned *)MACH_WRITE_ERROR_ADDR);
		panic("Mem error interrupt");
	}
	*sysCSRPtr = (csr & ~MACH_CSR_MBZ) | 0xff;
#endif /* DS3100 */
#ifdef DS5000
	printf("erradr %x\n", *(unsigned *)MACH_ERROR_ADDR);
	*(unsigned *)MACH_ERROR_ADDR = 0;
	MachEmptyWriteBuffer();
#endif /* DS5000 */
}

/*
 * Return the resulting PC as if the branch was executed.
 * `regsPtr' points at the saved register array, `instPC' at the branch
 * (or, with allowNonBranch, any) instruction; `fpcCSR' supplies the FP
 * condition bit for coprocessor-1 branches.  Panics if the instruction
 * is not a branch and allowNonBranch is not set.
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;
	unsigned instPC;
	unsigned fpcCSR;
	int allowNonBranch;
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* Register jump: target comes from rs. */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* Absolute jump within the current 256MB segment. */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			/* FP branch: test the FP condition bit. */
			if ((inst.RType.rt & COPz_BC_TF_MASK) ==
					COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}

/*
 * Compute a conditional branch's target: PC of the delay slot plus the
 * sign-extended immediate shifted into a word offset.
 */
unsigned
GetBranchDest(InstPtr)
	InstFmt *InstPtr;
{
	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
}

/*
 * This routine is called by procxmt() to single step one instruction.
 * We do this by storing a break instruction after the current instruction,
 * resuming execution, and then restoring the old instruction.
 */
cpu_singlestep(p)
	register struct proc *p;
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): the `md_ss_addr == va' term looks redundant -- it
	 * can only matter when md_ss_addr is 0 and va is 0; possibly meant
	 * to compare against locr0[PC].  Verify intent before changing.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	p->p_md.md_ss_addr = va;
	/* Save the original instruction and plant the break. */
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		vm_offset_t sa, ea;
		int rv;

		/*
		 * Text is normally write-protected; temporarily make the
		 * page writable so the break instruction can be stored.
		 */
		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}