xref: /original-bsd/sys/pmax/pmax/trap.c (revision da6ea800)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.19 (Berkeley) 05/31/93
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29 
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36 
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49 
50 #include <pmax/stand/dec_prom.h>
51 
52 #include <asc.h>
53 #include <sii.h>
54 #include <le.h>
55 #include <dc.h>
56 
/* Owner of the FPU state: last process to use the floating point unit. */
struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */

/*
 * Low-level exception entry points and helpers implemented outside this
 * file (in assembly); they vector into trap() / interrupt() below.
 */
extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
extern unsigned MachEmulateBranch();
66 
/*
 * Exception dispatch table, indexed by CP0 exception code; the second
 * half (offset by 16) is used when the exception occurred in user mode.
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers (same exception codes as above).
 */
	MachUserIntr,			/* external interrupt */
	MachUserGenException,		/* TLB modification */
	MachUserGenException,		/* TLB miss (load or instr. fetch) */
	MachUserGenException,		/* TLB miss (store) */
	MachUserGenException,		/* address error (load or I-fetch) */
	MachUserGenException,		/* address error (store) */
	MachUserGenException,		/* bus error (I-fetch) */
	MachUserGenException,		/* bus error (load or store) */
	MachUserGenException,		/* system call */
	MachUserGenException,		/* breakpoint */
	MachUserGenException,		/* reserved instruction */
	MachUserGenException,		/* coprocessor unusable */
	MachUserGenException,		/* arithmetic overflow */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
};
107 
/* Printable names for each exception, indexed by CP0 exception code. */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};
126 
#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* CP0 status register at trap time */
	u_int	cause;		/* CP0 cause register at trap time */
	u_int	vadr;		/* faulting virtual address (if any) */
	u_int	pc;		/* program counter at trap time */
	u_int	ra;		/* return address register */
	u_int	code;		/* syscall code, or 0 for other traps */
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* ring buffer + cursor */
#endif
138 
/* Per-model error-interrupt handlers, defined later in this file. */
static void pmax_errintr();
static void kn02_errintr(), kn02ba_errintr();
#ifdef DS5000_240
static void kn03_errintr();
#endif
static unsigned kn02ba_recover_erradr();
/* TURBOchannel slot dispatch table and per-model interrupt masks. */
extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
extern u_long kmin_tc3_imask, xine_tc3_imask;
extern const struct callback *callv;	/* PROM callback vector */
#ifdef DS5000_240
extern u_long kn03_tc3_imask;
#endif
/* Model-specific hardware interrupt handler, set at configuration time. */
int (*pmax_hardware_intr)() = (int (*)())0;
extern volatile struct chiptime *Mach_clock_addr;	/* real-time clock chip */
153 
/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occured on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;		/* code reported with the signal, if any */
	register struct proc *p = curproc;
	u_quad_t sticks;		/* system time at entry, for profiling */
	vm_prot_t ftype;		/* fault type for vm_fault() */
	extern unsigned onfault_table[];	/* copyin/copyout recovery pc's */

#ifdef DEBUG
	/* record this trap in the trap history ring buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	/* in kernel mode the saved ra lives in the exception frame (args) */
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	/* extract the exception code; bias by T_USER for user-mode traps */
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		sticks = p->p_sticks;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (kseg2 addresses have the sign bit set) */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
			register vm_offset_t pa;

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* mark the page modified and refresh the TLB entry */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= ~PGOFSET;
			MachTLBUpdate(vadr, entry);
			pa = entry & PG_FRAME;
#ifdef ATTR
			pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: ktlbmod: unmanaged page");
			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		register pt_entry_t *pte;
		register unsigned entry;
		register vm_offset_t pa;
		pmap_t pmap = &p->p_vmspace->vm_pmap;

		if (!(pte = pmap_segmap(pmap, vadr)))
			panic("trap: utlbmod: invalid segmap");
		pte += (vadr >> PGSHIFT) & (NPTEPG - 1);
		entry = pte->pt_entry;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (entry & PG_RO) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		/* mark modified; TLB entry is tagged with the pmap's tlbpid */
		entry |= PG_M;
		pte->pt_entry = entry;
		vadr = (vadr & ~PGOFSET) |
			(pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
		MachTLBUpdate(vadr, entry);
		pa = entry & PG_FRAME;
#ifdef ATTR
		pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: utlbmod: unmanaged page");
		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* unresolved: resume at the registered recovery pc, if any */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
			goto err;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (i == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm;
		register vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		/* fault could not be resolved */
		if (!USERMODE(statusReg)) {
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/* user fault: deliver SIGBUS/SIGSEGV with the faulting address */
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		/* negative cause => syscall was in a branch delay slot */
		if ((int)causeReg < 0)
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		/* Ultrix binaries use the Ultrix syscall table */
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];	/* syscall number is passed in v0 */
		switch (code) {
		case SYS_indir:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			/* remaining args are on the user stack past the 4 arg slots */
			if (i > 3) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					/* copyin failed: return the error to the user */
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___indir:
			/*
			 * Like indir, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2) * sizeof(int));
				if (i) {
					/* copyin failed: return the error to the user */
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			/* first four args arrive in registers a0-a3 */
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					/* copyin failed: return the error to the user */
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* tag the previous history entry with the (negated) syscall code */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		/* set up the user-visible return values per MIPS convention:
		 * v0/v1 carry the result, a3 is the error flag */
		switch (i) {
		case 0:
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			/* back up so the syscall instruction is re-executed */
			locr0[PC] = pc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)
			va += 4;	/* exception was in a branch delay slot */

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* not a single-step breakpoint we planted: deliver SIGTRAP */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP  */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			/* text is write-protected: open it up, patch, re-protect */
			vm_offset_t sa, ea;
			int rv;

			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily hand the FPU to this process */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		/* hand the register state to the kernel debugger */
		extern struct pcb kdbpcb;

		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			/* rebuild the register set from the exception frame */
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		psig(i);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((i = CURSIG(p)) != 0)
			psig(i);
	}

	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
	}

	curpri = p->p_pri;
	return (pc);
}
702 
703 /*
704  * Handle an interrupt.
705  * Called from MachKernIntr() or MachUserIntr()
706  * Note: curproc might be NULL.
707  */
708 interrupt(statusReg, causeReg, pc)
709 	unsigned statusReg;	/* status register at time of the exception */
710 	unsigned causeReg;	/* cause register at time of exception */
711 	unsigned pc;		/* program counter where to continue */
712 {
713 	register unsigned mask;
714 	struct clockframe cf;
715 
716 #ifdef DEBUG
717 	trp->status = statusReg;
718 	trp->cause = causeReg;
719 	trp->vadr = 0;
720 	trp->pc = pc;
721 	trp->ra = 0;
722 	trp->code = 0;
723 	if (++trp == &trapdebug[TRAPSIZE])
724 		trp = trapdebug;
725 #endif
726 
727 	cnt.v_intr++;
728 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
729 	if (pmax_hardware_intr)
730 		splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
731 	if (mask & MACH_INT_MASK_5) {
732 		if (!USERMODE(statusReg)) {
733 #ifdef DEBUG
734 			trapDump("fpintr");
735 #else
736 			printf("FPU interrupt: PC %x CR %x SR %x\n",
737 				pc, causeReg, statusReg);
738 #endif
739 		} else
740 			MachFPInterrupt(statusReg, causeReg, pc);
741 	}
742 	if (mask & MACH_SOFT_INT_MASK_0) {
743 		clearsoftclock();
744 		cnt.v_soft++;
745 		softclock();
746 	}
747 	/* process network interrupt if we trapped or will very soon */
748 	if ((mask & MACH_SOFT_INT_MASK_1) ||
749 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
750 		clearsoftnet();
751 		cnt.v_soft++;
752 #ifdef INET
753 		if (netisr & (1 << NETISR_ARP)) {
754 			netisr &= ~(1 << NETISR_ARP);
755 			arpintr();
756 		}
757 		if (netisr & (1 << NETISR_IP)) {
758 			netisr &= ~(1 << NETISR_IP);
759 			ipintr();
760 		}
761 #endif
762 #ifdef NS
763 		if (netisr & (1 << NETISR_NS)) {
764 			netisr &= ~(1 << NETISR_NS);
765 			nsintr();
766 		}
767 #endif
768 #ifdef ISO
769 		if (netisr & (1 << NETISR_ISO)) {
770 			netisr &= ~(1 << NETISR_ISO);
771 			clnlintr();
772 		}
773 #endif
774 	}
775 }
776 
777 /*
778  * Handle pmax (DECstation 2100/3100) interrupts.
779  */
780 pmax_intr(mask, pc, statusReg, causeReg)
781 	unsigned mask;
782 	unsigned pc;
783 	unsigned statusReg;
784 	unsigned causeReg;
785 {
786 	register volatile struct chiptime *c = Mach_clock_addr;
787 	struct clockframe cf;
788 	int temp;
789 
790 	/* handle clock interrupts ASAP */
791 	if (mask & MACH_INT_MASK_3) {
792 		temp = c->regc;	/* XXX clear interrupt bits */
793 		cf.pc = pc;
794 		cf.sr = statusReg;
795 		hardclock(&cf);
796 		/* keep clock interrupts enabled */
797 		causeReg &= ~MACH_INT_MASK_3;
798 	}
799 	/* Re-enable clock interrupts */
800 	splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
801 #if NSII > 0
802 	if (mask & MACH_INT_MASK_0)
803 		siiintr(0);
804 #endif
805 #if NLE > 0
806 	if (mask & MACH_INT_MASK_1)
807 		leintr(0);
808 #endif
809 #if NDC > 0
810 	if (mask & MACH_INT_MASK_2)
811 		dcintr(0);
812 #endif
813 	if (mask & MACH_INT_MASK_4)
814 		pmax_errintr();
815 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
816 		MACH_SR_INT_ENA_CUR);
817 }
818 
/*
 * Handle hardware interrupts for the KN02. (DECstation 5000/200)
 * Returns spl value.
 */
kn02_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register unsigned i, m;
	register volatile struct chiptime *c = Mach_clock_addr;
	register unsigned csr;
	int temp;
	struct clockframe cf;
	static int warned = 0;	/* power supply warning already printed */

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		/* check the power-supply warning bit while we are here */
		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		if ((csr & KN02_CSR_PSWARN) && !warned) {
			warned = 1;
			printf("WARNING: power supply is overheating!\n");
		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
			warned = 0;
			printf("WARNING: power supply is OK again\n");
		}

		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);

		/* keep clock interrupts enabled */
		causeReg &= ~MACH_INT_MASK_1;
	}
	/* Re-enable clock interrupts */
	splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	if (mask & MACH_INT_MASK_0) {

		/* pending I/O interrupts that are also enabled in the CSR */
		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
#if 0
		/* NB(review): dead code; also misspells MACH_PHYS_TO_UNCACHED */
		*(unsigned *)MACHPHYS_TO_UNCACHED(KN02_SYS_CSR) =
			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
			(m << KN02_CSR_IOINTEN_SHIFT);
#endif
		/* dispatch each pending slot through the TC slot table */
		for (i = 0; m; i++, m >>= 1) {
			if (!(m & 1))
				continue;
			if (tc_slot_info[i].intr)
				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
			else
				printf("spurious interrupt %d\n", i);
		}
#if 0
		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
			csr & ~(KN02_CSR_WRESERVED | 0xFF);
#endif
	}
	if (mask & MACH_INT_MASK_3)
		kn02_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
884 
/*
 * 3min hardware interrupts. (DECstation 5000/1xx)
 * Dispatches IOASIC-multiplexed interrupts (clock, SCSI, LANCE, SCCs)
 * and per-slot TURBOchannel interrupts; returns the spl value to restore.
 */
kmin_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
	volatile u_int *intrp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
	unsigned int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* power supply warnings printed so far */

	/* restrict the IOASIC mask to the configured interrupt set */
	old_mask = *imaskp & kmin_tc3_imask;
	*imaskp = old_mask;

	/* halt button: call back into the PROM */
	if (mask & MACH_INT_MASK_4)
		(*callv->halt)((int *)0, 0);
	if (mask & MACH_INT_MASK_3) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge (and otherwise ignore) SCSI/LANCE DMA errors */
		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);

		if (intr & KMIN_INTR_LANCE_READ_E)
			*intrp &= ~KMIN_INTR_LANCE_READ_E;

		if (intr & KMIN_INTR_TIMEOUT)
			kn02ba_errintr();

		if (intr & KMIN_INTR_CLOCK) {
			temp = c->regc;	/* XXX clear interrupt bits */
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
		}

		/* dispatch the fixed IOASIC devices through the slot table */
		if ((intr & KMIN_INTR_SCC_0) &&
			tc_slot_info[KMIN_SCC0_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
			(tc_slot_info[KMIN_SCC0_SLOT].unit);

		if ((intr & KMIN_INTR_SCC_1) &&
			tc_slot_info[KMIN_SCC1_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
			(tc_slot_info[KMIN_SCC1_SLOT].unit);

		if ((intr & KMIN_INTR_SCSI) &&
			tc_slot_info[KMIN_SCSI_SLOT].intr)
			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
			(tc_slot_info[KMIN_SCSI_SLOT].unit);

		if ((intr & KMIN_INTR_LANCE) &&
			tc_slot_info[KMIN_LANCE_SLOT].intr)
			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
			(tc_slot_info[KMIN_LANCE_SLOT].unit);

		/* rate-limited power supply warnings (at most 3) */
		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	/* TURBOchannel option slots 0-2 */
	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
976 
/*
 * Maxine hardware interrupts. (Personal DECstation 5000/xx)
 * Dispatches IOASIC-multiplexed interrupts (clock, SCSI, LANCE, SCC,
 * desktop bus, floppy, ISDN, TC slots); returns the spl value to restore.
 */
xine_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;

	/* restrict the IOASIC mask to the configured interrupt set */
	old_mask = *imaskp & xine_tc3_imask;
	*imaskp = old_mask;

	/* halt button: call back into the PROM */
	if (mask & MACH_INT_MASK_4)
		(*callv->halt)((int *)0, 0);

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge (and otherwise ignore) SCSI/LANCE DMA errors */
		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);

		if (intr & XINE_INTR_LANCE_READ_E)
			*intrp &= ~XINE_INTR_LANCE_READ_E;

		/* dispatch each pending device through the TC slot table */
		if ((intr & XINE_INTR_SCC_0) &&
			tc_slot_info[XINE_SCC0_SLOT].intr)
			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
			(tc_slot_info[XINE_SCC0_SLOT].unit);

		if ((intr & XINE_INTR_DTOP_RX) &&
			tc_slot_info[XINE_DTOP_SLOT].intr)
			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
			(tc_slot_info[XINE_DTOP_SLOT].unit);

		if ((intr & XINE_INTR_FLOPPY) &&
			tc_slot_info[XINE_FLOPPY_SLOT].intr)
			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
			(tc_slot_info[XINE_FLOPPY_SLOT].unit);

		if ((intr & XINE_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & XINE_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & XINE_INTR_ISDN) &&
			tc_slot_info[XINE_ISDN_SLOT].intr)
			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
			(tc_slot_info[XINE_ISDN_SLOT].unit);

		if ((intr & XINE_INTR_SCSI) &&
			tc_slot_info[XINE_SCSI_SLOT].intr)
			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
			(tc_slot_info[XINE_SCSI_SLOT].unit);

		if ((intr & XINE_INTR_LANCE) &&
			tc_slot_info[XINE_LANCE_SLOT].intr)
			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
			(tc_slot_info[XINE_LANCE_SLOT].unit);

	}
	if (mask & MACH_INT_MASK_2)
		kn02ba_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1076 
1077 #ifdef DS5000_240
/*
 * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
 *
 * mask holds the pending hardware interrupt bits to service; pc,
 * statusReg and causeReg are the CPU state at the time of the
 * interrupt.  Returns a status register value with still-pending
 * interrupt levels masked off and interrupts enabled.
 */
kn03_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	/* IOASIC interrupt mask and interrupt request registers (uncached) */
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* throttles power-supply warnings */

	/* restrict the IOASIC mask to the interrupts we service here */
	old_mask = *imaskp & kn03_tc3_imask;
	*imaskp = old_mask;

	/* halt request: return control to the PROM */
	if (mask & MACH_INT_MASK_4)
		(*callv->halt)((int *)0, 0);

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	/* device interrupts all arrive on INT 0; demultiplex via IOASIC */
	if (mask & MACH_INT_MASK_0) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		/* acknowledge the SCSI DMA pointer-load condition */
		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge (and otherwise ignore) DMA error conditions */
		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);

		if (intr & KN03_INTR_LANCE_READ_E)
			*intrp &= ~KN03_INTR_LANCE_READ_E;

		/* dispatch each requesting device to its attached driver */
		if ((intr & KN03_INTR_SCC_0) &&
			tc_slot_info[KN03_SCC0_SLOT].intr)
			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
			(tc_slot_info[KN03_SCC0_SLOT].unit);

		if ((intr & KN03_INTR_SCC_1) &&
			tc_slot_info[KN03_SCC1_SLOT].intr)
			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
			(tc_slot_info[KN03_SCC1_SLOT].unit);

		if ((intr & KN03_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & KN03_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & KN03_INTR_TC_2) &&
			tc_slot_info[2].intr)
			(*(tc_slot_info[2].intr))
			(tc_slot_info[2].unit);

		if ((intr & KN03_INTR_SCSI) &&
			tc_slot_info[KN03_SCSI_SLOT].intr)
			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
			(tc_slot_info[KN03_SCSI_SLOT].unit);

		if ((intr & KN03_INTR_LANCE) &&
			tc_slot_info[KN03_LANCE_SLOT].intr)
			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
			(tc_slot_info[KN03_LANCE_SLOT].unit);

		/* power-supply temperature warnings, limited to 3 messages */
		if (user_warned && ((intr & KN03_INTR_PSWARN) == 0)) {
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KN03_INTR_PSWARN) && (user_warned < 3)) {
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	/* memory error */
	if (mask & MACH_INT_MASK_3)
		kn03_errintr();
	/* re-enable interrupts that were enabled and are no longer pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1181 #endif /* DS5000_240 */
1182 
1183 /*
1184  * This is called from MachUserIntr() if astpending is set.
1185  * This is very similar to the tail of trap().
1186  */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	int sig;

	cnt.v_soft++;
	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	/* recompute scheduling priority before returning to user mode */
	p->p_pri = p->p_usrpri;
	astpending = 0;
	/* post any profiling tick charged by the statistics clock */
	if (p->p_flag & SOWEUPC) {
		p->p_flag &= ~SOWEUPC;
		ADDUPROF(p);
	}
	/* give up the CPU if a reschedule was requested */
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;	/* involuntary context switch */
		swtch();
		splx(s);
		/* signals may have arrived while we were switched out */
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}
	curpri = p->p_pri;
}
1225 
1226 #ifdef DEBUG
/*
 * Dump the saved trap-history ring buffer (trapdebug/trp) to the
 * console, newest entry first, then clear the buffer.  msg tags the
 * dump so the call site can be identified.
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();	/* keep the trace buffer stable while we walk it */
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* step backwards through the circular buffer */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;	/* unused entry: end of recorded history */
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf("   RA %x code %d\n", trp-> ra, trp->code);
	}
	/* reset the buffer so the next dump only shows new traps */
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
1252 #endif
1253 
1254 /*
1255  *----------------------------------------------------------------------
1256  *
1257  * MemErrorInterrupts --
1258  *   pmax_errintr - for the DS2100/DS3100
1259  *   kn02_errintr - for the DS5000/200
1260  *   kn02ba_errintr - for the DS5000/1xx and DS5000/xx
1261  *
 *	Handle an interrupt for the control register.
1263  *
1264  * Results:
1265  *	None.
1266  *
1267  * Side effects:
1268  *	None.
1269  *
1270  *----------------------------------------------------------------------
1271  */
/*
 * Memory error interrupt on the DS2100/DS3100 (KN01): panic if a
 * memory error is latched in the system CSR, otherwise clear the
 * error condition in the CSR and return.
 */
static void
pmax_errintr()
{
	volatile u_short *sysCSRPtr =
		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
	u_short csr;

	csr = *sysCSRPtr;

	/* a latched memory error is fatal; report the failing address */
	if (csr & KN01_CSR_MERR) {
		printf("Memory error at 0x%x\n",
			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
		panic("Mem error interrupt");
	}
	/* write back the CSR with the low byte all ones to clear it */
	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
}
1288 
/*
 * Memory error interrupt on the DS5000/200 (KN02): report the failing
 * address and error type, then panic.  A spurious interrupt (no valid
 * error latched) is silently ignored.
 */
static void
kn02_errintr()
{
	u_int erradr, chksyn;

	/* latch the error address and check/syndrome, then reset the latch */
	erradr = *(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR);
	chksyn = *(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CHKSYN);
	*(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
	MachEmptyWriteBuffer();

	/* no valid error recorded: spurious interrupt */
	if (!(erradr & KN02_ERR_VALID))
		return;
	printf("%s memory %s %s error at 0x%x\n",
		(erradr & KN02_ERR_CPU) ? "CPU" : "DMA",
		(erradr & KN02_ERR_WRITE) ? "write" : "read",
		(erradr & KN02_ERR_ECCERR) ? "ECC" : "timeout",
		(erradr & KN02_ERR_ADDRESS));
	if (erradr & KN02_ERR_ECCERR) {
		/* clear and report the ECC syndrome as well */
		*(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CHKSYN) = 0;
		MachEmptyWriteBuffer();
		printf("ECC 0x%x\n", chksyn);
	}
	panic("Mem error interrupt");
}
1313 
1314 #ifdef DS5000_240
/*
 * Memory error interrupt on the DS5000/240 (KN03): report and clear
 * the latched error address.  Unlike the other models this is not
 * treated as fatal.
 */
static void
kn03_errintr()
{

	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR));
	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR) = 0;
	MachEmptyWriteBuffer();
}
1323 #endif /* DS5000_240 */
1324 
/*
 * Memory error interrupt on the DS5000/1xx and DS5000/xx (kn02ba):
 * report the physical address of the failing memory chip, recovered
 * from the error registers.  Not treated as fatal.
 */
static void
kn02ba_errintr()
{
	register int mer, adr, siz;
	static int errintr_cnt = 0;	/* total memory errors seen so far */

	/* read the memory size, memory error and address error registers */
	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);

	/* clear interrupt bit */
	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;

	errintr_cnt++;
	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
	       "Bad memory chip at phys ",
	       kn02ba_recover_erradr(adr, mer),
	       mer, siz, adr);
}
1344 
1345 static unsigned
1346 kn02ba_recover_erradr(phys, mer)
1347 	register unsigned phys, mer;
1348 {
1349 	/* phys holds bits 28:2, mer knows which byte */
1350 	switch (mer & KMIN_MER_LASTBYTE) {
1351 	case KMIN_LASTB31:
1352 		mer = 3; break;
1353 	case KMIN_LASTB23:
1354 		mer = 2; break;
1355 	case KMIN_LASTB15:
1356 		mer = 1; break;
1357 	case KMIN_LASTB07:
1358 		mer = 0; break;
1359 	}
1360 	return ((phys & KMIN_AER_ADDR_MASK) | mer);
1361 }
1362 
/*
 * Return the resulting PC as if the branch was executed.
 *
 * regsPtr points at the saved integer register set, instPC at the
 * instruction to interpret, and fpcCSR is the floating point
 * coprocessor status (consulted for coprocessor branch conditions).
 * When allowNonBranch is set, a non-branch instruction simply yields
 * instPC + 4; otherwise it is treated as a fatal error.
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;
	unsigned instPC;
	unsigned fpcCSR;
	int allowNonBranch;
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

/* NOTE(review): the disabled printf passes *instPC, but instPC is an
   unsigned, not a pointer; this would not compile if enabled. */
#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* register jumps: the target comes from rs */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		/* branches on the sign of rs; rt selects the variant */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;	/* skip delay slot */
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* absolute jump within the current 256MB region */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		/* floating point branches test the FP condition bit */
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}
1488 
1489 unsigned
1490 GetBranchDest(InstPtr)
1491 	InstFmt *InstPtr;
1492 {
1493 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1494 }
1495 
/*
 * This routine is called by procxmt() to single step one instruction.
 * We do this by storing a break instruction after the current instruction,
 * resuming execution, and then restoring the old instruction.
 * Returns 0 on success or EFAULT if the breakpoint cannot be planted.
 */
cpu_singlestep(p)
	register struct proc *p;
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;	/* saved user registers */
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): the second test below is redundant -- whenever
	 * md_ss_addr == va and both are nonzero, the first test already
	 * fires.  Possibly a different condition was intended; verify.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	/* remember where the breakpoint goes and what it replaces */
	p->p_md.md_ss_addr = va;
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		/* text is probably write protected: open it up temporarily */
		vm_offset_t sa, ea;
		int rv;

		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			/* restore read/execute-only protection */
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}
1540 
1541 #ifdef DEBUG
/*
 * Fetch one word from kernel memory for the debugger.  MIPS word
 * loads must be 4-byte aligned; an unaligned address is reported
 * and -1 returned instead of faulting.
 */
kdbpeek(addr)
{
	if ((addr & 3) == 0)
		return (*(int *)addr);
	printf("kdbpeek: unaligned address %x\n", addr);
	return (-1);
}
1550 
1551 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1552 
1553 /*
1554  * Print a stack backtrace.
1555  */
1556 void
1557 stacktrace(a0, a1, a2, a3)
1558 	int a0, a1, a2, a3;
1559 {
1560 	unsigned pc, sp, fp, ra, va, subr;
1561 	unsigned instr, mask;
1562 	InstFmt i;
1563 	int more, stksize;
1564 	int regs[3];
1565 	extern setsoftclock();
1566 	extern char start[], edata[];
1567 
1568 	cpu_getregs(regs);
1569 
1570 	/* get initial values from the exception frame */
1571 	sp = regs[0];
1572 	pc = regs[1];
1573 	ra = 0;
1574 	fp = regs[2];
1575 
1576 loop:
1577 	/* check for current PC in the kernel interrupt handler code */
1578 	if (pc >= (unsigned)MachKernIntr && pc < (unsigned)MachUserIntr) {
1579 		/* NOTE: the offsets depend on the code in locore.s */
1580 		printf("interrupt\n");
1581 		a0 = kdbpeek(sp + 36);
1582 		a1 = kdbpeek(sp + 40);
1583 		a2 = kdbpeek(sp + 44);
1584 		a3 = kdbpeek(sp + 48);
1585 		pc = kdbpeek(sp + 20);
1586 		ra = kdbpeek(sp + 92);
1587 		sp = kdbpeek(sp + 100);
1588 		fp = kdbpeek(sp + 104);
1589 	}
1590 
1591 	/* check for current PC in the exception handler code */
1592 	if (pc >= 0x80000000 && pc < (unsigned)setsoftclock) {
1593 		ra = 0;
1594 		subr = 0;
1595 		goto done;
1596 	}
1597 
1598 	/* check for bad PC */
1599 	if (pc & 3 || pc < 0x80000000 || pc >= (unsigned)edata) {
1600 		printf("PC 0x%x: not in kernel\n", pc);
1601 		ra = 0;
1602 		subr = 0;
1603 		goto done;
1604 	}
1605 
1606 	/*
1607 	 * Find the beginning of the current subroutine by scanning backwards
1608 	 * from the current PC for the end of the previous subroutine.
1609 	 */
1610 	va = pc - sizeof(int);
1611 	while ((instr = kdbpeek(va)) != MIPS_JR_RA)
1612 		va -= sizeof(int);
1613 	va += 2 * sizeof(int);	/* skip back over branch & delay slot */
1614 	/* skip over nulls which might separate .o files */
1615 	while ((instr = kdbpeek(va)) == 0)
1616 		va += sizeof(int);
1617 	subr = va;
1618 
1619 	/* scan forwards to find stack size and any saved registers */
1620 	stksize = 0;
1621 	more = 3;
1622 	mask = 0;
1623 	for (; more; va += sizeof(int), more = (more == 3) ? 3 : more - 1) {
1624 		/* stop if hit our current position */
1625 		if (va >= pc)
1626 			break;
1627 		instr = kdbpeek(va);
1628 		i.word = instr;
1629 		switch (i.JType.op) {
1630 		case OP_SPECIAL:
1631 			switch (i.RType.func) {
1632 			case OP_JR:
1633 			case OP_JALR:
1634 				more = 2; /* stop after next instruction */
1635 				break;
1636 
1637 			case OP_SYSCALL:
1638 			case OP_BREAK:
1639 				more = 1; /* stop now */
1640 			};
1641 			break;
1642 
1643 		case OP_BCOND:
1644 		case OP_J:
1645 		case OP_JAL:
1646 		case OP_BEQ:
1647 		case OP_BNE:
1648 		case OP_BLEZ:
1649 		case OP_BGTZ:
1650 			more = 2; /* stop after next instruction */
1651 			break;
1652 
1653 		case OP_COP0:
1654 		case OP_COP1:
1655 		case OP_COP2:
1656 		case OP_COP3:
1657 			switch (i.RType.rs) {
1658 			case OP_BCx:
1659 			case OP_BCy:
1660 				more = 2; /* stop after next instruction */
1661 			};
1662 			break;
1663 
1664 		case OP_SW:
1665 			/* look for saved registers on the stack */
1666 			if (i.IType.rs != 29)
1667 				break;
1668 			/* only restore the first one */
1669 			if (mask & (1 << i.IType.rt))
1670 				break;
1671 			mask |= 1 << i.IType.rt;
1672 			switch (i.IType.rt) {
1673 			case 4: /* a0 */
1674 				a0 = kdbpeek(sp + (short)i.IType.imm);
1675 				break;
1676 
1677 			case 5: /* a1 */
1678 				a1 = kdbpeek(sp + (short)i.IType.imm);
1679 				break;
1680 
1681 			case 6: /* a2 */
1682 				a2 = kdbpeek(sp + (short)i.IType.imm);
1683 				break;
1684 
1685 			case 7: /* a3 */
1686 				a3 = kdbpeek(sp + (short)i.IType.imm);
1687 				break;
1688 
1689 			case 30: /* fp */
1690 				fp = kdbpeek(sp + (short)i.IType.imm);
1691 				break;
1692 
1693 			case 31: /* ra */
1694 				ra = kdbpeek(sp + (short)i.IType.imm);
1695 			}
1696 			break;
1697 
1698 		case OP_ADDI:
1699 		case OP_ADDIU:
1700 			/* look for stack pointer adjustment */
1701 			if (i.IType.rs != 29 || i.IType.rt != 29)
1702 				break;
1703 			stksize = (short)i.IType.imm;
1704 		}
1705 	}
1706 
1707 done:
1708 	printf("%x+%x (%x,%x,%x,%x) ra %x sz %d\n",
1709 		subr, pc - subr, a0, a1, a2, a3, ra, stksize);
1710 
1711 	if (ra) {
1712 		if (pc == ra && stksize == 0)
1713 			printf("stacktrace: loop!\n");
1714 		else {
1715 			pc = ra;
1716 			sp -= stksize;
1717 			goto loop;
1718 		}
1719 	}
1720 }
1721 #endif /* DEBUG */
1722