xref: /original-bsd/sys/pmax/pmax/trap.c (revision 3705696b)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	8.1 (Berkeley) 07/13/93
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29 
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36 
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49 
50 #include <pmax/stand/dec_prom.h>
51 
52 #include <asc.h>
53 #include <sii.h>
54 #include <le.h>
55 #include <dc.h>
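/*
 * <asc.h>, <sii.h>, <le.h> and <dc.h> are generated by config(8) and
 * define the configured device counts (NASC, NSII, NLE, NDC) that the
 * interrupt dispatch code below tests with "#if N... > 0".
 */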
56 
57 struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */
58 
59 extern void MachKernGenException();
60 extern void MachUserGenException();
61 extern void MachKernIntr();
62 extern void MachUserIntr();
63 extern void MachTLBModException();
64 extern void MachTLBMissException();
65 extern unsigned MachEmulateBranch();
66 
67 void (*machExceptionTable[])() = {
68 /*
69  * The kernel exception handlers.
70  */
71 	MachKernIntr,			/* external interrupt */
72 	MachKernGenException,		/* TLB modification */
73 	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
74 	MachTLBMissException,		/* TLB miss (store) */
75 	MachKernGenException,		/* address error (load or I-fetch) */
76 	MachKernGenException,		/* address error (store) */
77 	MachKernGenException,		/* bus error (I-fetch) */
78 	MachKernGenException,		/* bus error (load or store) */
79 	MachKernGenException,		/* system call */
80 	MachKernGenException,		/* breakpoint */
81 	MachKernGenException,		/* reserved instruction */
82 	MachKernGenException,		/* coprocessor unusable */
83 	MachKernGenException,		/* arithmetic overflow */
84 	MachKernGenException,		/* reserved */
85 	MachKernGenException,		/* reserved */
86 	MachKernGenException,		/* reserved */
87 /*
88  * The user exception handlers.
89  */
90 	MachUserIntr,
91 	MachUserGenException,
92 	MachUserGenException,
93 	MachUserGenException,
94 	MachUserGenException,
95 	MachUserGenException,
96 	MachUserGenException,
97 	MachUserGenException,
98 	MachUserGenException,
99 	MachUserGenException,
100 	MachUserGenException,
101 	MachUserGenException,
102 	MachUserGenException,
103 	MachUserGenException,
104 	MachUserGenException,
105 	MachUserGenException,
106 };
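/*
 * Note: this table is evidently indexed by the exception code taken from
 * the cause register, with the second group of 16 entries used for
 * exceptions taken from user mode (cf. the T_USER offset in trap() below).
 */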
107 
108 char	*trap_type[] = {
109 	"external interrupt",
110 	"TLB modification",
111 	"TLB miss (load or instr. fetch)",
112 	"TLB miss (store)",
113 	"address error (load or I-fetch)",
114 	"address error (store)",
115 	"bus error (I-fetch)",
116 	"bus error (load or store)",
117 	"system call",
118 	"breakpoint",
119 	"reserved instruction",
120 	"coprocessor unusable",
121 	"arithmetic overflow",
122 	"reserved 13",
123 	"reserved 14",
124 	"reserved 15",
125 };
126 
127 #ifdef DEBUG
128 #define TRAPSIZE	10
129 struct trapdebug {		/* trap history buffer for debugging */
130 	u_int	status;
131 	u_int	cause;
132 	u_int	vadr;
133 	u_int	pc;
134 	u_int	ra;
135 	u_int	code;
136 } trapdebug[TRAPSIZE], *trp = trapdebug;
137 #endif
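/*
 * trapdebug is a small circular trace buffer: trp always points at the
 * next free slot and wraps after TRAPSIZE entries; trapDump() below
 * prints it most recent entry first.
 */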
138 
139 static void pmax_errintr();
140 static void kn02_errintr(), kn02ba_errintr();
141 #ifdef DS5000_240
142 static void kn03_errintr();
143 #endif
144 static unsigned kn02ba_recover_erradr();
145 extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
146 extern u_long kmin_tc3_imask, xine_tc3_imask;
147 extern const struct callback *callv;
148 #ifdef DS5000_240
149 extern u_long kn03_tc3_imask;
150 #endif
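/*
 * pmax_hardware_intr points to the model specific interrupt handler
 * (pmax_intr(), kn02_intr(), kmin_intr(), xine_intr() or kn03_intr()
 * below); it is presumably filled in by the machine startup code, since
 * this file only ever calls through it.
 */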
151 int (*pmax_hardware_intr)() = (int (*)())0;
152 extern volatile struct chiptime *Mach_clock_addr;
153 
154 /*
155  * Handle an exception.
156  * Called from MachKernGenException() or MachUserGenException()
157  * when a processor trap occurs.
158  * In the case of a kernel trap, we return the PC at which to resume if
159  * ((struct pcb *)UADDR)->pcb_onfault is set; otherwise we return the old PC.
160  */
161 unsigned
162 trap(statusReg, causeReg, vadr, pc, args)
163 	unsigned statusReg;	/* status register at time of the exception */
164 	unsigned causeReg;	/* cause register at time of exception */
165 	unsigned vadr;		/* address (if any) the fault occurred on */
166 	unsigned pc;		/* program counter where to continue */
167 {
168 	register int type, i;
169 	unsigned ucode = 0;
170 	register struct proc *p = curproc;
171 	u_quad_t sticks;
172 	vm_prot_t ftype;
173 	extern unsigned onfault_table[];
174 
175 #ifdef DEBUG
176 	trp->status = statusReg;
177 	trp->cause = causeReg;
178 	trp->vadr = vadr;
179 	trp->pc = pc;
180 	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
181 		p->p_md.md_regs[RA];
182 	trp->code = 0;
183 	if (++trp == &trapdebug[TRAPSIZE])
184 		trp = trapdebug;
185 #endif
186 
187 	cnt.v_trap++;
188 	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
189 	if (USERMODE(statusReg)) {
190 		type |= T_USER;
191 		sticks = p->p_sticks;
192 	}
193 
194 	/*
195 	 * Enable hardware interrupts if they were on before.
196 	 * We only respond to software interrupts when returning to user mode.
197 	 */
198 	if (statusReg & MACH_SR_INT_ENA_PREV)
199 		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);
200 
201 	switch (type) {
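	/*
	 * TLB modified exceptions: on these CPUs the TLB "dirty" bit is
	 * really a write-enable bit, so clean pages are entered with it
	 * clear and the first store traps here.  We set the software
	 * modified bit (PG_M), reload the TLB entry and tell the VM
	 * system the page is no longer clean; genuinely read-only pages
	 * are passed on to the fault code instead.
	 */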
202 	case T_TLB_MOD:
203 		/* check for kernel address */
204 		if ((int)vadr < 0) {
205 			register pt_entry_t *pte;
206 			register unsigned entry;
207 			register vm_offset_t pa;
208 
209 			pte = kvtopte(vadr);
210 			entry = pte->pt_entry;
211 #ifdef DIAGNOSTIC
212 			if (!(entry & PG_V) || (entry & PG_M))
213 				panic("trap: ktlbmod: invalid pte");
214 #endif
215 			if (entry & PG_RO) {
216 				/* write to read only page in the kernel */
217 				ftype = VM_PROT_WRITE;
218 				goto kernel_fault;
219 			}
220 			entry |= PG_M;
221 			pte->pt_entry = entry;
222 			vadr &= ~PGOFSET;
223 			MachTLBUpdate(vadr, entry);
224 			pa = entry & PG_FRAME;
225 #ifdef ATTR
226 			pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
227 #else
228 			if (!IS_VM_PHYSADDR(pa))
229 				panic("trap: ktlbmod: unmanaged page");
230 			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
231 #endif
232 			return (pc);
233 		}
234 		/* FALLTHROUGH */
235 
236 	case T_TLB_MOD+T_USER:
237 	    {
238 		register pt_entry_t *pte;
239 		register unsigned entry;
240 		register vm_offset_t pa;
241 		pmap_t pmap = &p->p_vmspace->vm_pmap;
242 
243 		if (!(pte = pmap_segmap(pmap, vadr)))
244 			panic("trap: utlbmod: invalid segmap");
245 		pte += (vadr >> PGSHIFT) & (NPTEPG - 1);
246 		entry = pte->pt_entry;
247 #ifdef DIAGNOSTIC
248 		if (!(entry & PG_V) || (entry & PG_M))
249 			panic("trap: utlbmod: invalid pte");
250 #endif
251 		if (entry & PG_RO) {
252 			/* write to read only page */
253 			ftype = VM_PROT_WRITE;
254 			goto dofault;
255 		}
256 		entry |= PG_M;
257 		pte->pt_entry = entry;
258 		vadr = (vadr & ~PGOFSET) |
259 			(pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
260 		MachTLBUpdate(vadr, entry);
261 		pa = entry & PG_FRAME;
262 #ifdef ATTR
263 		pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
264 #else
265 		if (!IS_VM_PHYSADDR(pa))
266 			panic("trap: utlbmod: unmanaged page");
267 		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
268 #endif
269 		if (!USERMODE(statusReg))
270 			return (pc);
271 		goto out;
272 	    }
273 
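	/*
	 * TLB misses: a kernel-mode miss on a user address is only legal
	 * while a copyin()/copyout() style routine has registered a
	 * recovery point by setting pcb_onfault to an index into
	 * onfault_table[].  Index 4 belongs to fuswintr()/suswintr(),
	 * which cannot afford to page, so that case returns to the
	 * recovery PC at once rather than calling vm_fault().
	 */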
274 	case T_TLB_LD_MISS:
275 	case T_TLB_ST_MISS:
276 		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
277 		/* check for kernel address */
278 		if ((int)vadr < 0) {
279 			register vm_offset_t va;
280 			int rv;
281 
282 		kernel_fault:
283 			va = trunc_page((vm_offset_t)vadr);
284 			rv = vm_fault(kernel_map, va, ftype, FALSE);
285 			if (rv == KERN_SUCCESS)
286 				return (pc);
287 			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
288 				((struct pcb *)UADDR)->pcb_onfault = 0;
289 				return (onfault_table[i]);
290 			}
291 			goto err;
292 		}
293 		/*
294 		 * It is an error for the kernel to access user space except
295 		 * through the copyin/copyout routines.
296 		 */
297 		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
298 			goto err;
299 		/* check for fuswintr() or suswintr() getting a page fault */
300 		if (i == 4)
301 			return (onfault_table[i]);
302 		goto dofault;
303 
304 	case T_TLB_LD_MISS+T_USER:
305 		ftype = VM_PROT_READ;
306 		goto dofault;
307 
308 	case T_TLB_ST_MISS+T_USER:
309 		ftype = VM_PROT_WRITE;
310 	dofault:
311 	    {
312 		register vm_offset_t va;
313 		register struct vmspace *vm;
314 		register vm_map_t map;
315 		int rv;
316 
317 		vm = p->p_vmspace;
318 		map = &vm->vm_map;
319 		va = trunc_page((vm_offset_t)vadr);
320 		rv = vm_fault(map, va, ftype, FALSE);
321 		/*
322 		 * If this was a stack access we keep track of the maximum
323 		 * accessed stack size.  Also, if vm_fault gets a protection
324 		 * failure it is due to accessing the stack region outside
325 		 * the current limit and we need to reflect that as an access
326 		 * error.
327 		 */
328 		if ((caddr_t)va >= vm->vm_maxsaddr) {
329 			if (rv == KERN_SUCCESS) {
330 				unsigned nss;
331 
332 				nss = clrnd(btoc(USRSTACK-(unsigned)va));
333 				if (nss > vm->vm_ssize)
334 					vm->vm_ssize = nss;
335 			} else if (rv == KERN_PROTECTION_FAILURE)
336 				rv = KERN_INVALID_ADDRESS;
337 		}
338 		if (rv == KERN_SUCCESS) {
339 			if (!USERMODE(statusReg))
340 				return (pc);
341 			goto out;
342 		}
343 		if (!USERMODE(statusReg)) {
344 			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
345 				((struct pcb *)UADDR)->pcb_onfault = 0;
346 				return (onfault_table[i]);
347 			}
348 			goto err;
349 		}
350 		ucode = vadr;
351 		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
352 		break;
353 	    }
354 
355 	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
356 	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
357 	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
358 	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
359 		i = SIGSEGV;
360 		break;
361 
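	/*
	 * System calls: the call number arrives in v0 (or, for
	 * SYS_syscall/SYS___syscall, in the first argument registers),
	 * up to four arguments in a0-a3 and any remainder on the user
	 * stack.  The saved PC is advanced past the syscall instruction;
	 * if it sat in a branch delay slot (sign bit of the cause
	 * register set) MachEmulateBranch() supplies the branch target
	 * instead.  Results return in v0/v1 with a3 non-zero on error.
	 */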
362 	case T_SYSCALL+T_USER:
363 	    {
364 		register int *locr0 = p->p_md.md_regs;
365 		register struct sysent *callp;
366 		unsigned int code;
367 		int numsys;
368 		struct args {
369 			int i[8];
370 		} args;
371 		int rval[2];
372 		struct sysent *systab;
373 		extern int nsysent;
374 #ifdef ULTRIXCOMPAT
375 		extern struct sysent ultrixsysent[];
376 		extern int ultrixnsysent;
377 #endif
378 
379 		cnt.v_syscall++;
380 		/* compute next PC after syscall instruction */
381 		if ((int)causeReg < 0)
382 			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
383 		else
384 			locr0[PC] += 4;
385 		systab = sysent;
386 		numsys = nsysent;
387 #ifdef ULTRIXCOMPAT
388 		if (p->p_md.md_flags & MDP_ULTRIX) {
389 			systab = ultrixsysent;
390 			numsys = ultrixnsysent;
391 		}
392 #endif
393 		code = locr0[V0];
394 		switch (code) {
395 		case SYS_syscall:
396 			/*
397 			 * Code is first argument, followed by actual args.
398 			 */
399 			code = locr0[A0];
400 			if (code >= numsys)
401 				callp = &systab[SYS_syscall]; /* (illegal) */
402 			else
403 				callp = &systab[code];
404 			i = callp->sy_narg;
405 			args.i[0] = locr0[A1];
406 			args.i[1] = locr0[A2];
407 			args.i[2] = locr0[A3];
408 			if (i > 3) {
409 				i = copyin((caddr_t)(locr0[SP] +
410 						4 * sizeof(int)),
411 					(caddr_t)&args.i[3],
412 					(u_int)(i - 3) * sizeof(int));
413 				if (i) {
414 					locr0[V0] = i;
415 					locr0[A3] = 1;
416 #ifdef KTRACE
417 					if (KTRPOINT(p, KTR_SYSCALL))
418 						ktrsyscall(p->p_tracep, code,
419 							callp->sy_narg, args.i);
420 #endif
421 					goto done;
422 				}
423 			}
424 			break;
425 
426 		case SYS___syscall:
427 			/*
428 			 * Like syscall, but code is a quad, so as to maintain
429 			 * quad alignment for the rest of the arguments.
430 			 */
431 			code = locr0[A0 + _QUAD_LOWWORD];
432 			if (code >= numsys)
433 				callp = &systab[SYS_syscall]; /* (illegal) */
434 			else
435 				callp = &systab[code];
436 			i = callp->sy_narg;
437 			args.i[0] = locr0[A2];
438 			args.i[1] = locr0[A3];
439 			if (i > 2) {
440 				i = copyin((caddr_t)(locr0[SP] +
441 						4 * sizeof(int)),
442 					(caddr_t)&args.i[2],
443 					(u_int)(i - 2) * sizeof(int));
444 				if (i) {
445 					locr0[V0] = i;
446 					locr0[A3] = 1;
447 #ifdef KTRACE
448 					if (KTRPOINT(p, KTR_SYSCALL))
449 						ktrsyscall(p->p_tracep, code,
450 							callp->sy_narg, args.i);
451 #endif
452 					goto done;
453 				}
454 			}
455 			break;
456 
457 		default:
458 			if (code >= numsys)
459 				callp = &systab[SYS_syscall]; /* (illegal) */
460 			else
461 				callp = &systab[code];
462 			i = callp->sy_narg;
463 			args.i[0] = locr0[A0];
464 			args.i[1] = locr0[A1];
465 			args.i[2] = locr0[A2];
466 			args.i[3] = locr0[A3];
467 			if (i > 4) {
468 				i = copyin((caddr_t)(locr0[SP] +
469 						4 * sizeof(int)),
470 					(caddr_t)&args.i[4],
471 					(u_int)(i - 4) * sizeof(int));
472 				if (i) {
473 					locr0[V0] = i;
474 					locr0[A3] = 1;
475 #ifdef KTRACE
476 					if (KTRPOINT(p, KTR_SYSCALL))
477 						ktrsyscall(p->p_tracep, code,
478 							callp->sy_narg, args.i);
479 #endif
480 					goto done;
481 				}
482 			}
483 		}
484 #ifdef KTRACE
485 		if (KTRPOINT(p, KTR_SYSCALL))
486 			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
487 #endif
488 		rval[0] = 0;
489 		rval[1] = locr0[V1];
490 #ifdef DEBUG
491 		if (trp == trapdebug)
492 			trapdebug[TRAPSIZE - 1].code = code;
493 		else
494 			trp[-1].code = code;
495 #endif
496 		i = (*callp->sy_call)(p, &args, rval);
497 		/*
498 		 * Reinitialize proc pointer `p' as it may be different
499 		 * if this is a child returning from fork syscall.
500 		 */
501 		p = curproc;
502 		locr0 = p->p_md.md_regs;
503 #ifdef DEBUG
504 		{ int s;
505 		s = splhigh();
506 		trp->status = statusReg;
507 		trp->cause = causeReg;
508 		trp->vadr = locr0[SP];
509 		trp->pc = locr0[PC];
510 		trp->ra = locr0[RA];
511 		trp->code = -code;
512 		if (++trp == &trapdebug[TRAPSIZE])
513 			trp = trapdebug;
514 		splx(s);
515 		}
516 #endif
517 		switch (i) {
518 		case 0:
519 			locr0[V0] = rval[0];
520 			locr0[V1] = rval[1];
521 			locr0[A3] = 0;
522 			break;
523 
524 		case ERESTART:
525 			locr0[PC] = pc;
526 			break;
527 
528 		case EJUSTRETURN:
529 			break;	/* nothing to do */
530 
531 		default:
532 			locr0[V0] = i;
533 			locr0[A3] = 1;
534 		}
535 	done:
536 #ifdef KTRACE
537 		if (KTRPOINT(p, KTR_SYSRET))
538 			ktrsysret(p->p_tracep, code, i, rval[0]);
539 #endif
540 		goto out;
541 	    }
542 
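	/*
	 * Breakpoints: if this is the single-step break planted by
	 * cpu_singlestep() at md_ss_addr, the saved instruction
	 * (md_ss_instr) is written back first, temporarily making the
	 * page writable if need be, and SIGTRAP is posted; any other
	 * break simply delivers SIGTRAP.
	 */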
543 	case T_BREAK+T_USER:
544 	    {
545 		register unsigned va, instr;
546 
547 		/* compute address of break instruction */
548 		va = pc;
549 		if ((int)causeReg < 0)
550 			va += 4;
551 
552 		/* read break instruction */
553 		instr = fuiword((caddr_t)va);
554 #if 0
555 		printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
556 			p->p_comm, p->p_pid, instr, pc,
557 			p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
558 #endif
559 #ifdef KADB
560 		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
561 			goto err;
562 #endif
563 		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
564 			i = SIGTRAP;
565 			break;
566 		}
567 
568 		/* restore original instruction and clear BP  */
569 		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
570 		if (i < 0) {
571 			vm_offset_t sa, ea;
572 			int rv;
573 
574 			sa = trunc_page((vm_offset_t)va);
575 			ea = round_page((vm_offset_t)va+sizeof(int)-1);
576 			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
577 				VM_PROT_DEFAULT, FALSE);
578 			if (rv == KERN_SUCCESS) {
579 				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
580 				(void) vm_map_protect(&p->p_vmspace->vm_map,
581 					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
582 					FALSE);
583 			}
584 		}
585 		if (i < 0)
586 			printf("Warning: can't restore instruction at %x: %x\n",
587 				p->p_md.md_ss_addr, p->p_md.md_ss_instr);
588 		p->p_md.md_ss_addr = 0;
589 		i = SIGTRAP;
590 		break;
591 	    }
592 
593 	case T_RES_INST+T_USER:
594 		i = SIGILL;
595 		break;
596 
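	/*
	 * Coprocessor unusable: FP state is switched lazily.
	 * machFPCurProcPtr names the process whose state is loaded in
	 * the FPU; the first FP instruction issued by another process
	 * traps here, where we swap FP state and set the CP1-usable bit
	 * in its saved status register.
	 */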
597 	case T_COP_UNUSABLE+T_USER:
598 		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
599 			i = SIGILL;	/* only FPU instructions allowed */
600 			break;
601 		}
602 		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
603 		machFPCurProcPtr = p;
604 		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
605 		p->p_md.md_flags |= MDP_FPUSED;
606 		goto out;
607 
608 	case T_OVFLOW+T_USER:
609 		i = SIGFPE;
610 		break;
611 
612 	case T_ADDR_ERR_LD:	/* misaligned access */
613 	case T_ADDR_ERR_ST:	/* misaligned access */
614 	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
615 		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
616 			((struct pcb *)UADDR)->pcb_onfault = 0;
617 			return (onfault_table[i]);
618 		}
619 		/* FALLTHROUGH */
620 
621 	default:
622 	err:
623 #ifdef KADB
624 	    {
625 		extern struct pcb kdbpcb;
626 
627 		if (USERMODE(statusReg))
628 			kdbpcb = p->p_addr->u_pcb;
629 		else {
630 			kdbpcb.pcb_regs[ZERO] = 0;
631 			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
632 			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
633 			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
634 			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
635 			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
636 			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
637 			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
638 			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
639 			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
640 			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
641 			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
642 			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
643 			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
644 			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
645 			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
646 			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
647 			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
648 			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
649 			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
650 			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
651 			kdbpcb.pcb_regs[PC] = pc;
652 			kdbpcb.pcb_regs[SR] = statusReg;
653 			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
654 		}
655 		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
656 			return (kdbpcb.pcb_regs[PC]);
657 	    }
658 #else
659 #ifdef DEBUG
660 		trapDump("trap");
661 #endif
662 #endif
663 		panic("trap");
664 	}
665 	trapsignal(p, i, ucode);
666 out:
667 	/*
668 	 * Note: we should only get here if returning to user mode.
669 	 */
670 	/* take pending signals */
671 	while ((i = CURSIG(p)) != 0)
672 		psig(i);
673 	p->p_pri = p->p_usrpri;
674 	astpending = 0;
675 	if (want_resched) {
676 		int s;
677 
678 		/*
679 		 * Since we are curproc, clock will normally just change
680 		 * our priority without moving us from one queue to another
681 		 * (since the running process is not on a queue.)
682 		 * If that happened after we setrq ourselves but before we
683 		 * swtch()'ed, we might not be on the queue indicated by
684 		 * our priority.
685 		 */
686 		s = splstatclock();
687 		setrq(p);
688 		p->p_stats->p_ru.ru_nivcsw++;
689 		swtch();
690 		splx(s);
691 		while ((i = CURSIG(p)) != 0)
692 			psig(i);
693 	}
694 
695 	/*
696 	 * If profiling, charge system time to the trapped pc.
697 	 */
698 	if (p->p_flag & SPROFIL) {
699 		extern int psratio;
700 
701 		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
702 	}
703 
704 	curpri = p->p_pri;
705 	return (pc);
706 }
707 
708 /*
709  * Handle an interrupt.
710  * Called from MachKernIntr() or MachUserIntr()
711  * Note: curproc might be NULL.
712  */
713 interrupt(statusReg, causeReg, pc)
714 	unsigned statusReg;	/* status register at time of the exception */
715 	unsigned causeReg;	/* cause register at time of exception */
716 	unsigned pc;		/* program counter where to continue */
717 {
718 	register unsigned mask;
719 	struct clockframe cf;
720 
721 #ifdef DEBUG
722 	trp->status = statusReg;
723 	trp->cause = causeReg;
724 	trp->vadr = 0;
725 	trp->pc = pc;
726 	trp->ra = 0;
727 	trp->code = 0;
728 	if (++trp == &trapdebug[TRAPSIZE])
729 		trp = trapdebug;
730 #endif
731 
732 	cnt.v_intr++;
733 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
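	/*
	 * Hardware interrupts are handed to the model specific routine,
	 * which returns the spl value to restore, i.e. the set of
	 * interrupts that should remain enabled.
	 */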
734 	if (pmax_hardware_intr)
735 		splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
736 	if (mask & MACH_INT_MASK_5) {
737 		if (!USERMODE(statusReg)) {
738 #ifdef DEBUG
739 			trapDump("fpintr");
740 #else
741 			printf("FPU interrupt: PC %x CR %x SR %x\n",
742 				pc, causeReg, statusReg);
743 #endif
744 		} else
745 			MachFPInterrupt(statusReg, causeReg, pc);
746 	}
747 	if (mask & MACH_SOFT_INT_MASK_0) {
748 		clearsoftclock();
749 		cnt.v_soft++;
750 		softclock();
751 	}
752 	/* process network interrupt if we trapped or will very soon */
753 	if ((mask & MACH_SOFT_INT_MASK_1) ||
754 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
755 		clearsoftnet();
756 		cnt.v_soft++;
757 #ifdef INET
758 		if (netisr & (1 << NETISR_ARP)) {
759 			netisr &= ~(1 << NETISR_ARP);
760 			arpintr();
761 		}
762 		if (netisr & (1 << NETISR_IP)) {
763 			netisr &= ~(1 << NETISR_IP);
764 			ipintr();
765 		}
766 #endif
767 #ifdef NS
768 		if (netisr & (1 << NETISR_NS)) {
769 			netisr &= ~(1 << NETISR_NS);
770 			nsintr();
771 		}
772 #endif
773 #ifdef ISO
774 		if (netisr & (1 << NETISR_ISO)) {
775 			netisr &= ~(1 << NETISR_ISO);
776 			clnlintr();
777 		}
778 #endif
779 	}
780 }
781 
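/*
 * The model specific handlers below mostly follow the same pattern:
 * service the clock interrupt first, splx() so further clock ticks stay
 * enabled while slower devices are handled, dispatch to the registered
 * device or TurboChannel slot handlers, and return the interrupt mask
 * that should remain enabled.
 */
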
782 /*
783  * Handle pmax (DECstation 2100/3100) interrupts.
784  */
785 pmax_intr(mask, pc, statusReg, causeReg)
786 	unsigned mask;
787 	unsigned pc;
788 	unsigned statusReg;
789 	unsigned causeReg;
790 {
791 	register volatile struct chiptime *c = Mach_clock_addr;
792 	struct clockframe cf;
793 	int temp;
794 
795 	/* handle clock interrupts ASAP */
796 	if (mask & MACH_INT_MASK_3) {
797 		temp = c->regc;	/* XXX clear interrupt bits */
798 		cf.pc = pc;
799 		cf.sr = statusReg;
800 		hardclock(&cf);
801 		/* keep clock interrupts enabled */
802 		causeReg &= ~MACH_INT_MASK_3;
803 	}
804 	/* Re-enable clock interrupts */
805 	splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
806 #if NSII > 0
807 	if (mask & MACH_INT_MASK_0)
808 		siiintr(0);
809 #endif
810 #if NLE > 0
811 	if (mask & MACH_INT_MASK_1)
812 		leintr(0);
813 #endif
814 #if NDC > 0
815 	if (mask & MACH_INT_MASK_2)
816 		dcintr(0);
817 #endif
818 	if (mask & MACH_INT_MASK_4)
819 		pmax_errintr();
820 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
821 		MACH_SR_INT_ENA_CUR);
822 }
823 
824 /*
825  * Handle hardware interrupts for the KN02. (DECstation 5000/200)
826  * Returns spl value.
827  */
828 kn02_intr(mask, pc, statusReg, causeReg)
829 	unsigned mask;
830 	unsigned pc;
831 	unsigned statusReg;
832 	unsigned causeReg;
833 {
834 	register unsigned i, m;
835 	register volatile struct chiptime *c = Mach_clock_addr;
836 	register unsigned csr;
837 	int temp;
838 	struct clockframe cf;
839 	static int warned = 0;
840 
841 	/* handle clock interrupts ASAP */
842 	if (mask & MACH_INT_MASK_1) {
843 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
844 		if ((csr & KN02_CSR_PSWARN) && !warned) {
845 			warned = 1;
846 			printf("WARNING: power supply is overheating!\n");
847 		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
848 			warned = 0;
849 			printf("WARNING: power supply is OK again\n");
850 		}
851 
852 		temp = c->regc;	/* XXX clear interrupt bits */
853 		cf.pc = pc;
854 		cf.sr = statusReg;
855 		hardclock(&cf);
856 
857 		/* keep clock interrupts enabled */
858 		causeReg &= ~MACH_INT_MASK_1;
859 	}
860 	/* Re-enable clock interrupts */
861 	splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
862 	if (mask & MACH_INT_MASK_0) {
863 
864 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
865 		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
866 #if 0
867 		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
868 			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
869 			(m << KN02_CSR_IOINTEN_SHIFT);
870 #endif
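		/*
		 * The CSR holds both the pending I/O interrupt bits and
		 * their enables; m is the set that is both pending and
		 * enabled, and each such bit is dispatched to the handler
		 * registered in tc_slot_info[] for that slot.
		 */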
871 		for (i = 0; m; i++, m >>= 1) {
872 			if (!(m & 1))
873 				continue;
874 			if (tc_slot_info[i].intr)
875 				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
876 			else
877 				printf("spurious interrupt %d\n", i);
878 		}
879 #if 0
880 		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
881 			csr & ~(KN02_CSR_WRESERVED | 0xFF);
882 #endif
883 	}
884 	if (mask & MACH_INT_MASK_3)
885 		kn02_errintr();
886 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
887 		MACH_SR_INT_ENA_CUR);
888 }
889 
890 /*
891  * 3min hardware interrupts. (DECstation 5000/1xx)
892  */
893 kmin_intr(mask, pc, statusReg, causeReg)
894 	unsigned mask;
895 	unsigned pc;
896 	unsigned statusReg;
897 	unsigned causeReg;
898 {
899 	register u_int intr;
900 	register volatile struct chiptime *c = Mach_clock_addr;
901 	volatile u_int *imaskp =
902 		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
903 	volatile u_int *intrp =
904 		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
905 	unsigned int old_mask;
906 	struct clockframe cf;
907 	int temp;
908 	static int user_warned = 0;
909 
910 	old_mask = *imaskp & kmin_tc3_imask;
911 	*imaskp = old_mask;
912 
913 	if (mask & MACH_INT_MASK_4)
914 		(*callv->halt)((int *)0, 0);
915 	if (mask & MACH_INT_MASK_3) {
916 		intr = *intrp;
917 		/* masked interrupts are still observable */
918 		intr &= old_mask;
919 
920 		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
921 			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
922 #ifdef notdef
923 			asc_dma_intr();
924 #endif
925 		}
926 
927 		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
928 			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);
929 
930 		if (intr & KMIN_INTR_LANCE_READ_E)
931 			*intrp &= ~KMIN_INTR_LANCE_READ_E;
932 
933 		if (intr & KMIN_INTR_TIMEOUT)
934 			kn02ba_errintr();
935 
936 		if (intr & KMIN_INTR_CLOCK) {
937 			temp = c->regc;	/* XXX clear interrupt bits */
938 			cf.pc = pc;
939 			cf.sr = statusReg;
940 			hardclock(&cf);
941 		}
942 
943 		if ((intr & KMIN_INTR_SCC_0) &&
944 			tc_slot_info[KMIN_SCC0_SLOT].intr)
945 			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
946 			(tc_slot_info[KMIN_SCC0_SLOT].unit);
947 
948 		if ((intr & KMIN_INTR_SCC_1) &&
949 			tc_slot_info[KMIN_SCC1_SLOT].intr)
950 			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
951 			(tc_slot_info[KMIN_SCC1_SLOT].unit);
952 
953 		if ((intr & KMIN_INTR_SCSI) &&
954 			tc_slot_info[KMIN_SCSI_SLOT].intr)
955 			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
956 			(tc_slot_info[KMIN_SCSI_SLOT].unit);
957 
958 		if ((intr & KMIN_INTR_LANCE) &&
959 			tc_slot_info[KMIN_LANCE_SLOT].intr)
960 			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
961 			(tc_slot_info[KMIN_LANCE_SLOT].unit);
962 
963 		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
964 			printf("%s\n", "Power supply ok now.");
965 			user_warned = 0;
966 		}
967 		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
968 			user_warned++;
969 			printf("%s\n", "Power supply overheating");
970 		}
971 	}
972 	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
973 		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
974 	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
975 		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
976 	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
977 		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
978 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
979 		MACH_SR_INT_ENA_CUR);
980 }
981 
982 /*
983  * Maxine hardware interrupts. (Personal DECstation 5000/xx)
984  */
985 xine_intr(mask, pc, statusReg, causeReg)
986 	unsigned mask;
987 	unsigned pc;
988 	unsigned statusReg;
989 	unsigned causeReg;
990 {
991 	register u_int intr;
992 	register volatile struct chiptime *c = Mach_clock_addr;
993 	volatile u_int *imaskp = (volatile u_int *)
994 		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
995 	volatile u_int *intrp = (volatile u_int *)
996 		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
997 	u_int old_mask;
998 	struct clockframe cf;
999 	int temp;
1000 
1001 	old_mask = *imaskp & xine_tc3_imask;
1002 	*imaskp = old_mask;
1003 
1004 	if (mask & MACH_INT_MASK_4)
1005 		(*callv->halt)((int *)0, 0);
1006 
1007 	/* handle clock interrupts ASAP */
1008 	if (mask & MACH_INT_MASK_1) {
1009 		temp = c->regc;	/* XXX clear interrupt bits */
1010 		cf.pc = pc;
1011 		cf.sr = statusReg;
1012 		hardclock(&cf);
1013 		causeReg &= ~MACH_INT_MASK_1;
1014 		/* reenable clock interrupts */
1015 		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
1016 	}
1017 	if (mask & MACH_INT_MASK_3) {
1018 		intr = *intrp;
1019 		/* masked interrupts are still observable */
1020 		intr &= old_mask;
1021 
1022 		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
1023 			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
1024 #ifdef notdef
1025 			asc_dma_intr();
1026 #endif
1027 		}
1028 
1029 		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
1030 			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);
1031 
1032 		if (intr & XINE_INTR_LANCE_READ_E)
1033 			*intrp &= ~XINE_INTR_LANCE_READ_E;
1034 
1035 		if ((intr & XINE_INTR_SCC_0) &&
1036 			tc_slot_info[XINE_SCC0_SLOT].intr)
1037 			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
1038 			(tc_slot_info[XINE_SCC0_SLOT].unit);
1039 
1040 		if ((intr & XINE_INTR_DTOP_RX) &&
1041 			tc_slot_info[XINE_DTOP_SLOT].intr)
1042 			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
1043 			(tc_slot_info[XINE_DTOP_SLOT].unit);
1044 
1045 		if ((intr & XINE_INTR_FLOPPY) &&
1046 			tc_slot_info[XINE_FLOPPY_SLOT].intr)
1047 			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
1048 			(tc_slot_info[XINE_FLOPPY_SLOT].unit);
1049 
1050 		if ((intr & XINE_INTR_TC_0) &&
1051 			tc_slot_info[0].intr)
1052 			(*(tc_slot_info[0].intr))
1053 			(tc_slot_info[0].unit);
1054 
1055 		if ((intr & XINE_INTR_TC_1) &&
1056 			tc_slot_info[1].intr)
1057 			(*(tc_slot_info[1].intr))
1058 			(tc_slot_info[1].unit);
1059 
1060 		if ((intr & XINE_INTR_ISDN) &&
1061 			tc_slot_info[XINE_ISDN_SLOT].intr)
1062 			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
1063 			(tc_slot_info[XINE_ISDN_SLOT].unit);
1064 
1065 		if ((intr & XINE_INTR_SCSI) &&
1066 			tc_slot_info[XINE_SCSI_SLOT].intr)
1067 			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
1068 			(tc_slot_info[XINE_SCSI_SLOT].unit);
1069 
1070 		if ((intr & XINE_INTR_LANCE) &&
1071 			tc_slot_info[XINE_LANCE_SLOT].intr)
1072 			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
1073 			(tc_slot_info[XINE_LANCE_SLOT].unit);
1074 
1075 	}
1076 	if (mask & MACH_INT_MASK_2)
1077 		kn02ba_errintr();
1078 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
1079 		MACH_SR_INT_ENA_CUR);
1080 }
1081 
1082 #ifdef DS5000_240
1083 /*
1084  * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
1085  */
1086 kn03_intr(mask, pc, statusReg, causeReg)
1087 	unsigned mask;
1088 	unsigned pc;
1089 	unsigned statusReg;
1090 	unsigned causeReg;
1091 {
1092 	register u_int intr;
1093 	register volatile struct chiptime *c = Mach_clock_addr;
1094 	volatile u_int *imaskp = (volatile u_int *)
1095 		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
1096 	volatile u_int *intrp = (volatile u_int *)
1097 		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
1098 	u_int old_mask;
1099 	struct clockframe cf;
1100 	int temp;
1101 	static int user_warned = 0;
1102 
1103 	old_mask = *imaskp & kn03_tc3_imask;
1104 	*imaskp = old_mask;
1105 
1106 	if (mask & MACH_INT_MASK_4)
1107 		(*callv->halt)((int *)0, 0);
1108 
1109 	/* handle clock interrupts ASAP */
1110 	if (mask & MACH_INT_MASK_1) {
1111 		temp = c->regc;	/* XXX clear interrupt bits */
1112 		cf.pc = pc;
1113 		cf.sr = statusReg;
1114 		hardclock(&cf);
1115 		causeReg &= ~MACH_INT_MASK_1;
1116 		/* reenable clock interrupts */
1117 		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
1118 	}
1119 	if (mask & MACH_INT_MASK_0) {
1120 		intr = *intrp;
1121 		/* masked interrupts are still observable */
1122 		intr &= old_mask;
1123 
1124 		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
1125 			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
1126 #ifdef notdef
1127 			asc_dma_intr();
1128 #endif
1129 		}
1130 
1131 		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
1132 			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);
1133 
1134 		if (intr & KN03_INTR_LANCE_READ_E)
1135 			*intrp &= ~KN03_INTR_LANCE_READ_E;
1136 
1137 		if ((intr & KN03_INTR_SCC_0) &&
1138 			tc_slot_info[KN03_SCC0_SLOT].intr)
1139 			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
1140 			(tc_slot_info[KN03_SCC0_SLOT].unit);
1141 
1142 		if ((intr & KN03_INTR_SCC_1) &&
1143 			tc_slot_info[KN03_SCC1_SLOT].intr)
1144 			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
1145 			(tc_slot_info[KN03_SCC1_SLOT].unit);
1146 
1147 		if ((intr & KN03_INTR_TC_0) &&
1148 			tc_slot_info[0].intr)
1149 			(*(tc_slot_info[0].intr))
1150 			(tc_slot_info[0].unit);
1151 
1152 		if ((intr & KN03_INTR_TC_1) &&
1153 			tc_slot_info[1].intr)
1154 			(*(tc_slot_info[1].intr))
1155 			(tc_slot_info[1].unit);
1156 
1157 		if ((intr & KN03_INTR_TC_2) &&
1158 			tc_slot_info[2].intr)
1159 			(*(tc_slot_info[2].intr))
1160 			(tc_slot_info[2].unit);
1161 
1162 		if ((intr & KN03_INTR_SCSI) &&
1163 			tc_slot_info[KN03_SCSI_SLOT].intr)
1164 			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
1165 			(tc_slot_info[KN03_SCSI_SLOT].unit);
1166 
1167 		if ((intr & KN03_INTR_LANCE) &&
1168 			tc_slot_info[KN03_LANCE_SLOT].intr)
1169 			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
1170 			(tc_slot_info[KN03_LANCE_SLOT].unit);
1171 
1172 		if (user_warned && ((intr & KN03_INTR_PSWARN) == 0)) {
1173 			printf("%s\n", "Power supply ok now.");
1174 			user_warned = 0;
1175 		}
1176 		if ((intr & KN03_INTR_PSWARN) && (user_warned < 3)) {
1177 			user_warned++;
1178 			printf("%s\n", "Power supply overheating");
1179 		}
1180 	}
1181 	if (mask & MACH_INT_MASK_3)
1182 		kn03_errintr();
1183 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
1184 		MACH_SR_INT_ENA_CUR);
1185 }
1186 #endif /* DS5000_240 */
1187 
1188 /*
1189  * This is called from MachUserIntr() if astpending is set.
1190  * This is very similar to the tail of trap().
1191  */
1192 softintr(statusReg, pc)
1193 	unsigned statusReg;	/* status register at time of the exception */
1194 	unsigned pc;		/* program counter where to continue */
1195 {
1196 	register struct proc *p = curproc;
1197 	int sig;
1198 
1199 	cnt.v_soft++;
1200 	/* take pending signals */
1201 	while ((sig = CURSIG(p)) != 0)
1202 		psig(sig);
1203 	p->p_pri = p->p_usrpri;
1204 	astpending = 0;
1205 	if (p->p_flag & SOWEUPC) {
1206 		p->p_flag &= ~SOWEUPC;
1207 		ADDUPROF(p);
1208 	}
1209 	if (want_resched) {
1210 		int s;
1211 
1212 		/*
1213 		 * Since we are curproc, clock will normally just change
1214 		 * our priority without moving us from one queue to another
1215 		 * (since the running process is not on a queue.)
1216 		 * If that happened after we setrq ourselves but before we
1217 		 * swtch()'ed, we might not be on the queue indicated by
1218 		 * our priority.
1219 		 */
1220 		s = splstatclock();
1221 		setrq(p);
1222 		p->p_stats->p_ru.ru_nivcsw++;
1223 		swtch();
1224 		splx(s);
1225 		while ((sig = CURSIG(p)) != 0)
1226 			psig(sig);
1227 	}
1228 	curpri = p->p_pri;
1229 }
1230 
1231 #ifdef DEBUG
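/*
 * Dump the trap history buffer, most recent entry first, then reset it.
 */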
1232 trapDump(msg)
1233 	char *msg;
1234 {
1235 	register int i;
1236 	int s;
1237 
1238 	s = splhigh();
1239 	printf("trapDump(%s)\n", msg);
1240 	for (i = 0; i < TRAPSIZE; i++) {
1241 		if (trp == trapdebug)
1242 			trp = &trapdebug[TRAPSIZE - 1];
1243 		else
1244 			trp--;
1245 		if (trp->cause == 0)
1246 			break;
1247 		printf("%s: ADR %x PC %x CR %x SR %x\n",
1248 			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
1249 				MACH_CR_EXC_CODE_SHIFT],
1250 			trp->vadr, trp->pc, trp->cause, trp->status);
1251 		printf("   RA %x code %d\n", trp->ra, trp->code);
1252 	}
1253 	bzero(trapdebug, sizeof(trapdebug));
1254 	trp = trapdebug;
1255 	splx(s);
1256 }
1257 #endif
1258 
1259 /*
1260  *----------------------------------------------------------------------
1261  *
1262  * MemErrorInterrupts --
1263  *   pmax_errintr - for the DS2100/DS3100
1264  *   kn02_errintr - for the DS5000/200
1265  *   kn02ba_errintr - for the DS5000/1xx and DS5000/xx
1266  *
1267  *	Handle a memory error interrupt reported through the system control registers.
1268  *
1269  * Results:
1270  *	None.
1271  *
1272  * Side effects:
1273  *	Prints diagnostics, clears the error state and may panic.
1274  *
1275  *----------------------------------------------------------------------
1276  */
1277 static void
1278 pmax_errintr()
1279 {
1280 	volatile u_short *sysCSRPtr =
1281 		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
1282 	u_short csr;
1283 
1284 	csr = *sysCSRPtr;
1285 
1286 	if (csr & KN01_CSR_MERR) {
1287 		printf("Memory error at 0x%x\n",
1288 			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
1289 		panic("Mem error interrupt");
1290 	}
1291 	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
1292 }
1293 
1294 static void
1295 kn02_errintr()
1296 {
1297 	u_int erradr, chksyn;
1298 
1299 	erradr = *(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR);
1300 	chksyn = *(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CHKSYN);
1301 	*(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
1302 	MachEmptyWriteBuffer();
1303 
1304 	if (!(erradr & KN02_ERR_VALID))
1305 		return;
1306 	printf("%s memory %s %s error at 0x%x\n",
1307 		(erradr & KN02_ERR_CPU) ? "CPU" : "DMA",
1308 		(erradr & KN02_ERR_WRITE) ? "write" : "read",
1309 		(erradr & KN02_ERR_ECCERR) ? "ECC" : "timeout",
1310 		(erradr & KN02_ERR_ADDRESS));
1311 	if (erradr & KN02_ERR_ECCERR) {
1312 		*(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CHKSYN) = 0;
1313 		MachEmptyWriteBuffer();
1314 		printf("ECC 0x%x\n", chksyn);
1315 	}
1316 	panic("Mem error interrupt");
1317 }
1318 
1319 #ifdef DS5000_240
1320 static void
1321 kn03_errintr()
1322 {
1323 
1324 	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR));
1325 	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR) = 0;
1326 	MachEmptyWriteBuffer();
1327 }
1328 #endif /* DS5000_240 */
1329 
1330 static void
1331 kn02ba_errintr()
1332 {
1333 	register int mer, adr, siz;
1334 	static int errintr_cnt = 0;
1335 
1336 	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
1337 	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
1338 	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);
1339 
1340 	/* clear interrupt bit */
1341 	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;
1342 
1343 	errintr_cnt++;
1344 	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
1345 	       "Bad memory chip at phys ",
1346 	       kn02ba_recover_erradr(adr, mer),
1347 	       mer, siz, adr);
1348 }
1349 
1350 static unsigned
1351 kn02ba_recover_erradr(phys, mer)
1352 	register unsigned phys, mer;
1353 {
1354 	/* phys holds bits 28:2, mer knows which byte */
1355 	switch (mer & KMIN_MER_LASTBYTE) {
1356 	case KMIN_LASTB31:
1357 		mer = 3; break;
1358 	case KMIN_LASTB23:
1359 		mer = 2; break;
1360 	case KMIN_LASTB15:
1361 		mer = 1; break;
1362 	case KMIN_LASTB07:
1363 		mer = 0; break;
1364 	}
1365 	return ((phys & KMIN_AER_ADDR_MASK) | mer);
1366 }
1367 
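/*
 * MachEmulateBranch() is used both to fix up the saved PC when an
 * exception was taken in a branch delay slot (see the system call case
 * in trap() above) and by cpu_singlestep() to find where the next
 * breakpoint belongs.  A taken branch yields its target, a branch that
 * is not taken yields PC + 8 (skipping the delay slot), and with
 * allowNonBranch set a non-branch instruction simply yields PC + 4.
 */
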
1368 /*
1369  * Return the resulting PC as if the branch was executed.
1370  */
1371 unsigned
1372 MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
1373 	unsigned *regsPtr;
1374 	unsigned instPC;
1375 	unsigned fpcCSR;
1376 	int allowNonBranch;
1377 {
1378 	InstFmt inst;
1379 	unsigned retAddr;
1380 	int condition;
1381 	extern unsigned GetBranchDest();
1382 
1383 
1384 	inst = *(InstFmt *)instPC;
1385 #if 0
1386 	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
1387 		inst.word, fpcCSR); /* XXX */
1388 #endif
1389 	switch ((int)inst.JType.op) {
1390 	case OP_SPECIAL:
1391 		switch ((int)inst.RType.func) {
1392 		case OP_JR:
1393 		case OP_JALR:
1394 			retAddr = regsPtr[inst.RType.rs];
1395 			break;
1396 
1397 		default:
1398 			if (!allowNonBranch)
1399 				panic("MachEmulateBranch: Non-branch");
1400 			retAddr = instPC + 4;
1401 			break;
1402 		}
1403 		break;
1404 
1405 	case OP_BCOND:
1406 		switch ((int)inst.IType.rt) {
1407 		case OP_BLTZ:
1408 		case OP_BLTZAL:
1409 			if ((int)(regsPtr[inst.RType.rs]) < 0)
1410 				retAddr = GetBranchDest((InstFmt *)instPC);
1411 			else
1412 				retAddr = instPC + 8;
1413 			break;
1414 
1415 		case OP_BGEZAL:
1416 		case OP_BGEZ:
1417 			if ((int)(regsPtr[inst.RType.rs]) >= 0)
1418 				retAddr = GetBranchDest((InstFmt *)instPC);
1419 			else
1420 				retAddr = instPC + 8;
1421 			break;
1422 
1423 		default:
1424 			panic("MachEmulateBranch: Bad branch cond");
1425 		}
1426 		break;
1427 
1428 	case OP_J:
1429 	case OP_JAL:
1430 		retAddr = (inst.JType.target << 2) |
1431 			((unsigned)instPC & 0xF0000000);
1432 		break;
1433 
1434 	case OP_BEQ:
1435 		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1436 			retAddr = GetBranchDest((InstFmt *)instPC);
1437 		else
1438 			retAddr = instPC + 8;
1439 		break;
1440 
1441 	case OP_BNE:
1442 		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1443 			retAddr = GetBranchDest((InstFmt *)instPC);
1444 		else
1445 			retAddr = instPC + 8;
1446 		break;
1447 
1448 	case OP_BLEZ:
1449 		if ((int)(regsPtr[inst.RType.rs]) <= 0)
1450 			retAddr = GetBranchDest((InstFmt *)instPC);
1451 		else
1452 			retAddr = instPC + 8;
1453 		break;
1454 
1455 	case OP_BGTZ:
1456 		if ((int)(regsPtr[inst.RType.rs]) > 0)
1457 			retAddr = GetBranchDest((InstFmt *)instPC);
1458 		else
1459 			retAddr = instPC + 8;
1460 		break;
1461 
1462 	case OP_COP1:
1463 		switch (inst.RType.rs) {
1464 		case OP_BCx:
1465 		case OP_BCy:
1466 			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1467 				condition = fpcCSR & MACH_FPC_COND_BIT;
1468 			else
1469 				condition = !(fpcCSR & MACH_FPC_COND_BIT);
1470 			if (condition)
1471 				retAddr = GetBranchDest((InstFmt *)instPC);
1472 			else
1473 				retAddr = instPC + 8;
1474 			break;
1475 
1476 		default:
1477 			if (!allowNonBranch)
1478 				panic("MachEmulateBranch: Bad coproc branch instruction");
1479 			retAddr = instPC + 4;
1480 		}
1481 		break;
1482 
1483 	default:
1484 		if (!allowNonBranch)
1485 			panic("MachEmulateBranch: Non-branch instruction");
1486 		retAddr = instPC + 4;
1487 	}
1488 #if 0
1489 	printf("Target addr=%x\n", retAddr); /* XXX */
1490 #endif
1491 	return (retAddr);
1492 }
1493 
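/*
 * Branch displacements are signed 16-bit word offsets relative to the
 * instruction in the branch delay slot, hence the "+ 4" below.
 */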
1494 unsigned
1495 GetBranchDest(InstPtr)
1496 	InstFmt *InstPtr;
1497 {
1498 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1499 }
1500 
1501 /*
1502  * This routine is called by procxmt() to single step one instruction.
1503  * We do this by storing a break instruction at the next instruction to be executed,
1504  * resuming execution, and then restoring the old instruction.
1505  */
1506 cpu_singlestep(p)
1507 	register struct proc *p;
1508 {
1509 	register unsigned va;
1510 	register int *locr0 = p->p_md.md_regs;
1511 	int i;
1512 
1513 	/* compute next address after current location */
1514 	va = MachEmulateBranch(locr0, locr0[PC], locr0[FSR], 1);
1515 	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
1516 	    !useracc((caddr_t)va, 4, B_READ)) {
1517 		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
1518 			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
1519 		return (EFAULT);
1520 	}
1521 	p->p_md.md_ss_addr = va;
1522 	p->p_md.md_ss_instr = fuiword((caddr_t)va);
1523 	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
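	/*
	 * suiword() fails when the page is mapped read-only, as user
	 * text usually is; in that case temporarily give it the default
	 * (writable) protection, store the break, then restore
	 * read/execute protection.
	 */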
1524 	if (i < 0) {
1525 		vm_offset_t sa, ea;
1526 		int rv;
1527 
1528 		sa = trunc_page((vm_offset_t)va);
1529 		ea = round_page((vm_offset_t)va+sizeof(int)-1);
1530 		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
1531 			VM_PROT_DEFAULT, FALSE);
1532 		if (rv == KERN_SUCCESS) {
1533 			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1534 			(void) vm_map_protect(&p->p_vmspace->vm_map,
1535 				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
1536 		}
1537 	}
1538 	if (i < 0)
1539 		return (EFAULT);
1540 #if 0
1541 	printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
1542 		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
1543 		p->p_md.md_ss_instr, locr0[PC], fuword((caddr_t)va)); /* XXX */
1544 #endif
1545 	return (0);
1546 }
1547 
1548 #ifdef DEBUG
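/*
 * Fetch a word from kernel space for the stack trace code, catching
 * unaligned addresses instead of taking an address error trap on them.
 */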
1549 kdbpeek(addr)
1550 {
1551 	if (addr & 3) {
1552 		printf("kdbpeek: unaligned address %x\n", addr);
1553 		return (-1);
1554 	}
1555 	return (*(int *)addr);
1556 }
1557 
1558 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1559 
1560 /*
1561  * Print a stack backtrace.
1562  */
1563 void
1564 stacktrace(a0, a1, a2, a3)
1565 	int a0, a1, a2, a3;
1566 {
1567 	unsigned pc, sp, fp, ra, va, subr;
1568 	unsigned instr, mask;
1569 	InstFmt i;
1570 	int more, stksize;
1571 	int regs[3];
1572 	extern setsoftclock();
1573 	extern char start[], edata[];
1574 
1575 	cpu_getregs(regs);
1576 
1577 	/* get initial values from the exception frame */
1578 	sp = regs[0];
1579 	pc = regs[1];
1580 	ra = 0;
1581 	fp = regs[2];
1582 
1583 loop:
1584 	/* check for current PC in the kernel interrupt handler code */
1585 	if (pc >= (unsigned)MachKernIntr && pc < (unsigned)MachUserIntr) {
1586 		/* NOTE: the offsets depend on the code in locore.s */
1587 		printf("interrupt\n");
1588 		a0 = kdbpeek(sp + 36);
1589 		a1 = kdbpeek(sp + 40);
1590 		a2 = kdbpeek(sp + 44);
1591 		a3 = kdbpeek(sp + 48);
1592 		pc = kdbpeek(sp + 20);
1593 		ra = kdbpeek(sp + 92);
1594 		sp = kdbpeek(sp + 100);
1595 		fp = kdbpeek(sp + 104);
1596 	}
1597 
1598 	/* check for current PC in the exception handler code */
1599 	if (pc >= 0x80000000 && pc < (unsigned)setsoftclock) {
1600 		ra = 0;
1601 		subr = 0;
1602 		goto done;
1603 	}
1604 
1605 	/* check for bad PC */
1606 	if (pc & 3 || pc < 0x80000000 || pc >= (unsigned)edata) {
1607 		printf("PC 0x%x: not in kernel\n", pc);
1608 		ra = 0;
1609 		subr = 0;
1610 		goto done;
1611 	}
1612 
1613 	/*
1614 	 * Find the beginning of the current subroutine by scanning backwards
1615 	 * from the current PC for the end of the previous subroutine.
1616 	 */
1617 	va = pc - sizeof(int);
1618 	while ((instr = kdbpeek(va)) != MIPS_JR_RA)
1619 		va -= sizeof(int);
1620 	va += 2 * sizeof(int);	/* skip back over branch & delay slot */
1621 	/* skip over nulls which might separate .o files */
1622 	while ((instr = kdbpeek(va)) == 0)
1623 		va += sizeof(int);
1624 	subr = va;
1625 
1626 	/* scan forwards to find stack size and any saved registers */
1627 	stksize = 0;
1628 	more = 3;
1629 	mask = 0;
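	/*
	 * Scan forward from the start of the routine.  "more" stays at 3
	 * until a jump, branch, syscall or break is seen; it is then set
	 * to 2 (or 1 for syscall/break) so that the delay slot is still
	 * examined before the scan stops, and the scan also stops on
	 * reaching the current PC.
	 */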
1630 	for (; more; va += sizeof(int), more = (more == 3) ? 3 : more - 1) {
1631 		/* stop if hit our current position */
1632 		if (va >= pc)
1633 			break;
1634 		instr = kdbpeek(va);
1635 		i.word = instr;
1636 		switch (i.JType.op) {
1637 		case OP_SPECIAL:
1638 			switch (i.RType.func) {
1639 			case OP_JR:
1640 			case OP_JALR:
1641 				more = 2; /* stop after next instruction */
1642 				break;
1643 
1644 			case OP_SYSCALL:
1645 			case OP_BREAK:
1646 				more = 1; /* stop now */
1647 			};
1648 			break;
1649 
1650 		case OP_BCOND:
1651 		case OP_J:
1652 		case OP_JAL:
1653 		case OP_BEQ:
1654 		case OP_BNE:
1655 		case OP_BLEZ:
1656 		case OP_BGTZ:
1657 			more = 2; /* stop after next instruction */
1658 			break;
1659 
1660 		case OP_COP0:
1661 		case OP_COP1:
1662 		case OP_COP2:
1663 		case OP_COP3:
1664 			switch (i.RType.rs) {
1665 			case OP_BCx:
1666 			case OP_BCy:
1667 				more = 2; /* stop after next instruction */
1668 			};
1669 			break;
1670 
1671 		case OP_SW:
1672 			/* look for saved registers on the stack */
1673 			if (i.IType.rs != 29)
1674 				break;
1675 			/* only restore the first one */
1676 			if (mask & (1 << i.IType.rt))
1677 				break;
1678 			mask |= 1 << i.IType.rt;
1679 			switch (i.IType.rt) {
1680 			case 4: /* a0 */
1681 				a0 = kdbpeek(sp + (short)i.IType.imm);
1682 				break;
1683 
1684 			case 5: /* a1 */
1685 				a1 = kdbpeek(sp + (short)i.IType.imm);
1686 				break;
1687 
1688 			case 6: /* a2 */
1689 				a2 = kdbpeek(sp + (short)i.IType.imm);
1690 				break;
1691 
1692 			case 7: /* a3 */
1693 				a3 = kdbpeek(sp + (short)i.IType.imm);
1694 				break;
1695 
1696 			case 30: /* fp */
1697 				fp = kdbpeek(sp + (short)i.IType.imm);
1698 				break;
1699 
1700 			case 31: /* ra */
1701 				ra = kdbpeek(sp + (short)i.IType.imm);
1702 			}
1703 			break;
1704 
1705 		case OP_ADDI:
1706 		case OP_ADDIU:
1707 			/* look for stack pointer adjustment */
1708 			if (i.IType.rs != 29 || i.IType.rt != 29)
1709 				break;
1710 			stksize = (short)i.IType.imm;
1711 		}
1712 	}
1713 
1714 done:
1715 	printf("%x+%x (%x,%x,%x,%x) ra %x sz %d\n",
1716 		subr, pc - subr, a0, a1, a2, a3, ra, stksize);
1717 
1718 	if (ra) {
1719 		if (pc == ra && stksize == 0)
1720 			printf("stacktrace: loop!\n");
1721 		else {
1722 			pc = ra;
1723 			sp -= stksize;
1724 			goto loop;
1725 		}
1726 	}
1727 }
1728 #endif /* DEBUG */
1729