xref: /original-bsd/sys/pmax/pmax/trap.c (revision 50cee248)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.17 (Berkeley) 05/09/93
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29 
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36 
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49 
50 #include <pmax/stand/dec_prom.h>
51 
52 #include <asc.h>
53 #include <sii.h>
54 #include <le.h>
55 #include <dc.h>
56 
struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */

/*
 * Low-level exception entry points and helpers, implemented in
 * assembly (locore); declared K&R-style, so no prototypes.
 */
extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
extern unsigned MachEmulateBranch();	/* computes target PC of a branch */
66 
/*
 * Exception dispatch table, indexed by the CPU exception code
 * (entries 0-15 for kernel mode, 16-31 for user mode).  The order
 * must match the hardware cause-register exception codes exactly;
 * do not reorder.
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers (same exception codes, user mode).
 */
	MachUserIntr,			/* external interrupt */
	MachUserGenException,		/* TLB modification */
	MachUserGenException,		/* TLB miss (load or instr. fetch) */
	MachUserGenException,		/* TLB miss (store) */
	MachUserGenException,		/* address error (load or I-fetch) */
	MachUserGenException,		/* address error (store) */
	MachUserGenException,		/* bus error (I-fetch) */
	MachUserGenException,		/* bus error (load or store) */
	MachUserGenException,		/* system call */
	MachUserGenException,		/* breakpoint */
	MachUserGenException,		/* reserved instruction */
	MachUserGenException,		/* coprocessor unusable */
	MachUserGenException,		/* arithmetic overflow */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
};
107 
/*
 * Printable names for the exception codes, indexed the same way as
 * machExceptionTable above (order is significant).
 */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};
126 
#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* CPU status register at trap time */
	u_int	cause;		/* CPU cause register at trap time */
	u_int	vadr;		/* faulting virtual address, if any */
	u_int	pc;		/* program counter at trap time */
	u_int	ra;		/* return-address register at trap time */
	u_int	code;		/* extra info (e.g. -syscall number) */
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* trp = next free slot (circular) */
#endif
138 
/* Per-model machine-check/memory-error interrupt handlers (below). */
static void pmax_errintr();
static void kn02_errintr(), kn02ba_errintr();
#ifdef DS5000_240
static void kn03_errintr();
#endif
static unsigned kn02ba_recover_erradr();
/* TURBOchannel per-slot interrupt routines, filled in at autoconfig. */
extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
extern u_long kmin_tc3_imask, xine_tc3_imask;
extern const struct callback *callv;	/* PROM callback vector */
#ifdef DS5000_240
extern u_long kn03_tc3_imask;
#endif
/*
 * Model-specific hardware interrupt dispatcher (pmax_intr, kn02_intr,
 * ...), set up at boot; NULL until then.  Returns the new spl value.
 */
int (*pmax_hardware_intr)() = (int (*)())0;
extern volatile struct chiptime *Mach_clock_addr;	/* TODR clock chip */
153 
/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 *
 * NOTE(review): a fifth argument `args' is referenced below via
 * ((int *)&args) but is not declared in the K&R parameter list, so it
 * defaults to int; it appears to address the register frame saved by
 * locore on the stack — confirm against the assembly entry points.
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occured on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;	/* signal code passed to trapsignal() */
	register struct proc *p = curproc;
	u_quad_t sticks;	/* system-time snapshot for profiling */
	vm_prot_t ftype;	/* fault type: VM_PROT_READ or VM_PROT_WRITE */
	extern unsigned onfault_table[];	/* resume PCs for copyin/copyout faults */

#ifdef DEBUG
	/* record this trap in the circular trapdebug history buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	/* extract the exception code; tag with T_USER if from user mode */
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		sticks = p->p_sticks;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (KSEG addresses have bit 31 set) */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
			register vm_offset_t pa;

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* mark the pte modified, then refresh the TLB entry */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= ~PGOFSET;
			printf("trap: ktlbmod: TLBupdate hi %x lo %x i %x\n",
				vadr, entry,
				MachTLBUpdate(vadr, entry)); /* XXX */
			pa = entry & PG_FRAME;
#ifdef ATTR
			pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
			/* propagate the dirty bit to the vm_page */
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: ktlbmod: unmanaged page");
			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		/* same as kernel case above, but via the process pmap */
		register pt_entry_t *pte;
		register unsigned entry;
		register vm_offset_t pa;
		pmap_t pmap = &p->p_vmspace->vm_pmap;

		if (!(pte = pmap_segmap(pmap, vadr)))
			panic("trap: utlbmod: invalid segmap");
		pte += (vadr >> PGSHIFT) & (NPTEPG - 1);
		entry = pte->pt_entry;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (entry & PG_RO) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		entry |= PG_M;
		pte->pt_entry = entry;
		/* TLB hi word carries the address plus the pmap's TLB pid */
		vadr = (vadr & ~PGOFSET) |
			(pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
		printf("trap: utlbmod: TLBupdate hi %x lo %x i %x\n",
			vadr, entry, MachTLBUpdate(vadr, entry)); /* XXX */
		pa = entry & PG_FRAME;
#ifdef ATTR
		pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: utlbmod: unmanaged page");
		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
		/* reached via fallthrough from the kernel case, too */
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* fault during copyin/copyout: resume at recovery pc */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
			goto err;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (i == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		/* page fault in the current process's address space */
		register vm_offset_t va;
		register struct vmspace *vm;
		register vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			/* kernel-mode fault: try copyin/copyout recovery */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/* deliver a signal; ucode carries the faulting address */
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;	/* saved user registers */
		register struct sysent *callp;
		unsigned int code;	/* syscall number */
		int numsys;		/* size of the syscall table in use */
		struct args {
			int i[8];
		} args;			/* copied-in syscall arguments */
		int rval[2];		/* syscall return values (v0, v1) */
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		if ((int)causeReg < 0)	/* BD bit: syscall in a branch delay slot */
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		/* ULTRIX binaries use the ULTRIX syscall table */
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];
		switch (code) {
		case SYS_indir:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			/* first three args in registers, rest on the stack */
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			if (i > 3) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					/* copyin failed: return the error */
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___indir:
			/*
			 * Like indir, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			/* first four args in registers, rest on the stack */
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* tag the previous trapdebug entry with the syscall number */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;	/* negative marks a syscall return */
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		/* store the syscall result per the MIPS error convention:
		 * a3 == 0 means success (v0/v1 are results), a3 == 1 means
		 * failure (v0 is the errno) */
		switch (i) {
		case 0:
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			/* back up to re-execute the syscall instruction */
			locr0[PC] = pc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)	/* BD bit: break in a branch delay slot */
			va += 4;

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* anything but our own single-step breakpoint is SIGTRAP */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP  */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			/* text page is write-protected: open it up briefly */
			vm_offset_t sa, ea;
			int rv;

			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;	/* single-step breakpoint removed */
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily hand the FPU to this process */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		/* kernel-mode fault: recover via copyin/copyout table if set */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		/* build a pcb snapshot for the kernel debugger */
		extern struct pcb kdbpcb;

		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			/* copy registers out of the locore-saved frame */
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	printf("trap: pid %d '%s' sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		psig(i);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((i = CURSIG(p)) != 0)
			psig(i);
	}

	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
	}

	curpri = p->p_pri;
	return (pc);
}
707 
708 /*
709  * Handle an interrupt.
710  * Called from MachKernIntr() or MachUserIntr()
711  * Note: curproc might be NULL.
712  */
713 interrupt(statusReg, causeReg, pc)
714 	unsigned statusReg;	/* status register at time of the exception */
715 	unsigned causeReg;	/* cause register at time of exception */
716 	unsigned pc;		/* program counter where to continue */
717 {
718 	register unsigned mask;
719 	struct clockframe cf;
720 
721 #ifdef DEBUG
722 	trp->status = statusReg;
723 	trp->cause = causeReg;
724 	trp->vadr = 0;
725 	trp->pc = pc;
726 	trp->ra = 0;
727 	trp->code = 0;
728 	if (++trp == &trapdebug[TRAPSIZE])
729 		trp = trapdebug;
730 #endif
731 
732 	cnt.v_intr++;
733 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
734 	if (pmax_hardware_intr)
735 		splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
736 	if (mask & MACH_INT_MASK_5) {
737 		if (!USERMODE(statusReg)) {
738 #ifdef DEBUG
739 			trapDump("fpintr");
740 #else
741 			printf("FPU interrupt: PC %x CR %x SR %x\n",
742 				pc, causeReg, statusReg);
743 #endif
744 		} else
745 			MachFPInterrupt(statusReg, causeReg, pc);
746 	}
747 	if (mask & MACH_SOFT_INT_MASK_0) {
748 		clearsoftclock();
749 		cnt.v_soft++;
750 		softclock();
751 	}
752 	/* process network interrupt if we trapped or will very soon */
753 	if ((mask & MACH_SOFT_INT_MASK_1) ||
754 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
755 		clearsoftnet();
756 		cnt.v_soft++;
757 #ifdef INET
758 		if (netisr & (1 << NETISR_ARP)) {
759 			netisr &= ~(1 << NETISR_ARP);
760 			arpintr();
761 		}
762 		if (netisr & (1 << NETISR_IP)) {
763 			netisr &= ~(1 << NETISR_IP);
764 			ipintr();
765 		}
766 #endif
767 #ifdef NS
768 		if (netisr & (1 << NETISR_NS)) {
769 			netisr &= ~(1 << NETISR_NS);
770 			nsintr();
771 		}
772 #endif
773 #ifdef ISO
774 		if (netisr & (1 << NETISR_ISO)) {
775 			netisr &= ~(1 << NETISR_ISO);
776 			clnlintr();
777 		}
778 #endif
779 	}
780 }
781 
/*
 * Handle pmax (DECstation 2100/3100) interrupts.
 * Returns the spl value to restore (enables interrupts that are
 * still enabled and not pending).
 */
pmax_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending & enabled interrupt bits */
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register volatile struct chiptime *c = Mach_clock_addr;
	struct clockframe cf;
	int temp;

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_3) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		/* keep clock interrupts enabled */
		causeReg &= ~MACH_INT_MASK_3;
	}
	/* Re-enable clock interrupts */
	splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
#if NSII > 0
	if (mask & MACH_INT_MASK_0)	/* SII SCSI controller */
		siiintr(0);
#endif
#if NLE > 0
	if (mask & MACH_INT_MASK_1)	/* LANCE ethernet */
		leintr(0);
#endif
#if NDC > 0
	if (mask & MACH_INT_MASK_2)	/* DC serial lines */
		dcintr(0);
#endif
	if (mask & MACH_INT_MASK_4)	/* memory error */
		pmax_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
823 
/*
 * Handle hardware interrupts for the KN02. (DECstation 5000/200)
 * Returns spl value.
 */
kn02_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending & enabled interrupt bits */
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register unsigned i, m;
	register volatile struct chiptime *c = Mach_clock_addr;
	register unsigned csr;
	int temp;
	struct clockframe cf;
	static int warned = 0;	/* power-supply warning already printed */

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		if ((csr & KN02_CSR_PSWARN) && !warned) {
			warned = 1;
			printf("WARNING: power supply is overheating!\n");
		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
			warned = 0;
			printf("WARNING: power supply is OK again\n");
		}

		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);

		/* keep clock interrupts enabled */
		causeReg &= ~MACH_INT_MASK_1;
	}
	/* Re-enable clock interrupts */
	splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	if (mask & MACH_INT_MASK_0) {

		/* dispatch I/O interrupts: pending & enabled CSR bits */
		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
#if 0
		/* NOTE(review): disabled code; also has a typo
		 * (MACHPHYS_TO_UNCACHED, missing underscore) */
		*(unsigned *)MACHPHYS_TO_UNCACHED(KN02_SYS_CSR) =
			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
			(m << KN02_CSR_IOINTEN_SHIFT);
#endif
		/* call each slot's handler, lowest bit first */
		for (i = 0; m; i++, m >>= 1) {
			if (!(m & 1))
				continue;
			if (tc_slot_info[i].intr)
				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
			else
				printf("spurious interrupt %d\n", i);
		}
#if 0
		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
			csr & ~(KN02_CSR_WRESERVED | 0xFF);
#endif
	}
	if (mask & MACH_INT_MASK_3)	/* memory error */
		kn02_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
889 
/*
 * 3min hardware interrupts. (DECstation 5000/1xx)
 * Returns the spl value to restore.
 */
kmin_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending & enabled interrupt bits */
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
	volatile u_int *intrp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
	unsigned int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* power-supply warnings printed so far */

	old_mask = *imaskp & kmin_tc3_imask;
	*imaskp = old_mask;

	if (mask & MACH_INT_MASK_4)	/* halt button: back to the PROM */
		(*callv->halt)((int *)0, 0);
	if (mask & MACH_INT_MASK_3) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge (and otherwise ignore) DMA error conditions */
		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);

		if (intr & KMIN_INTR_LANCE_READ_E)
			*intrp &= ~KMIN_INTR_LANCE_READ_E;

		if (intr & KMIN_INTR_TIMEOUT)
			kn02ba_errintr();

		if (intr & KMIN_INTR_CLOCK) {
			temp = c->regc;	/* XXX clear interrupt bits */
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
		}

		/* dispatch on-board device interrupts to their slot handlers */
		if ((intr & KMIN_INTR_SCC_0) &&
			tc_slot_info[KMIN_SCC0_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
			(tc_slot_info[KMIN_SCC0_SLOT].unit);

		if ((intr & KMIN_INTR_SCC_1) &&
			tc_slot_info[KMIN_SCC1_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
			(tc_slot_info[KMIN_SCC1_SLOT].unit);

		if ((intr & KMIN_INTR_SCSI) &&
			tc_slot_info[KMIN_SCSI_SLOT].intr)
			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
			(tc_slot_info[KMIN_SCSI_SLOT].unit);

		if ((intr & KMIN_INTR_LANCE) &&
			tc_slot_info[KMIN_LANCE_SLOT].intr)
			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
			(tc_slot_info[KMIN_LANCE_SLOT].unit);

		/* power-supply warnings, rate-limited to three messages */
		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	/* TURBOchannel option slot interrupts */
	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
981 
/*
 * Maxine hardware interrupts. (Personal DECstation 5000/xx)
 * Returns the spl value to restore.
 */
xine_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending & enabled interrupt bits */
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;

	old_mask = *imaskp & xine_tc3_imask;
	*imaskp = old_mask;

	if (mask & MACH_INT_MASK_4)	/* halt button: back to the PROM */
		(*callv->halt)((int *)0, 0);

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge (and otherwise ignore) DMA error conditions */
		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);

		if (intr & XINE_INTR_LANCE_READ_E)
			*intrp &= ~XINE_INTR_LANCE_READ_E;

		/* dispatch device interrupts to their slot handlers */
		if ((intr & XINE_INTR_SCC_0) &&
			tc_slot_info[XINE_SCC0_SLOT].intr)
			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
			(tc_slot_info[XINE_SCC0_SLOT].unit);

		if ((intr & XINE_INTR_DTOP_RX) &&
			tc_slot_info[XINE_DTOP_SLOT].intr)
			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
			(tc_slot_info[XINE_DTOP_SLOT].unit);

		if ((intr & XINE_INTR_FLOPPY) &&
			tc_slot_info[XINE_FLOPPY_SLOT].intr)
			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
			(tc_slot_info[XINE_FLOPPY_SLOT].unit);

		if ((intr & XINE_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & XINE_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & XINE_INTR_ISDN) &&
			tc_slot_info[XINE_ISDN_SLOT].intr)
			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
			(tc_slot_info[XINE_ISDN_SLOT].unit);

		if ((intr & XINE_INTR_SCSI) &&
			tc_slot_info[XINE_SCSI_SLOT].intr)
			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
			(tc_slot_info[XINE_SCSI_SLOT].unit);

		if ((intr & XINE_INTR_LANCE) &&
			tc_slot_info[XINE_LANCE_SLOT].intr)
			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
			(tc_slot_info[XINE_LANCE_SLOT].unit);

	}
	if (mask & MACH_INT_MASK_2)	/* memory error */
		kn02ba_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1081 
1082 #ifdef DS5000_240
1083 /*
1084  * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
1085  */
/*
 * Hardware interrupt dispatcher for the DECstation 5000/240 (KN03, "3max+").
 * Called with the set of pending hard interrupt levels in 'mask'; services
 * the clock, the IOASIC device interrupts and memory errors, then returns
 * the new status-register value (pending-but-unserviced levels masked off,
 * interrupts enabled) for the low-level handler to restore.
 */
kn03_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending hard interrupt bits to service */
	unsigned pc;		/* PC at the time of the interrupt */
	unsigned statusReg;	/* CP0 status register at interrupt time */
	unsigned causeReg;	/* CP0 cause register at interrupt time */
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	/* IOASIC interrupt-mask and interrupt-request registers (uncached) */
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* throttles power-supply warnings */

	/* restrict the IOASIC mask to the currently enabled sources */
	old_mask = *imaskp & kn03_tc3_imask;
	*imaskp = old_mask;

	/* level 4 is the halt button: drop into the PROM */
	if (mask & MACH_INT_MASK_4)
		(*callv->halt)((int *)0, 0);

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	/* level 0 is the IOASIC: poll each device's request bit */
	if (mask & MACH_INT_MASK_0) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		/* DMA status bits are write-to-clear in the INTR register */
		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);

		if (intr & KN03_INTR_LANCE_READ_E)
			*intrp &= ~KN03_INTR_LANCE_READ_E;

		/* dispatch to each configured device's registered handler */
		if ((intr & KN03_INTR_SCC_0) &&
			tc_slot_info[KN03_SCC0_SLOT].intr)
			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
			(tc_slot_info[KN03_SCC0_SLOT].unit);

		if ((intr & KN03_INTR_SCC_1) &&
			tc_slot_info[KN03_SCC1_SLOT].intr)
			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
			(tc_slot_info[KN03_SCC1_SLOT].unit);

		if ((intr & KN03_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & KN03_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & KN03_INTR_TC_2) &&
			tc_slot_info[2].intr)
			(*(tc_slot_info[2].intr))
			(tc_slot_info[2].unit);

		if ((intr & KN03_INTR_SCSI) &&
			tc_slot_info[KN03_SCSI_SLOT].intr)
			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
			(tc_slot_info[KN03_SCSI_SLOT].unit);

		if ((intr & KN03_INTR_LANCE) &&
			tc_slot_info[KN03_LANCE_SLOT].intr)
			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
			(tc_slot_info[KN03_LANCE_SLOT].unit);

		/* power-supply warning: report recovery, limit nag to 3 */
		if (user_warned && ((intr & KN03_INTR_PSWARN) == 0)) {
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KN03_INTR_PSWARN) && (user_warned < 3)) {
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	/* level 3 is the memory-error interrupt */
	if (mask & MACH_INT_MASK_3)
		kn03_errintr();
	/* mask off interrupts still pending in cause, keep interrupts on */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1186 #endif /* DS5000_240 */
1187 
1188 /*
1189  * This is called from MachUserIntr() if astpending is set.
1190  * This is very similar to the tail of trap().
1191  */
/*
 * This is called from MachUserIntr() if astpending is set.
 * This is very similar to the tail of trap(): deliver pending signals,
 * honor profiling ticks, and reschedule if a context switch was requested.
 */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	int sig;

	cnt.v_soft++;		/* account a soft interrupt */
	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;	/* return to user-mode priority */
	astpending = 0;
	if (p->p_flag & SOWEUPC) {
		/* deferred profiling tick: charge it to the user PC now */
		p->p_flag &= ~SOWEUPC;
		ADDUPROF(p);
	}
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;	/* involuntary context switch */
		swtch();
		splx(s);
		/* signals may have arrived while we were switched out */
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}
	curpri = p->p_pri;
}
1230 
1231 #ifdef DEBUG
1232 trapDump(msg)
1233 	char *msg;
1234 {
1235 	register int i;
1236 	int s;
1237 
1238 	s = splhigh();
1239 	printf("trapDump(%s)\n", msg);
1240 	for (i = 0; i < TRAPSIZE; i++) {
1241 		if (trp == trapdebug)
1242 			trp = &trapdebug[TRAPSIZE - 1];
1243 		else
1244 			trp--;
1245 		if (trp->cause == 0)
1246 			break;
1247 		printf("%s: ADR %x PC %x CR %x SR %x\n",
1248 			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
1249 				MACH_CR_EXC_CODE_SHIFT],
1250 			trp->vadr, trp->pc, trp->cause, trp->status);
1251 		printf("   RA %x code %d\n", trp-> ra, trp->code);
1252 	}
1253 	bzero(trapdebug, sizeof(trapdebug));
1254 	trp = trapdebug;
1255 	splx(s);
1256 }
1257 #endif
1258 
1259 /*
1260  *----------------------------------------------------------------------
1261  *
1262  * MemErrorInterrupts --
1263  *   pmax_errintr - for the DS2100/DS3100
1264  *   kn02_errintr - for the DS5000/200
1265  *   kn02ba_errintr - for the DS5000/1xx and DS5000/xx
1266  *
 *	Handle an interrupt from the system control register.
1268  *
1269  * Results:
1270  *	None.
1271  *
1272  * Side effects:
1273  *	None.
1274  *
1275  *----------------------------------------------------------------------
1276  */
1277 static void
1278 pmax_errintr()
1279 {
1280 	volatile u_short *sysCSRPtr =
1281 		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
1282 	u_short csr;
1283 
1284 	csr = *sysCSRPtr;
1285 
1286 	if (csr & KN01_CSR_MERR) {
1287 		printf("Memory error at 0x%x\n",
1288 			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
1289 		panic("Mem error interrupt");
1290 	}
1291 	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
1292 }
1293 
1294 static void
1295 kn02_errintr()
1296 {
1297 
1298 	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR));
1299 	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
1300 	MachEmptyWriteBuffer();
1301 }
1302 
1303 #ifdef DS5000_240
1304 static void
1305 kn03_errintr()
1306 {
1307 
1308 	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR));
1309 	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR) = 0;
1310 	MachEmptyWriteBuffer();
1311 }
1312 #endif /* DS5000_240 */
1313 
1314 static void
1315 kn02ba_errintr()
1316 {
1317 	register int mer, adr, siz;
1318 	static int errintr_cnt = 0;
1319 
1320 	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
1321 	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
1322 	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);
1323 
1324 	/* clear interrupt bit */
1325 	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;
1326 
1327 	errintr_cnt++;
1328 	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
1329 	       "Bad memory chip at phys ",
1330 	       kn02ba_recover_erradr(adr, mer),
1331 	       mer, siz, adr);
1332 }
1333 
1334 static unsigned
1335 kn02ba_recover_erradr(phys, mer)
1336 	register unsigned phys, mer;
1337 {
1338 	/* phys holds bits 28:2, mer knows which byte */
1339 	switch (mer & KMIN_MER_LASTBYTE) {
1340 	case KMIN_LASTB31:
1341 		mer = 3; break;
1342 	case KMIN_LASTB23:
1343 		mer = 2; break;
1344 	case KMIN_LASTB15:
1345 		mer = 1; break;
1346 	case KMIN_LASTB07:
1347 		mer = 0; break;
1348 	}
1349 	return ((phys & KMIN_AER_ADDR_MASK) | mer);
1350 }
1351 
1352 /*
1353  * Return the resulting PC as if the branch was executed.
1354  */
/*
 * Return the resulting PC as if the branch was executed.
 *
 * Decodes the instruction at 'instPC' and computes the address execution
 * would continue at: the branch/jump target for taken branches, instPC + 8
 * for untaken conditional branches (skipping the delay slot), or instPC + 4
 * for non-branch instructions when 'allowNonBranch' permits them (otherwise
 * a non-branch is a panic).  'regsPtr' is the saved register set used to
 * evaluate register operands; 'fpcCSR' supplies the FP condition bit for
 * coprocessor-1 branches.
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;	/* saved general registers, indexed by regno */
	unsigned instPC;	/* address of the instruction to decode */
	unsigned fpcCSR;	/* FP control/status register (condition bit) */
	int allowNonBranch;	/* 0: panic on non-branch; 1: fall through */
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		/* register jumps: target comes from rs */
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		/* branch-on-condition group: rt selects the comparison */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* absolute jump: 26-bit target within the current 256MB region */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		/* FP branches test the condition bit in the FP status register */
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}
1477 
1478 unsigned
1479 GetBranchDest(InstPtr)
1480 	InstFmt *InstPtr;
1481 {
1482 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1483 }
1484 
1485 /*
1486  * This routine is called by procxmt() to single step one instruction.
1487  * We do this by storing a break instruction after the current instruction,
1488  * resuming execution, and then restoring the old instruction.
1489  */
/*
 * This routine is called by procxmt() to single step one instruction.
 * We do this by storing a break instruction after the current instruction,
 * resuming execution, and then restoring the old instruction.
 * Returns 0 on success, EFAULT if the breakpoint cannot be placed.
 */
cpu_singlestep(p)
	register struct proc *p;
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;	/* saved user registers */
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): the second clause (md_ss_addr == va) is unreachable
	 * when the first (md_ss_addr nonzero) is false — confirm whether
	 * "md_ss_addr == va" was meant as a separate allowed case.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	/* remember where the breakpoint went and the instruction it replaced */
	p->p_md.md_ss_addr = va;
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		/* page not writable (e.g. shared text): temporarily allow
		   writes, retry the store, then restore read/execute */
		vm_offset_t sa, ea;
		int rv;

		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}
1529 
1530 #ifdef DEBUG
/*
 * Fetch a word from kernel memory for the debugger.  Returns the word at
 * 'addr', or -1 for an unaligned address (note -1 is also a valid word
 * value, so callers cannot distinguish the error from data).
 */
int
kdbpeek(addr)
	int addr;
{
	if ((addr & 3) == 0)
		return (*(int *)addr);
	printf("kdbpeek: unaligned address %x\n", addr);
	return (-1);
}
1539 
1540 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1541 
1542 /*
1543  * Print a stack backtrace.
1544  */
/*
 * Print a stack backtrace.
 *
 * Heuristic unwinder: for each frame it scans backwards from the PC for
 * the "jr ra" that ends the previous function to find the current
 * function's entry point, then scans forward over the prologue looking
 * for the stack adjustment and any saved argument/ra/fp registers.
 * The a0-a3 parameters seed the argument values printed for the
 * innermost frame.
 */
void
stacktrace(a0, a1, a2, a3)
	int a0, a1, a2, a3;
{
	unsigned pc, sp, fp, ra, va, subr;
	unsigned instr, mask;
	InstFmt i;
	int more, stksize;
	int regs[3];
	extern setsoftclock();
	extern char start[], edata[];

	cpu_getregs(regs);

	/* get initial values from the exception frame */
	sp = regs[0];
	pc = regs[1];
	ra = 0;
	fp = regs[2];

loop:
	/* check for current PC in the kernel interrupt handler code */
	if (pc >= (unsigned)MachKernIntr && pc < (unsigned)MachUserIntr) {
		/* NOTE: the offsets depend on the code in locore.s */
		printf("interrupt\n");
		a0 = kdbpeek(sp + 36);
		a1 = kdbpeek(sp + 40);
		a2 = kdbpeek(sp + 44);
		a3 = kdbpeek(sp + 48);
		pc = kdbpeek(sp + 20);
		ra = kdbpeek(sp + 92);
		sp = kdbpeek(sp + 100);
		/* NOTE(review): fp is fetched relative to the already-updated
		   sp, not the original frame — confirm against locore.s */
		fp = kdbpeek(sp + 104);
	}

	/* check for current PC in the exception handler code */
	if (pc >= 0x80000000 && pc < (unsigned)setsoftclock) {
		ra = 0;
		subr = 0;
		goto done;
	}

	/* check for bad PC */
	if (pc & 3 || pc < 0x80000000 || pc >= (unsigned)edata) {
		printf("PC 0x%x: not in kernel\n", pc);
		ra = 0;
		subr = 0;
		goto done;
	}

	/*
	 * Find the beginning of the current subroutine by scanning backwards
	 * from the current PC for the end of the previous subroutine.
	 */
	va = pc - sizeof(int);
	while ((instr = kdbpeek(va)) != MIPS_JR_RA)
		va -= sizeof(int);
	va += 2 * sizeof(int);	/* skip back over branch & delay slot */
	/* skip over nulls which might separate .o files */
	while ((instr = kdbpeek(va)) == 0)
		va += sizeof(int);
	subr = va;

	/* scan forwards to find stack size and any saved registers */
	/* 'more' counts down after a branch so its delay slot is still read */
	stksize = 0;
	more = 3;
	mask = 0;
	for (; more; va += sizeof(int), more = (more == 3) ? 3 : more - 1) {
		/* stop if hit our current position */
		if (va >= pc)
			break;
		instr = kdbpeek(va);
		i.word = instr;
		switch (i.JType.op) {
		case OP_SPECIAL:
			switch (i.RType.func) {
			case OP_JR:
			case OP_JALR:
				more = 2; /* stop after next instruction */
				break;

			case OP_SYSCALL:
			case OP_BREAK:
				more = 1; /* stop now */
			};
			break;

		case OP_BCOND:
		case OP_J:
		case OP_JAL:
		case OP_BEQ:
		case OP_BNE:
		case OP_BLEZ:
		case OP_BGTZ:
			more = 2; /* stop after next instruction */
			break;

		case OP_COP0:
		case OP_COP1:
		case OP_COP2:
		case OP_COP3:
			switch (i.RType.rs) {
			case OP_BCx:
			case OP_BCy:
				more = 2; /* stop after next instruction */
			};
			break;

		case OP_SW:
			/* look for saved registers on the stack */
			if (i.IType.rs != 29)
				break;
			/* only restore the first one */
			if (mask & (1 << i.IType.rt))
				break;
			mask |= 1 << i.IType.rt;
			switch (i.IType.rt) {
			case 4: /* a0 */
				a0 = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 5: /* a1 */
				a1 = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 6: /* a2 */
				a2 = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 7: /* a3 */
				a3 = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 30: /* fp */
				fp = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 31: /* ra */
				ra = kdbpeek(sp + (short)i.IType.imm);
			}
			break;

		case OP_ADDI:
		case OP_ADDIU:
			/* look for stack pointer adjustment */
			if (i.IType.rs != 29 || i.IType.rt != 29)
				break;
			stksize = (short)i.IType.imm;
		}
	}

done:
	printf("%x+%x (%x,%x,%x,%x) ra %x sz %d\n",
		subr, pc - subr, a0, a1, a2, a3, ra, stksize);

	if (ra) {
		/* pop to the caller's frame unless we would loop forever */
		if (pc == ra && stksize == 0)
			printf("stacktrace: loop!\n");
		else {
			pc = ra;
			sp -= stksize;	/* stksize is negative (prologue addiu) */
			goto loop;
		}
	}
}
1710 #endif /* DEBUG */
1711