1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	8.6 (Berkeley) 07/03/94
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29 
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36 
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49 
50 #include <pmax/stand/dec_prom.h>
51 
52 #include <asc.h>
53 #include <sii.h>
54 #include <le.h>
55 #include <dc.h>
56 
57 struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */
58 
59 extern void MachKernGenException();
60 extern void MachUserGenException();
61 extern void MachKernIntr();
62 extern void MachUserIntr();
63 extern void MachTLBModException();
64 extern void MachTLBMissException();
65 extern unsigned MachEmulateBranch();
66 
67 void (*machExceptionTable[])() = {
68 /*
69  * The kernel exception handlers.
70  */
71 	MachKernIntr,			/* external interrupt */
72 	MachKernGenException,		/* TLB modification */
73 	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
74 	MachTLBMissException,		/* TLB miss (store) */
75 	MachKernGenException,		/* address error (load or I-fetch) */
76 	MachKernGenException,		/* address error (store) */
77 	MachKernGenException,		/* bus error (I-fetch) */
78 	MachKernGenException,		/* bus error (load or store) */
79 	MachKernGenException,		/* system call */
80 	MachKernGenException,		/* breakpoint */
81 	MachKernGenException,		/* reserved instruction */
82 	MachKernGenException,		/* coprocessor unusable */
83 	MachKernGenException,		/* arithmetic overflow */
84 	MachKernGenException,		/* reserved */
85 	MachKernGenException,		/* reserved */
86 	MachKernGenException,		/* reserved */
87 /*
88  * The user exception handlers.
89  */
90 	MachUserIntr,
91 	MachUserGenException,
92 	MachUserGenException,
93 	MachUserGenException,
94 	MachUserGenException,
95 	MachUserGenException,
96 	MachUserGenException,
97 	MachUserGenException,
98 	MachUserGenException,
99 	MachUserGenException,
100 	MachUserGenException,
101 	MachUserGenException,
102 	MachUserGenException,
103 	MachUserGenException,
104 	MachUserGenException,
105 	MachUserGenException,
106 };
107 
108 char	*trap_type[] = {
109 	"external interrupt",
110 	"TLB modification",
111 	"TLB miss (load or instr. fetch)",
112 	"TLB miss (store)",
113 	"address error (load or I-fetch)",
114 	"address error (store)",
115 	"bus error (I-fetch)",
116 	"bus error (load or store)",
117 	"system call",
118 	"breakpoint",
119 	"reserved instruction",
120 	"coprocessor unusable",
121 	"arithmetic overflow",
122 	"reserved 13",
123 	"reserved 14",
124 	"reserved 15",
125 };
126 
127 #ifdef DEBUG
128 #define TRAPSIZE	10
129 struct trapdebug {		/* trap history buffer for debugging */
130 	u_int	status;
131 	u_int	cause;
132 	u_int	vadr;
133 	u_int	pc;
134 	u_int	ra;
135 	u_int	code;
136 } trapdebug[TRAPSIZE], *trp = trapdebug;
137 
138 u_int	intr_level;		/* number of nested interrupts */
139 #endif
140 
141 static void pmax_errintr();
142 static void kn02_errintr(), kn02ba_errintr();
143 #ifdef DS5000_240
144 static void kn03_errintr();
145 #endif
146 static unsigned kn02ba_recover_erradr();
147 extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
148 extern u_long kmin_tc3_imask, xine_tc3_imask;
149 extern const struct callback *callv;
150 #ifdef DS5000_240
151 extern u_long kn03_tc3_imask;
152 #endif
153 int (*pmax_hardware_intr)() = (int (*)())0;
154 extern volatile struct chiptime *Mach_clock_addr;
155 
156 /*
157  * Handle an exception.
158  * Called from MachKernGenException() or MachUserGenException()
159  * when a processor trap occurs.
160  * In the case of a kernel trap, if ((struct pcb *)UADDR)->pcb_onfault is
161  * set, we return the pc at which to resume; otherwise we return the old pc.
162  */
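/*
 * Sketch of the fault-recovery convention assumed throughout this file:
 * kernel routines that touch user memory (copyin(), copyout() and friends)
 * record a small index in ((struct pcb *)UADDR)->pcb_onfault before the
 * access.  If the access traps, the handlers below clear pcb_onfault and
 * resume at onfault_table[index] instead of panicking or posting a signal.
 */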
163 unsigned
164 trap(statusReg, causeReg, vadr, pc, args)
165 	unsigned statusReg;	/* status register at time of the exception */
166 	unsigned causeReg;	/* cause register at time of exception */
167 	unsigned vadr;		/* address (if any) the fault occurred on */
168 	unsigned pc;		/* program counter where to continue */
169 {
170 	register int type, i;
171 	unsigned ucode = 0;
172 	register struct proc *p = curproc;
173 	u_quad_t sticks;
174 	vm_prot_t ftype;
175 	extern unsigned onfault_table[];
176 
177 #ifdef DEBUG
178 	trp->status = statusReg;
179 	trp->cause = causeReg;
180 	trp->vadr = vadr;
181 	trp->pc = pc;
182 	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
183 		p->p_md.md_regs[RA];
184 	trp->code = 0;
185 	if (++trp == &trapdebug[TRAPSIZE])
186 		trp = trapdebug;
187 #endif
188 
189 	cnt.v_trap++;
190 	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
191 	if (USERMODE(statusReg)) {
192 		type |= T_USER;
193 		sticks = p->p_sticks;
194 	}
195 
196 	/*
197 	 * Enable hardware interrupts if they were on before.
198 	 * We only respond to software interrupts when returning to user mode.
199 	 */
200 	if (statusReg & MACH_SR_INT_ENA_PREV)
201 		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);
202 
203 	switch (type) {
204 	case T_TLB_MOD:
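		/*
		 * A TLB-modified trap means a store hit a valid translation
		 * whose dirty (PG_M) bit is still clear.  The code below sets
		 * PG_M in the pte, pushes the updated entry into the TLB with
		 * MachTLBUpdate(), and marks the page modified for the VM
		 * system; a store to a read-only (PG_RO) mapping is treated
		 * as a protection fault instead.
		 */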
205 		/* check for kernel address */
206 		if ((int)vadr < 0) {
207 			register pt_entry_t *pte;
208 			register unsigned entry;
209 			register vm_offset_t pa;
210 
211 			pte = kvtopte(vadr);
212 			entry = pte->pt_entry;
213 #ifdef DIAGNOSTIC
214 			if (!(entry & PG_V) || (entry & PG_M))
215 				panic("trap: ktlbmod: invalid pte");
216 #endif
217 			if (entry & PG_RO) {
218 				/* write to read only page in the kernel */
219 				ftype = VM_PROT_WRITE;
220 				goto kernel_fault;
221 			}
222 			entry |= PG_M;
223 			pte->pt_entry = entry;
224 			vadr &= ~PGOFSET;
225 			MachTLBUpdate(vadr, entry);
226 			pa = entry & PG_FRAME;
227 #ifdef ATTR
228 			pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
229 #else
230 			if (!IS_VM_PHYSADDR(pa))
231 				panic("trap: ktlbmod: unmanaged page");
232 			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
233 #endif
234 			return (pc);
235 		}
236 		/* FALLTHROUGH */
237 
238 	case T_TLB_MOD+T_USER:
239 	    {
240 		register pt_entry_t *pte;
241 		register unsigned entry;
242 		register vm_offset_t pa;
243 		pmap_t pmap = &p->p_vmspace->vm_pmap;
244 
245 		if (!(pte = pmap_segmap(pmap, vadr)))
246 			panic("trap: utlbmod: invalid segmap");
247 		pte += (vadr >> PGSHIFT) & (NPTEPG - 1);
248 		entry = pte->pt_entry;
249 #ifdef DIAGNOSTIC
250 		if (!(entry & PG_V) || (entry & PG_M))
251 			panic("trap: utlbmod: invalid pte");
252 #endif
253 		if (entry & PG_RO) {
254 			/* write to read only page */
255 			ftype = VM_PROT_WRITE;
256 			goto dofault;
257 		}
258 		entry |= PG_M;
259 		pte->pt_entry = entry;
260 		vadr = (vadr & ~PGOFSET) |
261 			(pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
262 		MachTLBUpdate(vadr, entry);
263 		pa = entry & PG_FRAME;
264 #ifdef ATTR
265 		pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
266 #else
267 		if (!IS_VM_PHYSADDR(pa))
268 			panic("trap: utlbmod: unmanaged page");
269 		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
270 #endif
271 		if (!USERMODE(statusReg))
272 			return (pc);
273 		goto out;
274 	    }
275 
276 	case T_TLB_LD_MISS:
277 	case T_TLB_ST_MISS:
278 		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
279 		/* check for kernel address */
280 		if ((int)vadr < 0) {
281 			register vm_offset_t va;
282 			int rv;
283 
284 		kernel_fault:
285 			va = trunc_page((vm_offset_t)vadr);
286 			rv = vm_fault(kernel_map, va, ftype, FALSE);
287 			if (rv == KERN_SUCCESS)
288 				return (pc);
289 			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
290 				((struct pcb *)UADDR)->pcb_onfault = 0;
291 				return (onfault_table[i]);
292 			}
293 			goto err;
294 		}
295 		/*
296 		 * It is an error for the kernel to access user space except
297 		 * through the copyin/copyout routines.
298 		 */
299 		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
300 			goto err;
301 		/* check for fuswintr() or suswintr() getting a page fault */
302 		if (i == 4)
303 			return (onfault_table[i]);
304 		goto dofault;
305 
306 	case T_TLB_LD_MISS+T_USER:
307 		ftype = VM_PROT_READ;
308 		goto dofault;
309 
310 	case T_TLB_ST_MISS+T_USER:
311 		ftype = VM_PROT_WRITE;
312 	dofault:
313 	    {
314 		register vm_offset_t va;
315 		register struct vmspace *vm;
316 		register vm_map_t map;
317 		int rv;
318 
319 		vm = p->p_vmspace;
320 		map = &vm->vm_map;
321 		va = trunc_page((vm_offset_t)vadr);
322 		rv = vm_fault(map, va, ftype, FALSE);
323 		/*
324 		 * If this was a stack access we keep track of the maximum
325 		 * accessed stack size.  Also, if vm_fault gets a protection
326 		 * failure it is due to accessing the stack region outside
327 		 * the current limit and we need to reflect that as an access
328 		 * error.
329 		 */
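		/*
		 * Roughly: a successful fault above vm_maxsaddr grows the
		 * recorded stack size to cover USRSTACK - va (in clicks),
		 * and a protection failure there is remapped to
		 * KERN_INVALID_ADDRESS so the process gets SIGSEGV rather
		 * than SIGBUS.
		 */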
330 		if ((caddr_t)va >= vm->vm_maxsaddr) {
331 			if (rv == KERN_SUCCESS) {
332 				unsigned nss;
333 
334 				nss = clrnd(btoc(USRSTACK-(unsigned)va));
335 				if (nss > vm->vm_ssize)
336 					vm->vm_ssize = nss;
337 			} else if (rv == KERN_PROTECTION_FAILURE)
338 				rv = KERN_INVALID_ADDRESS;
339 		}
340 		if (rv == KERN_SUCCESS) {
341 			if (!USERMODE(statusReg))
342 				return (pc);
343 			goto out;
344 		}
345 		if (!USERMODE(statusReg)) {
346 			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
347 				((struct pcb *)UADDR)->pcb_onfault = 0;
348 				return (onfault_table[i]);
349 			}
350 			goto err;
351 		}
352 		ucode = vadr;
353 		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
354 		break;
355 	    }
356 
357 	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
358 	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
359 	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
360 	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
361 		i = SIGSEGV;
362 		break;
363 
364 	case T_SYSCALL+T_USER:
365 	    {
366 		register int *locr0 = p->p_md.md_regs;
367 		register struct sysent *callp;
368 		unsigned int code;
369 		int numsys;
370 		struct args {
371 			int i[8];
372 		} args;
373 		int rval[2];
374 		struct sysent *systab;
375 		extern int nsysent;
376 #ifdef ULTRIXCOMPAT
377 		extern struct sysent ultrixsysent[];
378 		extern int ultrixnsysent;
379 #endif
380 
381 		cnt.v_syscall++;
382 		/* compute next PC after syscall instruction */
383 		if ((int)causeReg < 0)
384 			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
385 		else
386 			locr0[PC] += 4;
387 		systab = sysent;
388 		numsys = nsysent;
389 #ifdef ULTRIXCOMPAT
390 		if (p->p_md.md_flags & MDP_ULTRIX) {
391 			systab = ultrixsysent;
392 			numsys = ultrixnsysent;
393 		}
394 #endif
395 		code = locr0[V0];
396 		switch (code) {
397 		case SYS_syscall:
398 			/*
399 			 * Code is first argument, followed by actual args.
400 			 */
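			/*
			 * Register convention assumed here: v0 held
			 * SYS_syscall, a0 holds the real syscall number,
			 * a1-a3 hold the first three arguments, and any
			 * further arguments are fetched from the user stack
			 * at sp + 16 (four reserved argument slots).
			 */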
401 			code = locr0[A0];
402 			if (code >= numsys)
403 				callp = &systab[SYS_syscall]; /* (illegal) */
404 			else
405 				callp = &systab[code];
406 			i = callp->sy_narg;
407 			args.i[0] = locr0[A1];
408 			args.i[1] = locr0[A2];
409 			args.i[2] = locr0[A3];
410 			if (i > 3) {
411 				i = copyin((caddr_t)(locr0[SP] +
412 						4 * sizeof(int)),
413 					(caddr_t)&args.i[3],
414 					(u_int)(i - 3) * sizeof(int));
415 				if (i) {
416 					locr0[V0] = i;
417 					locr0[A3] = 1;
418 #ifdef KTRACE
419 					if (KTRPOINT(p, KTR_SYSCALL))
420 						ktrsyscall(p->p_tracep, code,
421 							callp->sy_narg, args.i);
422 #endif
423 					goto done;
424 				}
425 			}
426 			break;
427 
428 		case SYS___syscall:
429 			/*
430 			 * Like syscall, but code is a quad, so as to maintain
431 			 * quad alignment for the rest of the arguments.
432 			 */
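			/*
			 * The syscall number is passed as a 64-bit quad in
			 * a0/a1; _QUAD_LOWWORD selects the significant word
			 * for the machine's byte order.  The first two real
			 * arguments then arrive in a2/a3 and the rest are
			 * copied in from the user stack at sp + 16.
			 */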
433 			code = locr0[A0 + _QUAD_LOWWORD];
434 			if (code >= numsys)
435 				callp = &systab[SYS_syscall]; /* (illegal) */
436 			else
437 				callp = &systab[code];
438 			i = callp->sy_narg;
439 			args.i[0] = locr0[A2];
440 			args.i[1] = locr0[A3];
441 			if (i > 2) {
442 				i = copyin((caddr_t)(locr0[SP] +
443 						4 * sizeof(int)),
444 					(caddr_t)&args.i[2],
445 					(u_int)(i - 2) * sizeof(int));
446 				if (i) {
447 					locr0[V0] = i;
448 					locr0[A3] = 1;
449 #ifdef KTRACE
450 					if (KTRPOINT(p, KTR_SYSCALL))
451 						ktrsyscall(p->p_tracep, code,
452 							callp->sy_narg, args.i);
453 #endif
454 					goto done;
455 				}
456 			}
457 			break;
458 
459 		default:
460 			if (code >= numsys)
461 				callp = &systab[SYS_syscall]; /* (illegal) */
462 			else
463 				callp = &systab[code];
464 			i = callp->sy_narg;
465 			args.i[0] = locr0[A0];
466 			args.i[1] = locr0[A1];
467 			args.i[2] = locr0[A2];
468 			args.i[3] = locr0[A3];
469 			if (i > 4) {
470 				i = copyin((caddr_t)(locr0[SP] +
471 						4 * sizeof(int)),
472 					(caddr_t)&args.i[4],
473 					(u_int)(i - 4) * sizeof(int));
474 				if (i) {
475 					locr0[V0] = i;
476 					locr0[A3] = 1;
477 #ifdef KTRACE
478 					if (KTRPOINT(p, KTR_SYSCALL))
479 						ktrsyscall(p->p_tracep, code,
480 							callp->sy_narg, args.i);
481 #endif
482 					goto done;
483 				}
484 			}
485 		}
486 #ifdef KTRACE
487 		if (KTRPOINT(p, KTR_SYSCALL))
488 			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
489 #endif
490 		rval[0] = 0;
491 		rval[1] = locr0[V1];
492 #ifdef DEBUG
493 		if (trp == trapdebug)
494 			trapdebug[TRAPSIZE - 1].code = code;
495 		else
496 			trp[-1].code = code;
497 #endif
498 		i = (*callp->sy_call)(p, &args, rval);
499 		/*
500 		 * Reinitialize proc pointer `p' as it may be different
501 		 * if this is a child returning from fork syscall.
502 		 */
503 		p = curproc;
504 		locr0 = p->p_md.md_regs;
505 #ifdef DEBUG
506 		{ int s;
507 		s = splhigh();
508 		trp->status = statusReg;
509 		trp->cause = causeReg;
510 		trp->vadr = locr0[SP];
511 		trp->pc = locr0[PC];
512 		trp->ra = locr0[RA];
513 		trp->code = -code;
514 		if (++trp == &trapdebug[TRAPSIZE])
515 			trp = trapdebug;
516 		splx(s);
517 		}
518 #endif
519 		switch (i) {
520 		case 0:
521 			locr0[V0] = rval[0];
522 			locr0[V1] = rval[1];
523 			locr0[A3] = 0;
524 			break;
525 
526 		case ERESTART:
527 			locr0[PC] = pc;
528 			break;
529 
530 		case EJUSTRETURN:
531 			break;	/* nothing to do */
532 
533 		default:
534 			locr0[V0] = i;
535 			locr0[A3] = 1;
536 		}
537 	done:
538 #ifdef KTRACE
539 		if (KTRPOINT(p, KTR_SYSRET))
540 			ktrsysret(p->p_tracep, code, i, rval[0]);
541 #endif
542 		goto out;
543 	    }
544 
545 	case T_BREAK+T_USER:
546 	    {
547 		register unsigned va, instr;
548 
549 		/* compute address of break instruction */
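		/*
		 * A negative cause register means the BD (branch delay) bit,
		 * its sign bit, is set: the break instruction is in the
		 * delay slot at pc + 4 rather than at pc itself.
		 */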
550 		va = pc;
551 		if ((int)causeReg < 0)
552 			va += 4;
553 
554 		/* read break instruction */
555 		instr = fuiword((caddr_t)va);
556 #if 0
557 		printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
558 			p->p_comm, p->p_pid, instr, pc,
559 			p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
560 #endif
561 #ifdef KADB
562 		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
563 			goto err;
564 #endif
565 		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
566 			i = SIGTRAP;
567 			break;
568 		}
569 
570 		/* restore original instruction and clear BP  */
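		/*
		 * suiword() can fail here because user text is normally
		 * mapped read-only; if so, temporarily grant write access to
		 * the page with vm_map_protect(), rewrite the saved word,
		 * and restore read/execute protection.
		 */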
571 		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
572 		if (i < 0) {
573 			vm_offset_t sa, ea;
574 			int rv;
575 
576 			sa = trunc_page((vm_offset_t)va);
577 			ea = round_page((vm_offset_t)va+sizeof(int)-1);
578 			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
579 				VM_PROT_DEFAULT, FALSE);
580 			if (rv == KERN_SUCCESS) {
581 				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
582 				(void) vm_map_protect(&p->p_vmspace->vm_map,
583 					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
584 					FALSE);
585 			}
586 		}
587 		if (i < 0)
588 			printf("Warning: can't restore instruction at %x: %x\n",
589 				p->p_md.md_ss_addr, p->p_md.md_ss_instr);
590 		p->p_md.md_ss_addr = 0;
591 		i = SIGTRAP;
592 		break;
593 	    }
594 
595 	case T_RES_INST+T_USER:
596 		i = SIGILL;
597 		break;
598 
599 	case T_COP_UNUSABLE+T_USER:
600 		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
601 			i = SIGILL;	/* only FPU instructions allowed */
602 			break;
603 		}
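		/*
		 * Coprocessor-unusable with CE == 1 (0x10000000) selects the
		 * FPU: lazily hand the FP register state to this process,
		 * record it as the FPU owner, and set the coprocessor-1
		 * usable bit in its saved status register so further FP
		 * instructions execute directly.
		 */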
604 		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
605 		machFPCurProcPtr = p;
606 		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
607 		p->p_md.md_flags |= MDP_FPUSED;
608 		goto out;
609 
610 	case T_OVFLOW+T_USER:
611 		i = SIGFPE;
612 		break;
613 
614 	case T_ADDR_ERR_LD:	/* misaligned access */
615 	case T_ADDR_ERR_ST:	/* misaligned access */
616 	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
617 		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
618 			((struct pcb *)UADDR)->pcb_onfault = 0;
619 			return (onfault_table[i]);
620 		}
621 		/* FALLTHROUGH */
622 
623 	default:
624 	err:
625 #ifdef KADB
626 	    {
627 		extern struct pcb kdbpcb;
628 
629 		if (USERMODE(statusReg))
630 			kdbpcb = p->p_addr->u_pcb;
631 		else {
632 			kdbpcb.pcb_regs[ZERO] = 0;
633 			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
634 			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
635 			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
636 			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
637 			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
638 			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
639 			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
640 			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
641 			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
642 			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
643 			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
644 			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
645 			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
646 			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
647 			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
648 			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
649 			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
650 			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
651 			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
652 			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
653 			kdbpcb.pcb_regs[PC] = pc;
654 			kdbpcb.pcb_regs[SR] = statusReg;
655 			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
656 		}
657 		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
658 			return (kdbpcb.pcb_regs[PC]);
659 	    }
660 #else
661 #ifdef DEBUG
662 		trapDump("trap");
663 #endif
664 #endif
665 		panic("trap");
666 	}
667 	trapsignal(p, i, ucode);
668 out:
669 	/*
670 	 * Note: we should only get here if returning to user mode.
671 	 */
672 	/* take pending signals */
673 	while ((i = CURSIG(p)) != 0)
674 		postsig(i);
675 	p->p_priority = p->p_usrpri;
676 	astpending = 0;
677 	if (want_resched) {
678 		int s;
679 
680 		/*
681 		 * Since we are curproc, clock will normally just change
682 		 * our priority without moving us from one queue to another
683 		 * (since the running process is not on a queue.)
684 		 * If that happened after we put ourselves on the run queue
685 		 * but before we switched, we might not be on the queue
686 		 * indicated by our priority.
687 		 */
688 		s = splstatclock();
689 		setrunqueue(p);
690 		p->p_stats->p_ru.ru_nivcsw++;
691 		mi_switch();
692 		splx(s);
693 		while ((i = CURSIG(p)) != 0)
694 			postsig(i);
695 	}
696 
697 	/*
698 	 * If profiling, charge system time to the trapped pc.
699 	 */
700 	if (p->p_flag & P_PROFIL) {
701 		extern int psratio;
702 
703 		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
704 	}
705 
706 	curpriority = p->p_priority;
707 	return (pc);
708 }
709 
710 /*
711  * Handle an interrupt.
712  * Called from MachKernIntr() or MachUserIntr()
713  * Note: curproc might be NULL.
714  */
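/*
 * Dispatch overview: mask = cause & status selects interrupts that are both
 * pending and enabled.  The per-model handler installed in pmax_hardware_intr
 * services the hardware lines and returns a new spl value, MACH_INT_MASK_5
 * reports an FPU exception, and the two software interrupt bits run
 * softclock() and the network protocol queues.
 */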
715 interrupt(statusReg, causeReg, pc)
716 	unsigned statusReg;	/* status register at time of the exception */
717 	unsigned causeReg;	/* cause register at time of exception */
718 	unsigned pc;		/* program counter where to continue */
719 {
720 	register unsigned mask;
721 	struct clockframe cf;
722 
723 #ifdef DEBUG
724 	trp->status = statusReg;
725 	trp->cause = causeReg;
726 	trp->vadr = 0;
727 	trp->pc = pc;
728 	trp->ra = 0;
729 	trp->code = 0;
730 	if (++trp == &trapdebug[TRAPSIZE])
731 		trp = trapdebug;
732 
733 	intr_level++;
734 #endif
735 
736 	cnt.v_intr++;
737 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
738 	if (pmax_hardware_intr)
739 		splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
740 	if (mask & MACH_INT_MASK_5) {
741 		if (!USERMODE(statusReg)) {
742 #ifdef DEBUG
743 			trapDump("fpintr");
744 #else
745 			printf("FPU interrupt: PC %x CR %x SR %x\n",
746 				pc, causeReg, statusReg);
747 #endif
748 		} else
749 			MachFPInterrupt(statusReg, causeReg, pc);
750 	}
751 	if (mask & MACH_SOFT_INT_MASK_0) {
752 		clearsoftclock();
753 		cnt.v_soft++;
754 		softclock();
755 	}
756 	/* process network interrupt if we trapped or will very soon */
757 	if ((mask & MACH_SOFT_INT_MASK_1) ||
758 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
759 		clearsoftnet();
760 		cnt.v_soft++;
761 #ifdef INET
762 		if (netisr & (1 << NETISR_ARP)) {
763 			netisr &= ~(1 << NETISR_ARP);
764 			arpintr();
765 		}
766 		if (netisr & (1 << NETISR_IP)) {
767 			netisr &= ~(1 << NETISR_IP);
768 			ipintr();
769 		}
770 #endif
771 #ifdef NS
772 		if (netisr & (1 << NETISR_NS)) {
773 			netisr &= ~(1 << NETISR_NS);
774 			nsintr();
775 		}
776 #endif
777 #ifdef ISO
778 		if (netisr & (1 << NETISR_ISO)) {
779 			netisr &= ~(1 << NETISR_ISO);
780 			clnlintr();
781 		}
782 #endif
783 	}
784 #ifdef DEBUG
785 	intr_level--;
786 #endif
787 }
788 
789 /*
790  * Handle pmax (DECstation 2100/3100) interrupts.
791  */
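/*
 * On the 2100/3100 the hardware lines are dispatched below as: 0 SII SCSI,
 * 1 LANCE Ethernet, 2 DC serial, 3 clock, 4 memory error (each driver line
 * only when that driver is configured in).
 */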
792 pmax_intr(mask, pc, statusReg, causeReg)
793 	unsigned mask;
794 	unsigned pc;
795 	unsigned statusReg;
796 	unsigned causeReg;
797 {
798 	register volatile struct chiptime *c = Mach_clock_addr;
799 	struct clockframe cf;
800 	int temp;
801 
802 	/* handle clock interrupts ASAP */
803 	if (mask & MACH_INT_MASK_3) {
804 		temp = c->regc;	/* XXX clear interrupt bits */
805 		cf.pc = pc;
806 		cf.sr = statusReg;
807 		hardclock(&cf);
808 		/* keep clock interrupts enabled */
809 		causeReg &= ~MACH_INT_MASK_3;
810 	}
811 	/* Re-enable clock interrupts */
812 	splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
813 #if NSII > 0
814 	if (mask & MACH_INT_MASK_0)
815 		siiintr(0);
816 #endif
817 #if NLE > 0
818 	if (mask & MACH_INT_MASK_1)
819 		leintr(0);
820 #endif
821 #if NDC > 0
822 	if (mask & MACH_INT_MASK_2)
823 		dcintr(0);
824 #endif
825 	if (mask & MACH_INT_MASK_4)
826 		pmax_errintr();
827 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
828 		MACH_SR_INT_ENA_CUR);
829 }
830 
831 /*
832  * Handle hardware interrupts for the KN02. (DECstation 5000/200)
833  * Returns spl value.
834  */
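/*
 * KN02 dispatch sketch: interrupt 1 is the clock (the CSR power-supply
 * warning bit is checked there too), interrupt 3 is a memory error, and
 * interrupt 0 fans out to the TurboChannel/IO slots: the CSR's low byte
 * holds the pending requests and the IOINTEN byte the per-slot enables, so
 * csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT selects the slots
 * whose tc_slot_info handlers are called.
 */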
835 kn02_intr(mask, pc, statusReg, causeReg)
836 	unsigned mask;
837 	unsigned pc;
838 	unsigned statusReg;
839 	unsigned causeReg;
840 {
841 	register unsigned i, m;
842 	register volatile struct chiptime *c = Mach_clock_addr;
843 	register unsigned csr;
844 	int temp;
845 	struct clockframe cf;
846 	static int warned = 0;
847 
848 	/* handle clock interrupts ASAP */
849 	if (mask & MACH_INT_MASK_1) {
850 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
851 		if ((csr & KN02_CSR_PSWARN) && !warned) {
852 			warned = 1;
853 			printf("WARNING: power supply is overheating!\n");
854 		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
855 			warned = 0;
856 			printf("WARNING: power supply is OK again\n");
857 		}
858 
859 		temp = c->regc;	/* XXX clear interrupt bits */
860 		cf.pc = pc;
861 		cf.sr = statusReg;
862 		hardclock(&cf);
863 
864 		/* keep clock interrupts enabled */
865 		causeReg &= ~MACH_INT_MASK_1;
866 	}
867 	/* Re-enable clock interrupts */
868 	splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
869 	if (mask & MACH_INT_MASK_0) {
870 
871 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
872 		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
873 #if 0
874 		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
875 			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
876 			(m << KN02_CSR_IOINTEN_SHIFT);
877 #endif
878 		for (i = 0; m; i++, m >>= 1) {
879 			if (!(m & 1))
880 				continue;
881 			if (tc_slot_info[i].intr)
882 				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
883 			else
884 				printf("spurious interrupt %d\n", i);
885 		}
886 #if 0
887 		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
888 			csr & ~(KN02_CSR_WRESERVED | 0xFF);
889 #endif
890 	}
891 	if (mask & MACH_INT_MASK_3)
892 		kn02_errintr();
893 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
894 		MACH_SR_INT_ENA_CUR);
895 }
896 
897 /*
898  * 3min hardware interrupts. (DECstation 5000/1xx)
899  */
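/*
 * 3min dispatch sketch: interrupt 4 calls back into the PROM to halt,
 * interrupt 3 is the IOASIC summary interrupt whose request register
 * (KMIN_REG_INTR), masked by kmin_tc3_imask, selects the clock, SCC, SCSI,
 * LANCE and error handlers registered in tc_slot_info, and interrupts 0-2
 * go to the three TurboChannel option slots.
 */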
900 kmin_intr(mask, pc, statusReg, causeReg)
901 	unsigned mask;
902 	unsigned pc;
903 	unsigned statusReg;
904 	unsigned causeReg;
905 {
906 	register u_int intr;
907 	register volatile struct chiptime *c = Mach_clock_addr;
908 	volatile u_int *imaskp =
909 		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
910 	volatile u_int *intrp =
911 		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
912 	unsigned int old_mask;
913 	struct clockframe cf;
914 	int temp;
915 	static int user_warned = 0;
916 
917 	old_mask = *imaskp & kmin_tc3_imask;
918 	*imaskp = old_mask;
919 
920 	if (mask & MACH_INT_MASK_4)
921 		(*callv->halt)((int *)0, 0);
922 	if (mask & MACH_INT_MASK_3) {
923 		intr = *intrp;
924 		/* masked interrupts are still observable */
925 		intr &= old_mask;
926 
927 		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
928 			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
929 #ifdef notdef
930 			asc_dma_intr();
931 #endif
932 		}
933 
934 		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
935 			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);
936 
937 		if (intr & KMIN_INTR_LANCE_READ_E)
938 			*intrp &= ~KMIN_INTR_LANCE_READ_E;
939 
940 		if (intr & KMIN_INTR_TIMEOUT)
941 			kn02ba_errintr();
942 
943 		if (intr & KMIN_INTR_CLOCK) {
944 			temp = c->regc;	/* XXX clear interrupt bits */
945 			cf.pc = pc;
946 			cf.sr = statusReg;
947 			hardclock(&cf);
948 		}
949 
950 		if ((intr & KMIN_INTR_SCC_0) &&
951 			tc_slot_info[KMIN_SCC0_SLOT].intr)
952 			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
953 			(tc_slot_info[KMIN_SCC0_SLOT].unit);
954 
955 		if ((intr & KMIN_INTR_SCC_1) &&
956 			tc_slot_info[KMIN_SCC1_SLOT].intr)
957 			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
958 			(tc_slot_info[KMIN_SCC1_SLOT].unit);
959 
960 		if ((intr & KMIN_INTR_SCSI) &&
961 			tc_slot_info[KMIN_SCSI_SLOT].intr)
962 			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
963 			(tc_slot_info[KMIN_SCSI_SLOT].unit);
964 
965 		if ((intr & KMIN_INTR_LANCE) &&
966 			tc_slot_info[KMIN_LANCE_SLOT].intr)
967 			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
968 			(tc_slot_info[KMIN_LANCE_SLOT].unit);
969 
970 		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
971 			printf("%s\n", "Power supply ok now.");
972 			user_warned = 0;
973 		}
974 		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
975 			user_warned++;
976 			printf("%s\n", "Power supply overheating");
977 		}
978 	}
979 	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
980 		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
981 	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
982 		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
983 	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
984 		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
985 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
986 		MACH_SR_INT_ENA_CUR);
987 }
988 
989 /*
990  * Maxine hardware interrupts. (Personal DECstation 5000/xx)
991  */
992 xine_intr(mask, pc, statusReg, causeReg)
993 	unsigned mask;
994 	unsigned pc;
995 	unsigned statusReg;
996 	unsigned causeReg;
997 {
998 	register u_int intr;
999 	register volatile struct chiptime *c = Mach_clock_addr;
1000 	volatile u_int *imaskp = (volatile u_int *)
1001 		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
1002 	volatile u_int *intrp = (volatile u_int *)
1003 		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
1004 	u_int old_mask;
1005 	struct clockframe cf;
1006 	int temp;
1007 
1008 	old_mask = *imaskp & xine_tc3_imask;
1009 	*imaskp = old_mask;
1010 
1011 	if (mask & MACH_INT_MASK_4)
1012 		(*callv->halt)((int *)0, 0);
1013 
1014 	/* handle clock interrupts ASAP */
1015 	if (mask & MACH_INT_MASK_1) {
1016 		temp = c->regc;	/* XXX clear interrupt bits */
1017 		cf.pc = pc;
1018 		cf.sr = statusReg;
1019 		hardclock(&cf);
1020 		causeReg &= ~MACH_INT_MASK_1;
1021 		/* reenable clock interrupts */
1022 		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
1023 	}
1024 	if (mask & MACH_INT_MASK_3) {
1025 		intr = *intrp;
1026 		/* masked interrupts are still observable */
1027 		intr &= old_mask;
1028 
1029 		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
1030 			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
1031 #ifdef notdef
1032 			asc_dma_intr();
1033 #endif
1034 		}
1035 
1036 		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
1037 			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);
1038 
1039 		if (intr & XINE_INTR_LANCE_READ_E)
1040 			*intrp &= ~XINE_INTR_LANCE_READ_E;
1041 
1042 		if ((intr & XINE_INTR_SCC_0) &&
1043 			tc_slot_info[XINE_SCC0_SLOT].intr)
1044 			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
1045 			(tc_slot_info[XINE_SCC0_SLOT].unit);
1046 
1047 		if ((intr & XINE_INTR_DTOP_RX) &&
1048 			tc_slot_info[XINE_DTOP_SLOT].intr)
1049 			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
1050 			(tc_slot_info[XINE_DTOP_SLOT].unit);
1051 
1052 		if ((intr & XINE_INTR_FLOPPY) &&
1053 			tc_slot_info[XINE_FLOPPY_SLOT].intr)
1054 			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
1055 			(tc_slot_info[XINE_FLOPPY_SLOT].unit);
1056 
1057 		if ((intr & XINE_INTR_TC_0) &&
1058 			tc_slot_info[0].intr)
1059 			(*(tc_slot_info[0].intr))
1060 			(tc_slot_info[0].unit);
1061 
1062 		if ((intr & XINE_INTR_TC_1) &&
1063 			tc_slot_info[1].intr)
1064 			(*(tc_slot_info[1].intr))
1065 			(tc_slot_info[1].unit);
1066 
1067 		if ((intr & XINE_INTR_ISDN) &&
1068 			tc_slot_info[XINE_ISDN_SLOT].intr)
1069 			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
1070 			(tc_slot_info[XINE_ISDN_SLOT].unit);
1071 
1072 		if ((intr & XINE_INTR_SCSI) &&
1073 			tc_slot_info[XINE_SCSI_SLOT].intr)
1074 			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
1075 			(tc_slot_info[XINE_SCSI_SLOT].unit);
1076 
1077 		if ((intr & XINE_INTR_LANCE) &&
1078 			tc_slot_info[XINE_LANCE_SLOT].intr)
1079 			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
1080 			(tc_slot_info[XINE_LANCE_SLOT].unit);
1081 
1082 	}
1083 	if (mask & MACH_INT_MASK_2)
1084 		kn02ba_errintr();
1085 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
1086 		MACH_SR_INT_ENA_CUR);
1087 }
1088 
1089 #ifdef DS5000_240
1090 /*
1091  * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
1092  */
1093 kn03_intr(mask, pc, statusReg, causeReg)
1094 	unsigned mask;
1095 	unsigned pc;
1096 	unsigned statusReg;
1097 	unsigned causeReg;
1098 {
1099 	register u_int intr;
1100 	register volatile struct chiptime *c = Mach_clock_addr;
1101 	volatile u_int *imaskp = (volatile u_int *)
1102 		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
1103 	volatile u_int *intrp = (volatile u_int *)
1104 		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
1105 	u_int old_mask;
1106 	struct clockframe cf;
1107 	int temp;
1108 	static int user_warned = 0;
1109 
1110 	old_mask = *imaskp & kn03_tc3_imask;
1111 	*imaskp = old_mask;
1112 
1113 	if (mask & MACH_INT_MASK_4)
1114 		(*callv->halt)((int *)0, 0);
1115 
1116 	/* handle clock interrupts ASAP */
1117 	if (mask & MACH_INT_MASK_1) {
1118 		temp = c->regc;	/* XXX clear interrupt bits */
1119 		cf.pc = pc;
1120 		cf.sr = statusReg;
1121 		hardclock(&cf);
1122 		causeReg &= ~MACH_INT_MASK_1;
1123 		/* reenable clock interrupts */
1124 		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
1125 	}
1126 	if (mask & MACH_INT_MASK_0) {
1127 		intr = *intrp;
1128 		/* masked interrupts are still observable */
1129 		intr &= old_mask;
1130 
1131 		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
1132 			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
1133 #ifdef notdef
1134 			asc_dma_intr();
1135 #endif
1136 		}
1137 
1138 		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
1139 			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);
1140 
1141 		if (intr & KN03_INTR_LANCE_READ_E)
1142 			*intrp &= ~KN03_INTR_LANCE_READ_E;
1143 
1144 		if ((intr & KN03_INTR_SCC_0) &&
1145 			tc_slot_info[KN03_SCC0_SLOT].intr)
1146 			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
1147 			(tc_slot_info[KN03_SCC0_SLOT].unit);
1148 
1149 		if ((intr & KN03_INTR_SCC_1) &&
1150 			tc_slot_info[KN03_SCC1_SLOT].intr)
1151 			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
1152 			(tc_slot_info[KN03_SCC1_SLOT].unit);
1153 
1154 		if ((intr & KN03_INTR_TC_0) &&
1155 			tc_slot_info[0].intr)
1156 			(*(tc_slot_info[0].intr))
1157 			(tc_slot_info[0].unit);
1158 
1159 		if ((intr & KN03_INTR_TC_1) &&
1160 			tc_slot_info[1].intr)
1161 			(*(tc_slot_info[1].intr))
1162 			(tc_slot_info[1].unit);
1163 
1164 		if ((intr & KN03_INTR_TC_2) &&
1165 			tc_slot_info[2].intr)
1166 			(*(tc_slot_info[2].intr))
1167 			(tc_slot_info[2].unit);
1168 
1169 		if ((intr & KN03_INTR_SCSI) &&
1170 			tc_slot_info[KN03_SCSI_SLOT].intr)
1171 			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
1172 			(tc_slot_info[KN03_SCSI_SLOT].unit);
1173 
1174 		if ((intr & KN03_INTR_LANCE) &&
1175 			tc_slot_info[KN03_LANCE_SLOT].intr)
1176 			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
1177 			(tc_slot_info[KN03_LANCE_SLOT].unit);
1178 
1179 		if (user_warned && ((intr & KN03_INTR_PSWARN) == 0)) {
1180 			printf("%s\n", "Power supply ok now.");
1181 			user_warned = 0;
1182 		}
1183 		if ((intr & KN03_INTR_PSWARN) && (user_warned < 3)) {
1184 			user_warned++;
1185 			printf("%s\n", "Power supply overheating");
1186 		}
1187 	}
1188 	if (mask & MACH_INT_MASK_3)
1189 		kn03_errintr();
1190 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
1191 		MACH_SR_INT_ENA_CUR);
1192 }
1193 #endif /* DS5000_240 */
1194 
1195 /*
1196  * This is called from MachUserIntr() if astpending is set.
1197  * This is very similar to the tail of trap().
1198  */
1199 softintr(statusReg, pc)
1200 	unsigned statusReg;	/* status register at time of the exception */
1201 	unsigned pc;		/* program counter where to continue */
1202 {
1203 	register struct proc *p = curproc;
1204 	int sig;
1205 
1206 	cnt.v_soft++;
1207 	/* take pending signals */
1208 	while ((sig = CURSIG(p)) != 0)
1209 		postsig(sig);
1210 	p->p_priority = p->p_usrpri;
1211 	astpending = 0;
1212 	if (p->p_flag & P_OWEUPC) {
1213 		p->p_flag &= ~P_OWEUPC;
1214 		ADDUPROF(p);
1215 	}
1216 	if (want_resched) {
1217 		int s;
1218 
1219 		/*
1220 		 * Since we are curproc, clock will normally just change
1221 		 * our priority without moving us from one queue to another
1222 		 * (since the running process is not on a queue.)
1223 		 * If that happened after we put ourselves on the run queue
1224 		 * but before we switched, we might not be on the queue
1225 		 * indicated by our priority.
1226 		 */
1227 		s = splstatclock();
1228 		setrunqueue(p);
1229 		p->p_stats->p_ru.ru_nivcsw++;
1230 		mi_switch();
1231 		splx(s);
1232 		while ((sig = CURSIG(p)) != 0)
1233 			postsig(sig);
1234 	}
1235 	curpriority = p->p_priority;
1236 }
1237 
1238 #ifdef DEBUG
1239 trapDump(msg)
1240 	char *msg;
1241 {
1242 	register int i;
1243 	int s;
1244 
1245 	s = splhigh();
1246 	printf("trapDump(%s)\n", msg);
1247 	for (i = 0; i < TRAPSIZE; i++) {
1248 		if (trp == trapdebug)
1249 			trp = &trapdebug[TRAPSIZE - 1];
1250 		else
1251 			trp--;
1252 		if (trp->cause == 0)
1253 			break;
1254 		printf("%s: ADR %x PC %x CR %x SR %x\n",
1255 			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
1256 				MACH_CR_EXC_CODE_SHIFT],
1257 			trp->vadr, trp->pc, trp->cause, trp->status);
1258 		printf("   RA %x code %d\n", trp->ra, trp->code);
1259 	}
1260 	bzero(trapdebug, sizeof(trapdebug));
1261 	trp = trapdebug;
1262 	splx(s);
1263 }
1264 #endif
1265 
1266 /*
1267  *----------------------------------------------------------------------
1268  *
1269  * MemErrorInterrupts --
1270  *   pmax_errintr - for the DS2100/DS3100
1271  *   kn02_errintr - for the DS5000/200
1272  *   kn02ba_errintr - for the DS5000/1xx and DS5000/xx
1273  *
1274  *	Handle a memory error interrupt from the system control registers.
1275  *
1276  * Results:
1277  *	None.
1278  *
1279  * Side effects:
1280  *	None.
1281  *
1282  *----------------------------------------------------------------------
1283  */
1284 static void
1285 pmax_errintr()
1286 {
1287 	volatile u_short *sysCSRPtr =
1288 		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
1289 	u_short csr;
1290 
1291 	csr = *sysCSRPtr;
1292 
1293 	if (csr & KN01_CSR_MERR) {
1294 		printf("Memory error at 0x%x\n",
1295 			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
1296 		panic("Mem error interrupt");
1297 	}
1298 	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
1299 }
1300 
1301 static void
1302 kn02_errintr()
1303 {
1304 	u_int erradr, chksyn, physadr;
1305 	int i;
1306 
1307 	erradr = *(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR);
1308 	chksyn = *(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CHKSYN);
1309 	*(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
1310 	MachEmptyWriteBuffer();
1311 
1312 	if (!(erradr & KN02_ERR_VALID))
1313 		return;
1314 	/* extract the physical word address and compensate for pipelining */
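	/*
	 * For read errors the latched word address trails the faulting one,
	 * hence the -5 word adjustment (applied only within the low 12 bits);
	 * the shift by 2 then converts the word address to a byte address.
	 */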
1315 	physadr = erradr & KN02_ERR_ADDRESS;
1316 	if (!(erradr & KN02_ERR_WRITE))
1317 		physadr = (physadr & ~0xfff) | ((physadr & 0xfff) - 5);
1318 	physadr <<= 2;
1319 	printf("%s memory %s %s error at 0x%x\n",
1320 		(erradr & KN02_ERR_CPU) ? "CPU" : "DMA",
1321 		(erradr & KN02_ERR_WRITE) ? "write" : "read",
1322 		(erradr & KN02_ERR_ECCERR) ? "ECC" : "timeout",
1323 		physadr);
1324 	if (erradr & KN02_ERR_ECCERR) {
1325 		*(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CHKSYN) = 0;
1326 		MachEmptyWriteBuffer();
1327 		printf("ECC 0x%x\n", chksyn);
1328 
1329 		/* check for a corrected, single bit, read error */
1330 		if (!(erradr & KN02_ERR_WRITE)) {
1331 			if (physadr & 0x4) {
1332 				/* check high word */
1333 				if (chksyn & KN02_ECC_SNGHI)
1334 					return;
1335 			} else {
1336 				/* check low word */
1337 				if (chksyn & KN02_ECC_SNGLO)
1338 					return;
1339 			}
1340 		}
1341 	}
1342 	panic("Mem error interrupt");
1343 }
1344 
1345 #ifdef DS5000_240
1346 static void
1347 kn03_errintr()
1348 {
1349 
1350 	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR));
1351 	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR) = 0;
1352 	MachEmptyWriteBuffer();
1353 }
1354 #endif /* DS5000_240 */
1355 
1356 static void
1357 kn02ba_errintr()
1358 {
1359 	register int mer, adr, siz;
1360 	static int errintr_cnt = 0;
1361 
1362 	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
1363 	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
1364 	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);
1365 
1366 	/* clear interrupt bit */
1367 	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;
1368 
1369 	errintr_cnt++;
1370 	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
1371 	       "Bad memory chip at phys ",
1372 	       kn02ba_recover_erradr(adr, mer),
1373 	       mer, siz, adr);
1374 }
1375 
1376 static unsigned
1377 kn02ba_recover_erradr(phys, mer)
1378 	register unsigned phys, mer;
1379 {
1380 	/* phys holds bits 28:2, mer knows which byte */
1381 	switch (mer & KMIN_MER_LASTBYTE) {
1382 	case KMIN_LASTB31:
1383 		mer = 3; break;
1384 	case KMIN_LASTB23:
1385 		mer = 2; break;
1386 	case KMIN_LASTB15:
1387 		mer = 1; break;
1388 	case KMIN_LASTB07:
1389 		mer = 0; break;
1390 	}
1391 	return ((phys & KMIN_AER_ADDR_MASK) | mer);
1392 }
1393 
1394 /*
1395  * Return the resulting PC as if the branch was executed.
1396  */
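/*
 * Branch arithmetic used below, as a worked example: for a conditional
 * branch at address P with 16-bit immediate imm, the taken target is
 * P + 4 + (sign-extended imm << 2) (see GetBranchDest()) and the not-taken
 * path resumes at P + 8, past the delay slot.  J/JAL form their target as
 * (target << 2) | (P & 0xF0000000); JR/JALR take it from a register.
 */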
1397 unsigned
1398 MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
1399 	unsigned *regsPtr;
1400 	unsigned instPC;
1401 	unsigned fpcCSR;
1402 	int allowNonBranch;
1403 {
1404 	InstFmt inst;
1405 	unsigned retAddr;
1406 	int condition;
1407 	extern unsigned GetBranchDest();
1408 
1409 
1410 	inst = *(InstFmt *)instPC;
1411 #if 0
1412 	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
1413 		inst.word, fpcCSR); /* XXX */
1414 #endif
1415 	switch ((int)inst.JType.op) {
1416 	case OP_SPECIAL:
1417 		switch ((int)inst.RType.func) {
1418 		case OP_JR:
1419 		case OP_JALR:
1420 			retAddr = regsPtr[inst.RType.rs];
1421 			break;
1422 
1423 		default:
1424 			if (!allowNonBranch)
1425 				panic("MachEmulateBranch: Non-branch");
1426 			retAddr = instPC + 4;
1427 			break;
1428 		}
1429 		break;
1430 
1431 	case OP_BCOND:
1432 		switch ((int)inst.IType.rt) {
1433 		case OP_BLTZ:
1434 		case OP_BLTZAL:
1435 			if ((int)(regsPtr[inst.RType.rs]) < 0)
1436 				retAddr = GetBranchDest((InstFmt *)instPC);
1437 			else
1438 				retAddr = instPC + 8;
1439 			break;
1440 
1441 		case OP_BGEZAL:
1442 		case OP_BGEZ:
1443 			if ((int)(regsPtr[inst.RType.rs]) >= 0)
1444 				retAddr = GetBranchDest((InstFmt *)instPC);
1445 			else
1446 				retAddr = instPC + 8;
1447 			break;
1448 
1449 		default:
1450 			panic("MachEmulateBranch: Bad branch cond");
1451 		}
1452 		break;
1453 
1454 	case OP_J:
1455 	case OP_JAL:
1456 		retAddr = (inst.JType.target << 2) |
1457 			((unsigned)instPC & 0xF0000000);
1458 		break;
1459 
1460 	case OP_BEQ:
1461 		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1462 			retAddr = GetBranchDest((InstFmt *)instPC);
1463 		else
1464 			retAddr = instPC + 8;
1465 		break;
1466 
1467 	case OP_BNE:
1468 		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1469 			retAddr = GetBranchDest((InstFmt *)instPC);
1470 		else
1471 			retAddr = instPC + 8;
1472 		break;
1473 
1474 	case OP_BLEZ:
1475 		if ((int)(regsPtr[inst.RType.rs]) <= 0)
1476 			retAddr = GetBranchDest((InstFmt *)instPC);
1477 		else
1478 			retAddr = instPC + 8;
1479 		break;
1480 
1481 	case OP_BGTZ:
1482 		if ((int)(regsPtr[inst.RType.rs]) > 0)
1483 			retAddr = GetBranchDest((InstFmt *)instPC);
1484 		else
1485 			retAddr = instPC + 8;
1486 		break;
1487 
1488 	case OP_COP1:
1489 		switch (inst.RType.rs) {
1490 		case OP_BCx:
1491 		case OP_BCy:
1492 			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1493 				condition = fpcCSR & MACH_FPC_COND_BIT;
1494 			else
1495 				condition = !(fpcCSR & MACH_FPC_COND_BIT);
1496 			if (condition)
1497 				retAddr = GetBranchDest((InstFmt *)instPC);
1498 			else
1499 				retAddr = instPC + 8;
1500 			break;
1501 
1502 		default:
1503 			if (!allowNonBranch)
1504 				panic("MachEmulateBranch: Bad coproc branch instruction");
1505 			retAddr = instPC + 4;
1506 		}
1507 		break;
1508 
1509 	default:
1510 		if (!allowNonBranch)
1511 			panic("MachEmulateBranch: Non-branch instruction");
1512 		retAddr = instPC + 4;
1513 	}
1514 #if 0
1515 	printf("Target addr=%x\n", retAddr); /* XXX */
1516 #endif
1517 	return (retAddr);
1518 }
1519 
1520 unsigned
1521 GetBranchDest(InstPtr)
1522 	InstFmt *InstPtr;
1523 {
1524 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1525 }
1526 
1527 /*
1528  * This routine is called by procxmt() to single step one instruction.
1529  * We do this by storing a break instruction after the current instruction,
1530  * resuming execution, and then restoring the old instruction.
1531  */
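/*
 * Sketch of the flow: MachEmulateBranch() yields the address of the next
 * instruction that will execute (following any branch), the word there is
 * saved in p_md.md_ss_addr/md_ss_instr, and MACH_BREAK_SSTEP is written in
 * its place (making the page temporarily writable if suiword() fails).
 * The T_BREAK+T_USER case in trap() restores the original word when the
 * breakpoint fires.
 */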
1532 cpu_singlestep(p)
1533 	register struct proc *p;
1534 {
1535 	register unsigned va;
1536 	register int *locr0 = p->p_md.md_regs;
1537 	int i;
1538 
1539 	/* compute next address after current location */
1540 	va = MachEmulateBranch(locr0, locr0[PC], locr0[FSR], 1);
1541 	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
1542 	    !useracc((caddr_t)va, 4, B_READ)) {
1543 		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
1544 			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
1545 		return (EFAULT);
1546 	}
1547 	p->p_md.md_ss_addr = va;
1548 	p->p_md.md_ss_instr = fuiword((caddr_t)va);
1549 	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1550 	if (i < 0) {
1551 		vm_offset_t sa, ea;
1552 		int rv;
1553 
1554 		sa = trunc_page((vm_offset_t)va);
1555 		ea = round_page((vm_offset_t)va+sizeof(int)-1);
1556 		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
1557 			VM_PROT_DEFAULT, FALSE);
1558 		if (rv == KERN_SUCCESS) {
1559 			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1560 			(void) vm_map_protect(&p->p_vmspace->vm_map,
1561 				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
1562 		}
1563 	}
1564 	if (i < 0)
1565 		return (EFAULT);
1566 #if 0
1567 	printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
1568 		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
1569 		p->p_md.md_ss_instr, locr0[PC], fuword((caddr_t)va)); /* XXX */
1570 #endif
1571 	return (0);
1572 }
1573 
1574 #ifdef DEBUG
1575 kdbpeek(addr)
1576 {
1577 	if (addr & 3) {
1578 		printf("kdbpeek: unaligned address %x\n", addr);
1579 		return (-1);
1580 	}
1581 	return (*(int *)addr);
1582 }
1583 
1584 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1585 
1586 /*
1587  * Print a stack backtrace.
1588  */
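/*
 * The trace is heuristic: starting from the current PC it scans backwards
 * for the `jr ra' that ends the previous function to locate the start of
 * the current one, then scans forward decoding `sw' stores off sp to
 * recover saved a0-a3/fp/ra and the `addi/addiu sp,sp,-N' that sets the
 * frame size, and finally iterates with the recovered ra and frame
 * adjustment.
 */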
1589 void
1590 stacktrace(a0, a1, a2, a3)
1591 	int a0, a1, a2, a3;
1592 {
1593 	unsigned pc, sp, fp, ra, va, subr;
1594 	unsigned instr, mask;
1595 	InstFmt i;
1596 	int more, stksize;
1597 	int regs[3];
1598 	extern setsoftclock();
1599 	extern char start[], edata[];
1600 
1601 	cpu_getregs(regs);
1602 
1603 	/* get initial values from the exception frame */
1604 	sp = regs[0];
1605 	pc = regs[1];
1606 	ra = 0;
1607 	fp = regs[2];
1608 
1609 loop:
1610 	/* check for current PC in the kernel interrupt handler code */
1611 	if (pc >= (unsigned)MachKernIntr && pc < (unsigned)MachUserIntr) {
1612 		/* NOTE: the offsets depend on the code in locore.s */
1613 		printf("interrupt\n");
1614 		a0 = kdbpeek(sp + 36);
1615 		a1 = kdbpeek(sp + 40);
1616 		a2 = kdbpeek(sp + 44);
1617 		a3 = kdbpeek(sp + 48);
1618 		pc = kdbpeek(sp + 20);
1619 		ra = kdbpeek(sp + 92);
1620 		sp = kdbpeek(sp + 100);
1621 		fp = kdbpeek(sp + 104);
1622 	}
1623 
1624 	/* check for current PC in the exception handler code */
1625 	if (pc >= 0x80000000 && pc < (unsigned)setsoftclock) {
1626 		ra = 0;
1627 		subr = 0;
1628 		goto done;
1629 	}
1630 
1631 	/* check for bad PC */
1632 	if (pc & 3 || pc < 0x80000000 || pc >= (unsigned)edata) {
1633 		printf("PC 0x%x: not in kernel\n", pc);
1634 		ra = 0;
1635 		subr = 0;
1636 		goto done;
1637 	}
1638 
1639 	/*
1640 	 * Find the beginning of the current subroutine by scanning backwards
1641 	 * from the current PC for the end of the previous subroutine.
1642 	 */
1643 	va = pc - sizeof(int);
1644 	while ((instr = kdbpeek(va)) != MIPS_JR_RA)
1645 		va -= sizeof(int);
1646 	va += 2 * sizeof(int);	/* step forward past the jr and its delay slot */
1647 	/* skip over nulls which might separate .o files */
1648 	while ((instr = kdbpeek(va)) == 0)
1649 		va += sizeof(int);
1650 	subr = va;
1651 
1652 	/* scan forwards to find stack size and any saved registers */
1653 	stksize = 0;
1654 	more = 3;
1655 	mask = 0;
1656 	for (; more; va += sizeof(int), more = (more == 3) ? 3 : more - 1) {
1657 		/* stop if hit our current position */
1658 		if (va >= pc)
1659 			break;
1660 		instr = kdbpeek(va);
1661 		i.word = instr;
1662 		switch (i.JType.op) {
1663 		case OP_SPECIAL:
1664 			switch (i.RType.func) {
1665 			case OP_JR:
1666 			case OP_JALR:
1667 				more = 2; /* stop after next instruction */
1668 				break;
1669 
1670 			case OP_SYSCALL:
1671 			case OP_BREAK:
1672 				more = 1; /* stop now */
1673 			};
1674 			break;
1675 
1676 		case OP_BCOND:
1677 		case OP_J:
1678 		case OP_JAL:
1679 		case OP_BEQ:
1680 		case OP_BNE:
1681 		case OP_BLEZ:
1682 		case OP_BGTZ:
1683 			more = 2; /* stop after next instruction */
1684 			break;
1685 
1686 		case OP_COP0:
1687 		case OP_COP1:
1688 		case OP_COP2:
1689 		case OP_COP3:
1690 			switch (i.RType.rs) {
1691 			case OP_BCx:
1692 			case OP_BCy:
1693 				more = 2; /* stop after next instruction */
1694 			};
1695 			break;
1696 
1697 		case OP_SW:
1698 			/* look for saved registers on the stack */
1699 			if (i.IType.rs != 29)
1700 				break;
1701 			/* only restore the first one */
1702 			if (mask & (1 << i.IType.rt))
1703 				break;
1704 			mask |= 1 << i.IType.rt;
1705 			switch (i.IType.rt) {
1706 			case 4: /* a0 */
1707 				a0 = kdbpeek(sp + (short)i.IType.imm);
1708 				break;
1709 
1710 			case 5: /* a1 */
1711 				a1 = kdbpeek(sp + (short)i.IType.imm);
1712 				break;
1713 
1714 			case 6: /* a2 */
1715 				a2 = kdbpeek(sp + (short)i.IType.imm);
1716 				break;
1717 
1718 			case 7: /* a3 */
1719 				a3 = kdbpeek(sp + (short)i.IType.imm);
1720 				break;
1721 
1722 			case 30: /* fp */
1723 				fp = kdbpeek(sp + (short)i.IType.imm);
1724 				break;
1725 
1726 			case 31: /* ra */
1727 				ra = kdbpeek(sp + (short)i.IType.imm);
1728 			}
1729 			break;
1730 
1731 		case OP_ADDI:
1732 		case OP_ADDIU:
1733 			/* look for stack pointer adjustment */
1734 			if (i.IType.rs != 29 || i.IType.rt != 29)
1735 				break;
1736 			stksize = (short)i.IType.imm;
1737 		}
1738 	}
1739 
1740 done:
1741 	printf("%x+%x (%x,%x,%x,%x) ra %x sz %d\n",
1742 		subr, pc - subr, a0, a1, a2, a3, ra, stksize);
1743 
1744 	if (ra) {
1745 		if (pc == ra && stksize == 0)
1746 			printf("stacktrace: loop!\n");
1747 		else {
1748 			pc = ra;
1749 			sp -= stksize;
1750 			goto loop;
1751 		}
1752 	}
1753 }
1754 #endif /* DEBUG */
1755