xref: /original-bsd/sys/pmax/pmax/trap.c (revision 48611f03)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.16 (Berkeley) 04/05/93
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29 
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36 
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49 
50 #include <pmax/stand/dec_prom.h>
51 
52 #include <asc.h>
53 #include <sii.h>
54 #include <le.h>
55 #include <dc.h>
56 
57 struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */
58 
/* Low-level exception entry points — presumably defined in locore assembly; TODO confirm */
59 extern void MachKernGenException();
60 extern void MachUserGenException();
61 extern void MachKernIntr();
62 extern void MachUserIntr();
63 extern void MachTLBModException();
64 extern void MachTLBMissException();
/* Computes the target PC of a branch instruction (used to step over syscall/break in a delay slot) */
65 extern unsigned MachEmulateBranch();
66 
/*
 * Exception dispatch table: 16 kernel-mode handlers followed by 16
 * user-mode handlers, indexed by the cause-register exception code
 * (plus 16 for user mode — presumably selected by the locore entry
 * code; TODO confirm against locore).
 */
67 void (*machExceptionTable[])() = {
68 /*
69  * The kernel exception handlers.
70  */
71 	MachKernIntr,			/* external interrupt */
72 	MachKernGenException,		/* TLB modification */
73 	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
74 	MachTLBMissException,		/* TLB miss (store) */
75 	MachKernGenException,		/* address error (load or I-fetch) */
76 	MachKernGenException,		/* address error (store) */
77 	MachKernGenException,		/* bus error (I-fetch) */
78 	MachKernGenException,		/* bus error (load or store) */
79 	MachKernGenException,		/* system call */
80 	MachKernGenException,		/* breakpoint */
81 	MachKernGenException,		/* reserved instruction */
82 	MachKernGenException,		/* coprocessor unusable */
83 	MachKernGenException,		/* arithmetic overflow */
84 	MachKernGenException,		/* reserved */
85 	MachKernGenException,		/* reserved */
86 	MachKernGenException,		/* reserved */
87 /*
88  * The user exception handlers.
89  */
90 	MachUserIntr,
91 	MachUserGenException,
92 	MachUserGenException,
93 	MachUserGenException,
94 	MachUserGenException,
95 	MachUserGenException,
96 	MachUserGenException,
97 	MachUserGenException,
98 	MachUserGenException,
99 	MachUserGenException,
100 	MachUserGenException,
101 	MachUserGenException,
102 	MachUserGenException,
103 	MachUserGenException,
104 	MachUserGenException,
105 	MachUserGenException,
106 };
107 
/*
 * Printable exception names, indexed by the cause-register exception
 * code; parallels machExceptionTable above.
 */
108 char	*trap_type[] = {
109 	"external interrupt",
110 	"TLB modification",
111 	"TLB miss (load or instr. fetch)",
112 	"TLB miss (store)",
113 	"address error (load or I-fetch)",
114 	"address error (store)",
115 	"bus error (I-fetch)",
116 	"bus error (load or store)",
117 	"system call",
118 	"breakpoint",
119 	"reserved instruction",
120 	"coprocessor unusable",
121 	"arithmetic overflow",
122 	"reserved 13",
123 	"reserved 14",
124 	"reserved 15",
125 };
126 
127 #ifdef DEBUG
128 #define TRAPSIZE	10
/* Circular trap-history buffer; trp is the next free slot and wraps at TRAPSIZE. */
129 struct trapdebug {		/* trap history buffer for debugging */
130 	u_int	status;		/* status register at trap time */
131 	u_int	cause;		/* cause register at trap time */
132 	u_int	vadr;		/* faulting virtual address (if any) */
133 	u_int	pc;		/* program counter at trap time */
134 	u_int	ra;		/* return-address register at trap time */
135 	u_int	code;		/* syscall code (negated) or 0 */
136 } trapdebug[TRAPSIZE], *trp = trapdebug;
137 #endif
138 
/* Per-model memory/bus error interrupt handlers (defined later in this file). */
139 static void pmax_errintr();
140 static void kn02_errintr(), kn02ba_errintr();
141 #ifdef DS5000_240
142 static void kn03_errintr();
143 #endif
144 static unsigned kn02ba_recover_erradr();
/* TURBOchannel per-slot interrupt routines and interrupt masks, set up at autoconfiguration. */
145 extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
146 extern u_long kmin_tc3_imask, xine_tc3_imask;
147 extern const struct callback *callv;
148 #ifdef DS5000_240
149 extern u_long kn03_tc3_imask;
150 #endif
/* Model-specific hardware interrupt dispatcher; remains NULL if the model has none. */
151 int (*pmax_hardware_intr)() = (int (*)())0;
152 extern volatile struct chiptime *Mach_clock_addr;
153 
154 /*
155  * Handle an exception.
156  * Called from MachKernGenException() or MachUserGenException()
157  * when a processor trap occurs.
158  * In the case of a kernel trap, we return the pc where to resume if
159  * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
160  */
161 unsigned
/* Note: `args' is declared implicitly int (K&R); it is only used as an
 * anchor to reach the saved register frame via ((int *)&args)[n]. */
162 trap(statusReg, causeReg, vadr, pc, args)
163 	unsigned statusReg;	/* status register at time of the exception */
164 	unsigned causeReg;	/* cause register at time of exception */
165 	unsigned vadr;		/* address (if any) the fault occured on */
166 	unsigned pc;		/* program counter where to continue */
167 {
168 	register int type, i;
169 	unsigned ucode = 0;
170 	register struct proc *p = curproc;
171 	u_quad_t sticks;
172 	vm_prot_t ftype;
173 	extern unsigned onfault_table[];
174 
175 #ifdef DEBUG
176 	trp->status = statusReg;
177 	trp->cause = causeReg;
178 	trp->vadr = vadr;
179 	trp->pc = pc;
180 	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
181 		p->p_md.md_regs[RA];
182 	trp->code = 0;
183 	if (++trp == &trapdebug[TRAPSIZE])
184 		trp = trapdebug;
185 #endif
186 
187 	cnt.v_trap++;
188 	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
189 	if (USERMODE(statusReg)) {
190 		type |= T_USER;
		/* sticks is only initialized for user-mode traps; it is read
		 * again only on the user-return path (SPROFIL, below). */
191 		sticks = p->p_sticks;
192 	}
193 
194 	/*
195 	 * Enable hardware interrupts if they were on before.
196 	 * We only respond to software interrupts when returning to user mode.
197 	 */
198 	if (statusReg & MACH_SR_INT_ENA_PREV)
199 		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);
200 
201 	switch (type) {
202 	case T_TLB_MOD:
203 		/* check for kernel address */
204 		if ((int)vadr < 0) {
205 			register pt_entry_t *pte;
206 			register unsigned entry;
207 #ifndef ATTR
208 			register vm_offset_t pa;
209 #endif
210 
211 			pte = kvtopte(vadr);
212 			entry = pte->pt_entry;
213 			if (entry & PG_RO) {
214 				/* write to read only page in the kernel */
215 				ftype = VM_PROT_WRITE;
216 				goto kernel_fault;
217 			}
218 			entry |= PG_M;
219 			pte->pt_entry = entry;
220 			vadr &= PG_FRAME;
221 			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
222 				entry, MachTLBUpdate(vadr, entry)); /* XXX */
223 #ifdef ATTR
224 			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
225 #else
226 			pa = entry & PG_FRAME;
227 			if (!IS_VM_PHYSADDR(pa))
228 				panic("trap: kmod");
229 			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
230 #endif
231 			return (pc);
232 		}
233 		/* FALLTHROUGH */
234 
235 	case T_TLB_MOD+T_USER:
236 	    {
237 		pmap_hash_t hp;
238 #ifndef ATTR
239 		vm_offset_t pa;
240 #endif
241 #ifdef DIAGNOSTIC
242 		extern pmap_hash_t zero_pmap_hash;
243 		extern pmap_t cur_pmap;
244 
245 		if (cur_pmap->pm_hash == zero_pmap_hash ||
246 		    cur_pmap->pm_hash == (pmap_hash_t)0)
247 			panic("tlbmod");
248 #endif
		/* The faulting page must be in one of the two hash slots;
		 * otherwise the TLB and the pmap hash are inconsistent. */
249 		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
250 		if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0)
251 			i = 0;
252 		else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0)
253 			i = 1;
254 		else
255 			panic("trap: tlb umod not found");
256 		if (hp->pmh_pte[i].low & PG_RO) {
257 			ftype = VM_PROT_WRITE;
258 			goto dofault;
259 		}
260 		hp->pmh_pte[i].low |= PG_M;
261 		printf("trap: TLBupdate hi %x lo %x i %x\n",
262 			hp->pmh_pte[i].high, hp->pmh_pte[i].low,
263 			MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low)); /* XXX */
264 #ifdef ATTR
265 		pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |=
266 			PMAP_ATTR_MOD;
267 #else
268 		pa = hp->pmh_pte[i].low & PG_FRAME;
269 		if (!IS_VM_PHYSADDR(pa))
270 			panic("trap: umod");
271 		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
272 #endif
273 		if (!USERMODE(statusReg))
274 			return (pc);
275 		goto out;
276 	    }
277 
278 	case T_TLB_LD_MISS:
279 	case T_TLB_ST_MISS:
280 		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
281 		/* check for kernel address */
282 		if ((int)vadr < 0) {
283 			register vm_offset_t va;
284 			int rv;
285 
286 		kernel_fault:
287 			va = trunc_page((vm_offset_t)vadr);
288 			rv = vm_fault(kernel_map, va, ftype, FALSE);
289 			if (rv == KERN_SUCCESS)
290 				return (pc);
			/* On failure, resume at the registered onfault
			 * handler if one is set (copyin/copyout etc.). */
291 			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
292 				((struct pcb *)UADDR)->pcb_onfault = 0;
293 				return (onfault_table[i]);
294 			}
295 			goto err;
296 		}
297 		/*
298 		 * It is an error for the kernel to access user space except
299 		 * through the copyin/copyout routines.
300 		 */
301 		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
302 			goto err;
303 		/* check for fuswintr() or suswintr() getting a page fault */
304 		if (i == 4)
305 			return (onfault_table[i]);
306 		goto dofault;
307 
308 	case T_TLB_LD_MISS+T_USER:
309 		ftype = VM_PROT_READ;
310 		goto dofault;
311 
312 	case T_TLB_ST_MISS+T_USER:
313 		ftype = VM_PROT_WRITE;
314 	dofault:
315 	    {
316 		register vm_offset_t va;
317 		register struct vmspace *vm = p->p_vmspace;
318 		register vm_map_t map = &vm->vm_map;
319 		int rv;
320 
321 		va = trunc_page((vm_offset_t)vadr);
322 		rv = vm_fault(map, va, ftype, FALSE);
323 		if (rv != KERN_SUCCESS) {
324 			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
325 				map, va, ftype, rv, vadr, pc,
326 				!USERMODE(statusReg) ? ((int *)&args)[19] :
327 					p->p_md.md_regs[RA]); /* XXX */
328 			printf("\tpid %d %s PC %x RA %x SP %x\n", p->p_pid,
329 				p->p_comm, p->p_md.md_regs[PC],
330 				p->p_md.md_regs[RA],
331 				p->p_md.md_regs[SP]); /* XXX */
332 		}
333 		/*
334 		 * If this was a stack access we keep track of the maximum
335 		 * accessed stack size.  Also, if vm_fault gets a protection
336 		 * failure it is due to accessing the stack region outside
337 		 * the current limit and we need to reflect that as an access
338 		 * error.
339 		 */
340 		if ((caddr_t)va >= vm->vm_maxsaddr) {
341 			if (rv == KERN_SUCCESS) {
342 				unsigned nss;
343 
344 				nss = clrnd(btoc(USRSTACK-(unsigned)va));
345 				if (nss > vm->vm_ssize)
346 					vm->vm_ssize = nss;
347 			} else if (rv == KERN_PROTECTION_FAILURE)
348 				rv = KERN_INVALID_ADDRESS;
349 		}
350 		if (rv == KERN_SUCCESS) {
351 			if (!USERMODE(statusReg))
352 				return (pc);
353 			goto out;
354 		}
355 		if (!USERMODE(statusReg)) {
356 			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
357 				((struct pcb *)UADDR)->pcb_onfault = 0;
358 				return (onfault_table[i]);
359 			}
360 			goto err;
361 		}
362 		ucode = vadr;
363 		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
364 		break;
365 	    }
366 
367 	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
368 	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
369 	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
370 	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
371 		i = SIGSEGV;
372 		break;
373 
374 	case T_SYSCALL+T_USER:
375 	    {
376 		register int *locr0 = p->p_md.md_regs;
377 		register struct sysent *callp;
378 		unsigned int code;
379 		int numsys;
380 		struct args {
381 			int i[8];
382 		} args;
383 		int rval[2];
384 		struct sysent *systab;
385 		extern int nsysent;
386 #ifdef ULTRIXCOMPAT
387 		extern struct sysent ultrixsysent[];
388 		extern int ultrixnsysent;
389 #endif
390 
391 		cnt.v_syscall++;
392 		/* compute next PC after syscall instruction */
		/* (int)causeReg < 0 means the BD (branch-delay) bit is set:
		 * the syscall sits in a delay slot, so emulate the branch. */
393 		if ((int)causeReg < 0)
394 			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
395 		else
396 			locr0[PC] += 4;
397 		systab = sysent;
398 		numsys = nsysent;
399 #ifdef ULTRIXCOMPAT
400 		if (p->p_md.md_flags & MDP_ULTRIX) {
401 			systab = ultrixsysent;
402 			numsys = ultrixnsysent;
403 		}
404 #endif
		/* system call number arrives in register v0 */
405 		code = locr0[V0];
406 		switch (code) {
407 		case SYS_indir:
408 			/*
409 			 * Code is first argument, followed by actual args.
410 			 */
411 			code = locr0[A0];
412 			if (code >= numsys)
413 				callp = &systab[SYS_indir]; /* (illegal) */
414 			else
415 				callp = &systab[code];
416 			i = callp->sy_narg;
417 			args.i[0] = locr0[A1];
418 			args.i[1] = locr0[A2];
419 			args.i[2] = locr0[A3];
420 			if (i > 3) {
				/* remaining arguments are on the user stack */
421 				i = copyin((caddr_t)(locr0[SP] +
422 						4 * sizeof(int)),
423 					(caddr_t)&args.i[3],
424 					(u_int)(i - 3) * sizeof(int));
425 				if (i) {
426 					locr0[V0] = i;
427 					locr0[A3] = 1;
428 #ifdef KTRACE
429 					if (KTRPOINT(p, KTR_SYSCALL))
430 						ktrsyscall(p->p_tracep, code,
431 							callp->sy_narg, args.i);
432 #endif
433 					goto done;
434 				}
435 			}
436 			break;
437 
438 		case SYS___indir:
439 			/*
440 			 * Like indir, but code is a quad, so as to maintain
441 			 * quad alignment for the rest of the arguments.
442 			 */
443 			code = locr0[A0 + _QUAD_LOWWORD];
444 			if (code >= numsys)
445 				callp = &systab[SYS_indir]; /* (illegal) */
446 			else
447 				callp = &systab[code];
448 			i = callp->sy_narg;
449 			args.i[0] = locr0[A2];
450 			args.i[1] = locr0[A3];
451 			if (i > 2) {
452 				i = copyin((caddr_t)(locr0[SP] +
453 						4 * sizeof(int)),
454 					(caddr_t)&args.i[2],
455 					(u_int)(i - 2) * sizeof(int));
456 				if (i) {
457 					locr0[V0] = i;
458 					locr0[A3] = 1;
459 #ifdef KTRACE
460 					if (KTRPOINT(p, KTR_SYSCALL))
461 						ktrsyscall(p->p_tracep, code,
462 							callp->sy_narg, args.i);
463 #endif
464 					goto done;
465 				}
466 			}
467 			break;
468 
469 		default:
470 			if (code >= numsys)
471 				callp = &systab[SYS_indir]; /* (illegal) */
472 			else
473 				callp = &systab[code];
474 			i = callp->sy_narg;
475 			args.i[0] = locr0[A0];
476 			args.i[1] = locr0[A1];
477 			args.i[2] = locr0[A2];
478 			args.i[3] = locr0[A3];
479 			if (i > 4) {
480 				i = copyin((caddr_t)(locr0[SP] +
481 						4 * sizeof(int)),
482 					(caddr_t)&args.i[4],
483 					(u_int)(i - 4) * sizeof(int));
484 				if (i) {
485 					locr0[V0] = i;
486 					locr0[A3] = 1;
487 #ifdef KTRACE
488 					if (KTRPOINT(p, KTR_SYSCALL))
489 						ktrsyscall(p->p_tracep, code,
490 							callp->sy_narg, args.i);
491 #endif
492 					goto done;
493 				}
494 			}
495 		}
496 #ifdef KTRACE
497 		if (KTRPOINT(p, KTR_SYSCALL))
498 			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
499 #endif
500 		rval[0] = 0;
501 		rval[1] = locr0[V1];
502 #ifdef DEBUG
		/* tag the most recent trapdebug entry with the syscall code */
503 		if (trp == trapdebug)
504 			trapdebug[TRAPSIZE - 1].code = code;
505 		else
506 			trp[-1].code = code;
507 #endif
508 		i = (*callp->sy_call)(p, &args, rval);
509 		/*
510 		 * Reinitialize proc pointer `p' as it may be different
511 		 * if this is a child returning from fork syscall.
512 		 */
513 		p = curproc;
514 		locr0 = p->p_md.md_regs;
515 #ifdef DEBUG
516 		{ int s;
517 		s = splhigh();
518 		trp->status = statusReg;
519 		trp->cause = causeReg;
520 		trp->vadr = locr0[SP];
521 		trp->pc = locr0[PC];
522 		trp->ra = locr0[RA];
523 		trp->code = -code;
524 		if (++trp == &trapdebug[TRAPSIZE])
525 			trp = trapdebug;
526 		splx(s);
527 		}
528 #endif
		/* propagate the syscall result into the user register frame:
		 * v0/v1 carry the value, a3 is the error flag */
529 		switch (i) {
530 		case 0:
531 			locr0[V0] = rval[0];
532 			locr0[V1] = rval[1];
533 			locr0[A3] = 0;
534 			break;
535 
536 		case ERESTART:
537 			locr0[PC] = pc;
538 			break;
539 
540 		case EJUSTRETURN:
541 			break;	/* nothing to do */
542 
543 		default:
544 			locr0[V0] = i;
545 			locr0[A3] = 1;
546 		}
547 	done:
548 #ifdef KTRACE
549 		if (KTRPOINT(p, KTR_SYSRET))
550 			ktrsysret(p->p_tracep, code, i, rval[0]);
551 #endif
552 		goto out;
553 	    }
554 
555 	case T_BREAK+T_USER:
556 	    {
557 		register unsigned va, instr;
558 
559 		/* compute address of break instruction */
560 		va = pc;
561 		if ((int)causeReg < 0)
562 			va += 4;
563 
564 		/* read break instruction */
565 		instr = fuiword((caddr_t)va);
566 #ifdef KADB
567 		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
568 			goto err;
569 #endif
570 		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
571 			i = SIGTRAP;
572 			break;
573 		}
574 
575 		/* restore original instruction and clear BP  */
576 		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
577 		if (i < 0) {
			/* text page is write-protected: temporarily make it
			 * writable, patch, then restore protection */
578 			vm_offset_t sa, ea;
579 			int rv;
580 
581 			sa = trunc_page((vm_offset_t)va);
582 			ea = round_page((vm_offset_t)va+sizeof(int)-1);
583 			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
584 				VM_PROT_DEFAULT, FALSE);
585 			if (rv == KERN_SUCCESS) {
586 				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
587 				(void) vm_map_protect(&p->p_vmspace->vm_map,
588 					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
589 					FALSE);
590 			}
591 		}
592 		if (i < 0) {
593 			i = SIGTRAP;
594 			break;
595 		}
596 		p->p_md.md_ss_addr = 0;
597 		goto out;
598 	    }
599 
600 	case T_RES_INST+T_USER:
601 		i = SIGILL;
602 		break;
603 
604 	case T_COP_UNUSABLE+T_USER:
605 		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
606 			i = SIGILL;	/* only FPU instructions allowed */
607 			break;
608 		}
		/* lazily give this process the FPU state */
609 		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
610 		machFPCurProcPtr = p;
611 		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
612 		p->p_md.md_flags |= MDP_FPUSED;
613 		goto out;
614 
615 	case T_OVFLOW+T_USER:
616 		i = SIGFPE;
617 		break;
618 
619 	case T_ADDR_ERR_LD:	/* misaligned access */
620 	case T_ADDR_ERR_ST:	/* misaligned access */
621 	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
622 		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
623 			((struct pcb *)UADDR)->pcb_onfault = 0;
624 			return (onfault_table[i]);
625 		}
626 		/* FALLTHROUGH */
627 
628 	default:
629 	err:
630 #ifdef KADB
631 	    {
632 		extern struct pcb kdbpcb;
633 
		/* build a register snapshot for the kernel debugger from the
		 * saved frame (kernel mode) or the pcb (user mode) */
634 		if (USERMODE(statusReg))
635 			kdbpcb = p->p_addr->u_pcb;
636 		else {
637 			kdbpcb.pcb_regs[ZERO] = 0;
638 			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
639 			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
640 			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
641 			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
642 			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
643 			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
644 			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
645 			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
646 			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
647 			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
648 			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
649 			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
650 			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
651 			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
652 			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
653 			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
654 			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
655 			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
656 			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
657 			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
658 			kdbpcb.pcb_regs[PC] = pc;
659 			kdbpcb.pcb_regs[SR] = statusReg;
660 			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
661 		}
662 		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
663 			return (kdbpcb.pcb_regs[PC]);
664 	    }
665 #else
666 #ifdef DEBUG
667 		trapDump("trap");
668 #endif
669 #endif
670 		panic("trap");
671 	}
672 	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
673 		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
673 	trapsignal(p, i, ucode);
675 out:
676 	/*
677 	 * Note: we should only get here if returning to user mode.
678 	 */
679 	/* take pending signals */
680 	while ((i = CURSIG(p)) != 0)
681 		psig(i);
682 	p->p_pri = p->p_usrpri;
683 	astpending = 0;
684 	if (want_resched) {
685 		int s;
686 
687 		/*
688 		 * Since we are curproc, clock will normally just change
689 		 * our priority without moving us from one queue to another
690 		 * (since the running process is not on a queue.)
691 		 * If that happened after we setrq ourselves but before we
692 		 * swtch()'ed, we might not be on the queue indicated by
693 		 * our priority.
694 		 */
695 		s = splstatclock();
696 		setrq(p);
697 		p->p_stats->p_ru.ru_nivcsw++;
698 		swtch();
699 		splx(s);
700 		while ((i = CURSIG(p)) != 0)
701 			psig(i);
702 	}
703 
704 	/*
705 	 * If profiling, charge system time to the trapped pc.
706 	 */
707 	if (p->p_flag & SPROFIL) {
708 		extern int psratio;
709 
710 		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
711 	}
712 
713 	curpri = p->p_pri;
714 	return (pc);
715 }
716 
717 /*
718  * Handle an interrupt.
719  * Called from MachKernIntr() or MachUserIntr()
720  * Note: curproc might be NULL.
721  */
722 interrupt(statusReg, causeReg, pc)
723 	unsigned statusReg;	/* status register at time of the exception */
724 	unsigned causeReg;	/* cause register at time of exception */
725 	unsigned pc;		/* program counter where to continue */
726 {
727 	register unsigned mask;
728 	struct clockframe cf;
729 
730 #ifdef DEBUG
731 	trp->status = statusReg;
732 	trp->cause = causeReg;
733 	trp->vadr = 0;
734 	trp->pc = pc;
735 	trp->ra = 0;
736 	trp->code = 0;
737 	if (++trp == &trapdebug[TRAPSIZE])
738 		trp = trapdebug;
739 #endif
740 
741 	cnt.v_intr++;
742 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
	/* dispatch to the model-specific handler (pmax_intr, kn02_intr, ...)
	 * if one was installed; it returns the new spl value */
743 	if (pmax_hardware_intr)
744 		splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
745 	if (mask & MACH_INT_MASK_5) {
		/* FPU interrupt is only meaningful from user mode */
746 		if (!USERMODE(statusReg)) {
747 #ifdef DEBUG
748 			trapDump("fpintr");
749 #else
750 			printf("FPU interrupt: PC %x CR %x SR %x\n",
751 				pc, causeReg, statusReg);
752 #endif
753 		} else
754 			MachFPInterrupt(statusReg, causeReg, pc);
755 	}
756 	if (mask & MACH_SOFT_INT_MASK_0) {
757 		clearsoftclock();
758 		cnt.v_soft++;
759 		softclock();
760 	}
761 	/* process network interrupt if we trapped or will very soon */
762 	if ((mask & MACH_SOFT_INT_MASK_1) ||
763 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
764 		clearsoftnet();
765 		cnt.v_soft++;
766 #ifdef INET
767 		if (netisr & (1 << NETISR_ARP)) {
768 			netisr &= ~(1 << NETISR_ARP);
769 			arpintr();
770 		}
771 		if (netisr & (1 << NETISR_IP)) {
772 			netisr &= ~(1 << NETISR_IP);
773 			ipintr();
774 		}
775 #endif
776 #ifdef NS
777 		if (netisr & (1 << NETISR_NS)) {
778 			netisr &= ~(1 << NETISR_NS);
779 			nsintr();
780 		}
781 #endif
782 #ifdef ISO
783 		if (netisr & (1 << NETISR_ISO)) {
784 			netisr &= ~(1 << NETISR_ISO);
785 			clnlintr();
786 		}
787 #endif
788 	}
789 }
790 
791 /*
792  * Handle pmax (DECstation 2100/3100) interrupts.
793  */
794 pmax_intr(mask, pc, statusReg, causeReg)
795 	unsigned mask;
796 	unsigned pc;
797 	unsigned statusReg;
798 	unsigned causeReg;
799 {
800 	register volatile struct chiptime *c = Mach_clock_addr;
801 	struct clockframe cf;
802 	int temp;
803 
804 	/* handle clock interrupts ASAP */
805 	if (mask & MACH_INT_MASK_3) {
806 		temp = c->regc;	/* XXX clear interrupt bits */
807 		cf.pc = pc;
808 		cf.sr = statusReg;
809 		hardclock(&cf);
810 		/* keep clock interrupts enabled */
811 		causeReg &= ~MACH_INT_MASK_3;
812 	}
813 	/* Re-enable clock interrupts */
814 	splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
815 #if NSII > 0
816 	if (mask & MACH_INT_MASK_0)
817 		siiintr(0);
818 #endif
819 #if NLE > 0
820 	if (mask & MACH_INT_MASK_1)
821 		leintr(0);
822 #endif
823 #if NDC > 0
824 	if (mask & MACH_INT_MASK_2)
825 		dcintr(0);
826 #endif
827 	if (mask & MACH_INT_MASK_4)
828 		pmax_errintr();
	/* return the spl: keep still-pending interrupt levels masked */
829 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
830 		MACH_SR_INT_ENA_CUR);
831 }
832 
833 /*
834  * Handle hardware interrupts for the KN02. (DECstation 5000/200)
835  * Returns spl value.
836  */
837 kn02_intr(mask, pc, statusReg, causeReg)
838 	unsigned mask;
839 	unsigned pc;
840 	unsigned statusReg;
841 	unsigned causeReg;
842 {
843 	register unsigned i, m;
844 	register volatile struct chiptime *c = Mach_clock_addr;
845 	register unsigned csr;
846 	int temp;
847 	struct clockframe cf;
848 	static int warned = 0;
849 
850 	/* handle clock interrupts ASAP */
851 	if (mask & MACH_INT_MASK_1) {
852 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		/* report power-supply overheat transitions exactly once */
853 		if ((csr & KN02_CSR_PSWARN) && !warned) {
854 			warned = 1;
855 			printf("WARNING: power supply is overheating!\n");
856 		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
857 			warned = 0;
858 			printf("WARNING: power supply is OK again\n");
859 		}
860 
861 		temp = c->regc;	/* XXX clear interrupt bits */
862 		cf.pc = pc;
863 		cf.sr = statusReg;
864 		hardclock(&cf);
865 
866 		/* keep clock interrupts enabled */
867 		causeReg &= ~MACH_INT_MASK_1;
868 	}
869 	/* Re-enable clock interrupts */
870 	splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
871 	if (mask & MACH_INT_MASK_0) {
872 
		/* pending & enabled TURBOchannel/IO interrupts from system CSR */
873 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
874 		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
875 #if 0
		/* fixed typo: was MACHPHYS_TO_UNCACHED (missing underscore),
		 * which would not compile if this block were ever enabled */
876 		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
877 			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
878 			(m << KN02_CSR_IOINTEN_SHIFT);
879 #endif
		/* dispatch each pending slot to its registered handler */
880 		for (i = 0; m; i++, m >>= 1) {
881 			if (!(m & 1))
882 				continue;
883 			if (tc_slot_info[i].intr)
884 				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
885 			else
886 				printf("spurious interrupt %d\n", i);
887 		}
888 #if 0
889 		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
890 			csr & ~(KN02_CSR_WRESERVED | 0xFF);
891 #endif
892 	}
893 	if (mask & MACH_INT_MASK_3)
894 		kn02_errintr();
	/* return the spl: keep still-pending interrupt levels masked */
895 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
896 		MACH_SR_INT_ENA_CUR);
897 }
898 
899 /*
900  * 3min hardware interrupts. (DECstation 5000/1xx)
901  */
902 kmin_intr(mask, pc, statusReg, causeReg)
903 	unsigned mask;
904 	unsigned pc;
905 	unsigned statusReg;
906 	unsigned causeReg;
907 {
908 	register u_int intr;
909 	register volatile struct chiptime *c = Mach_clock_addr;
910 	volatile u_int *imaskp =
911 		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
912 	volatile u_int *intrp =
913 		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
914 	unsigned int old_mask;
915 	struct clockframe cf;
916 	int temp;
917 	static int user_warned = 0;
918 
	/* restrict the IOASIC interrupt mask to what autoconfig enabled */
919 	old_mask = *imaskp & kmin_tc3_imask;
920 	*imaskp = old_mask;
921 
	/* level 4 is the halt button — hand control back to the PROM */
922 	if (mask & MACH_INT_MASK_4)
923 		(*callv->halt)((int *)0, 0);
924 	if (mask & MACH_INT_MASK_3) {
925 		intr = *intrp;
926 		/* masked interrupts are still observable */
927 		intr &= old_mask;
928 
929 		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
930 			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
931 #ifdef notdef
932 			asc_dma_intr();
933 #endif
934 		}
935 
		/* acknowledge (clear) DMA error conditions */
936 		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
937 			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);
938 
939 		if (intr & KMIN_INTR_LANCE_READ_E)
940 			*intrp &= ~KMIN_INTR_LANCE_READ_E;
941 
942 		if (intr & KMIN_INTR_TIMEOUT)
943 			kn02ba_errintr();
944 
945 		if (intr & KMIN_INTR_CLOCK) {
946 			temp = c->regc;	/* XXX clear interrupt bits */
947 			cf.pc = pc;
948 			cf.sr = statusReg;
949 			hardclock(&cf);
950 		}
951 
		/* dispatch IOASIC device interrupts to their slot handlers */
952 		if ((intr & KMIN_INTR_SCC_0) &&
953 			tc_slot_info[KMIN_SCC0_SLOT].intr)
954 			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
955 			(tc_slot_info[KMIN_SCC0_SLOT].unit);
956 
957 		if ((intr & KMIN_INTR_SCC_1) &&
958 			tc_slot_info[KMIN_SCC1_SLOT].intr)
959 			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
960 			(tc_slot_info[KMIN_SCC1_SLOT].unit);
961 
962 		if ((intr & KMIN_INTR_SCSI) &&
963 			tc_slot_info[KMIN_SCSI_SLOT].intr)
964 			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
965 			(tc_slot_info[KMIN_SCSI_SLOT].unit);
966 
967 		if ((intr & KMIN_INTR_LANCE) &&
968 			tc_slot_info[KMIN_LANCE_SLOT].intr)
969 			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
970 			(tc_slot_info[KMIN_LANCE_SLOT].unit);
971 
		/* rate-limit power-supply warnings to three messages */
972 		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
973 			printf("%s\n", "Power supply ok now.");
974 			user_warned = 0;
975 		}
976 		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
977 			user_warned++;
978 			printf("%s\n", "Power supply overheating");
979 		}
980 	}
	/* TURBOchannel option slots 0-2 interrupt on CPU levels 0-2 */
981 	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
982 		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
983 	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
984 		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
985 	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
986 		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
	/* return the spl: keep still-pending interrupt levels masked */
987 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
988 		MACH_SR_INT_ENA_CUR);
989 }
990 
991 /*
992  * Maxine hardware interrupts. (Personal DECstation 5000/xx)
993  */
994 xine_intr(mask, pc, statusReg, causeReg)
995 	unsigned mask;
996 	unsigned pc;
997 	unsigned statusReg;
998 	unsigned causeReg;
999 {
1000 	register u_int intr;
1001 	register volatile struct chiptime *c = Mach_clock_addr;
1002 	volatile u_int *imaskp = (volatile u_int *)
1003 		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
1004 	volatile u_int *intrp = (volatile u_int *)
1005 		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
1006 	u_int old_mask;
1007 	struct clockframe cf;
1008 	int temp;
1009 
	/* restrict the IOASIC interrupt mask to what autoconfig enabled */
1010 	old_mask = *imaskp & xine_tc3_imask;
1011 	*imaskp = old_mask;
1012 
	/* level 4 is the halt button — hand control back to the PROM */
1013 	if (mask & MACH_INT_MASK_4)
1014 		(*callv->halt)((int *)0, 0);
1015 
1016 	/* handle clock interrupts ASAP */
1017 	if (mask & MACH_INT_MASK_1) {
1018 		temp = c->regc;	/* XXX clear interrupt bits */
1019 		cf.pc = pc;
1020 		cf.sr = statusReg;
1021 		hardclock(&cf);
1022 		causeReg &= ~MACH_INT_MASK_1;
1023 		/* reenable clock interrupts */
1024 		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
1025 	}
1026 	if (mask & MACH_INT_MASK_3) {
1027 		intr = *intrp;
1028 		/* masked interrupts are still observable */
1029 		intr &= old_mask;
1030 
1031 		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
1032 			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
1033 #ifdef notdef
1034 			asc_dma_intr();
1035 #endif
1036 		}
1037 
		/* acknowledge (clear) DMA error conditions */
1038 		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
1039 			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);
1040 
1041 		if (intr & XINE_INTR_LANCE_READ_E)
1042 			*intrp &= ~XINE_INTR_LANCE_READ_E;
1043 
		/* dispatch IOASIC device interrupts to their slot handlers */
1044 		if ((intr & XINE_INTR_SCC_0) &&
1045 			tc_slot_info[XINE_SCC0_SLOT].intr)
1046 			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
1047 			(tc_slot_info[XINE_SCC0_SLOT].unit);
1048 
1049 		if ((intr & XINE_INTR_DTOP_RX) &&
1050 			tc_slot_info[XINE_DTOP_SLOT].intr)
1051 			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
1052 			(tc_slot_info[XINE_DTOP_SLOT].unit);
1053 
1054 		if ((intr & XINE_INTR_FLOPPY) &&
1055 			tc_slot_info[XINE_FLOPPY_SLOT].intr)
1056 			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
1057 			(tc_slot_info[XINE_FLOPPY_SLOT].unit);
1058 
1059 		if ((intr & XINE_INTR_TC_0) &&
1060 			tc_slot_info[0].intr)
1061 			(*(tc_slot_info[0].intr))
1062 			(tc_slot_info[0].unit);
1063 
1064 		if ((intr & XINE_INTR_TC_1) &&
1065 			tc_slot_info[1].intr)
1066 			(*(tc_slot_info[1].intr))
1067 			(tc_slot_info[1].unit);
1068 
1069 		if ((intr & XINE_INTR_ISDN) &&
1070 			tc_slot_info[XINE_ISDN_SLOT].intr)
1071 			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
1072 			(tc_slot_info[XINE_ISDN_SLOT].unit);
1073 
1074 		if ((intr & XINE_INTR_SCSI) &&
1075 			tc_slot_info[XINE_SCSI_SLOT].intr)
1076 			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
1077 			(tc_slot_info[XINE_SCSI_SLOT].unit);
1078 
1079 		if ((intr & XINE_INTR_LANCE) &&
1080 			tc_slot_info[XINE_LANCE_SLOT].intr)
1081 			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
1082 			(tc_slot_info[XINE_LANCE_SLOT].unit);
1083 
1084 	}
1085 	if (mask & MACH_INT_MASK_2)
1086 		kn02ba_errintr();
	/* return the spl: keep still-pending interrupt levels masked */
1087 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
1088 		MACH_SR_INT_ENA_CUR);
1089 }
1090 
#ifdef DS5000_240
/*
 * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
 *
 * mask		- pending hardware interrupt bits to service
 * pc		- program counter at the time of the interrupt
 * statusReg	- CPU status register at the time of the interrupt
 * causeReg	- CPU cause register at the time of the interrupt
 *
 * Returns the status-register interrupt bits to run with: hardware
 * interrupts that are still pending in causeReg remain masked off.
 */
kn03_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* power-supply warning throttle */

	/* restrict the I/O ASIC interrupt mask to the enabled set */
	old_mask = *imaskp & kn03_tc3_imask;
	*imaskp = old_mask;

	/* interrupt 4: halt request -- drop into the PROM */
	if (mask & MACH_INT_MASK_4)
		(*callv->halt)((int *)0, 0);

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	/* interrupt 0: I/O ASIC -- demultiplex via the interrupt register */
	if (mask & MACH_INT_MASK_0) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		/* acknowledge SCSI DMA pointer-load; handler is stubbed out */
		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge (and otherwise ignore) DMA error conditions */
		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);

		if (intr & KN03_INTR_LANCE_READ_E)
			*intrp &= ~KN03_INTR_LANCE_READ_E;

		/*
		 * Dispatch each pending device interrupt to the handler
		 * registered in tc_slot_info, if any.
		 */
		if ((intr & KN03_INTR_SCC_0) &&
			tc_slot_info[KN03_SCC0_SLOT].intr)
			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
			(tc_slot_info[KN03_SCC0_SLOT].unit);

		if ((intr & KN03_INTR_SCC_1) &&
			tc_slot_info[KN03_SCC1_SLOT].intr)
			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
			(tc_slot_info[KN03_SCC1_SLOT].unit);

		if ((intr & KN03_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & KN03_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & KN03_INTR_TC_2) &&
			tc_slot_info[2].intr)
			(*(tc_slot_info[2].intr))
			(tc_slot_info[2].unit);

		if ((intr & KN03_INTR_SCSI) &&
			tc_slot_info[KN03_SCSI_SLOT].intr)
			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
			(tc_slot_info[KN03_SCSI_SLOT].unit);

		if ((intr & KN03_INTR_LANCE) &&
			tc_slot_info[KN03_LANCE_SLOT].intr)
			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
			(tc_slot_info[KN03_LANCE_SLOT].unit);

		/* power-supply warning: print at most 3 times, ack recovery */
		if (user_warned && ((intr & KN03_INTR_PSWARN) == 0)) {
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KN03_INTR_PSWARN) && (user_warned < 3)) {
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	/* interrupt 3: memory error */
	if (mask & MACH_INT_MASK_3)
		kn03_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
#endif /* DS5000_240 */
1196 
1197 /*
1198  * This is called from MachUserIntr() if astpending is set.
1199  * This is very similar to the tail of trap().
1200  */
1201 softintr(statusReg, pc)
1202 	unsigned statusReg;	/* status register at time of the exception */
1203 	unsigned pc;		/* program counter where to continue */
1204 {
1205 	register struct proc *p = curproc;
1206 	int sig;
1207 
1208 	cnt.v_soft++;
1209 	/* take pending signals */
1210 	while ((sig = CURSIG(p)) != 0)
1211 		psig(sig);
1212 	p->p_pri = p->p_usrpri;
1213 	astpending = 0;
1214 	if (p->p_flag & SOWEUPC) {
1215 		p->p_flag &= ~SOWEUPC;
1216 		ADDUPROF(p);
1217 	}
1218 	if (want_resched) {
1219 		int s;
1220 
1221 		/*
1222 		 * Since we are curproc, clock will normally just change
1223 		 * our priority without moving us from one queue to another
1224 		 * (since the running process is not on a queue.)
1225 		 * If that happened after we setrq ourselves but before we
1226 		 * swtch()'ed, we might not be on the queue indicated by
1227 		 * our priority.
1228 		 */
1229 		s = splstatclock();
1230 		setrq(p);
1231 		p->p_stats->p_ru.ru_nivcsw++;
1232 		swtch();
1233 		splx(s);
1234 		while ((sig = CURSIG(p)) != 0)
1235 			psig(sig);
1236 	}
1237 	curpri = p->p_pri;
1238 }
1239 
#ifdef DEBUG
/*
 * Dump the in-memory trap history ring buffer, newest entry first,
 * then clear it.  Runs at splhigh so no new entries are recorded
 * underneath us.
 */
trapDump(msg)
	char *msg;
{
	register int count;
	int saved_spl;

	saved_spl = splhigh();
	printf("trapDump(%s)\n", msg);
	for (count = 0; count < TRAPSIZE; count++) {
		/* step backwards through the ring, wrapping at the start */
		trp = (trp == trapdebug) ? &trapdebug[TRAPSIZE - 1] : trp - 1;
		/* an all-zero cause marks an unused slot: end of history */
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf("   RA %x code %d\n", trp->ra, trp->code);
	}
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(saved_spl);
}
#endif
1267 
1268 /*
1269  *----------------------------------------------------------------------
1270  *
1271  * MemErrorInterrupts --
1272  *   pmax_errintr - for the DS2100/DS3100
1273  *   kn02_errintr - for the DS5000/200
1274  *   kn02ba_errintr - for the DS5000/1xx and DS5000/xx
1275  *
1276  *	Handler an interrupt for the control register.
1277  *
1278  * Results:
1279  *	None.
1280  *
1281  * Side effects:
1282  *	None.
1283  *
1284  *----------------------------------------------------------------------
1285  */
1286 static void
1287 pmax_errintr()
1288 {
1289 	volatile u_short *sysCSRPtr =
1290 		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
1291 	u_short csr;
1292 
1293 	csr = *sysCSRPtr;
1294 
1295 	if (csr & KN01_CSR_MERR) {
1296 		printf("Memory error at 0x%x\n",
1297 			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
1298 		panic("Mem error interrupt");
1299 	}
1300 	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
1301 }
1302 
1303 static void
1304 kn02_errintr()
1305 {
1306 
1307 	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR));
1308 	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
1309 	MachEmptyWriteBuffer();
1310 }
1311 
#ifdef DS5000_240
/*
 * Memory error interrupt for the DS5000/240 (KN03).
 * Report the failing address, then clear the error latch by writing
 * the register.
 *
 * Fix: access the error-address register through a volatile pointer,
 * as the sibling handlers (pmax_errintr, kn02ba_errintr) already do;
 * a non-volatile access to a device register may be cached or elided
 * by the compiler.
 */
static void
kn03_errintr()
{
	register volatile unsigned *erradr =
		(volatile unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR);

	printf("erradr %x\n", *erradr);
	*erradr = 0;	/* writing clears the latched error */
	MachEmptyWriteBuffer();
}
#endif /* DS5000_240 */
1322 
1323 static void
1324 kn02ba_errintr()
1325 {
1326 	register int mer, adr, siz;
1327 	static int errintr_cnt = 0;
1328 
1329 	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
1330 	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
1331 	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);
1332 
1333 	/* clear interrupt bit */
1334 	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;
1335 
1336 	errintr_cnt++;
1337 	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
1338 	       "Bad memory chip at phys ",
1339 	       kn02ba_recover_erradr(adr, mer),
1340 	       mer, siz, adr);
1341 }
1342 
1343 static unsigned
1344 kn02ba_recover_erradr(phys, mer)
1345 	register unsigned phys, mer;
1346 {
1347 	/* phys holds bits 28:2, mer knows which byte */
1348 	switch (mer & KMIN_MER_LASTBYTE) {
1349 	case KMIN_LASTB31:
1350 		mer = 3; break;
1351 	case KMIN_LASTB23:
1352 		mer = 2; break;
1353 	case KMIN_LASTB15:
1354 		mer = 1; break;
1355 	case KMIN_LASTB07:
1356 		mer = 0; break;
1357 	}
1358 	return ((phys & KMIN_AER_ADDR_MASK) | mer);
1359 }
1360 
1361 /*
1362  * Return the resulting PC as if the branch was executed.
1363  */
1364 unsigned
1365 MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
1366 	unsigned *regsPtr;
1367 	unsigned instPC;
1368 	unsigned fpcCSR;
1369 	int allowNonBranch;
1370 {
1371 	InstFmt inst;
1372 	unsigned retAddr;
1373 	int condition;
1374 	extern unsigned GetBranchDest();
1375 
1376 #if 0
1377 	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
1378 		*instPC, fpcCSR);
1379 #endif
1380 
1381 	inst = *(InstFmt *)instPC;
1382 	switch ((int)inst.JType.op) {
1383 	case OP_SPECIAL:
1384 		switch ((int)inst.RType.func) {
1385 		case OP_JR:
1386 		case OP_JALR:
1387 			retAddr = regsPtr[inst.RType.rs];
1388 			break;
1389 
1390 		default:
1391 			if (!allowNonBranch)
1392 				panic("MachEmulateBranch: Non-branch");
1393 			retAddr = instPC + 4;
1394 			break;
1395 		}
1396 		break;
1397 
1398 	case OP_BCOND:
1399 		switch ((int)inst.IType.rt) {
1400 		case OP_BLTZ:
1401 		case OP_BLTZAL:
1402 			if ((int)(regsPtr[inst.RType.rs]) < 0)
1403 				retAddr = GetBranchDest((InstFmt *)instPC);
1404 			else
1405 				retAddr = instPC + 8;
1406 			break;
1407 
1408 		case OP_BGEZAL:
1409 		case OP_BGEZ:
1410 			if ((int)(regsPtr[inst.RType.rs]) >= 0)
1411 				retAddr = GetBranchDest((InstFmt *)instPC);
1412 			else
1413 				retAddr = instPC + 8;
1414 			break;
1415 
1416 		default:
1417 			panic("MachEmulateBranch: Bad branch cond");
1418 		}
1419 		break;
1420 
1421 	case OP_J:
1422 	case OP_JAL:
1423 		retAddr = (inst.JType.target << 2) |
1424 			((unsigned)instPC & 0xF0000000);
1425 		break;
1426 
1427 	case OP_BEQ:
1428 		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1429 			retAddr = GetBranchDest((InstFmt *)instPC);
1430 		else
1431 			retAddr = instPC + 8;
1432 		break;
1433 
1434 	case OP_BNE:
1435 		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1436 			retAddr = GetBranchDest((InstFmt *)instPC);
1437 		else
1438 			retAddr = instPC + 8;
1439 		break;
1440 
1441 	case OP_BLEZ:
1442 		if ((int)(regsPtr[inst.RType.rs]) <= 0)
1443 			retAddr = GetBranchDest((InstFmt *)instPC);
1444 		else
1445 			retAddr = instPC + 8;
1446 		break;
1447 
1448 	case OP_BGTZ:
1449 		if ((int)(regsPtr[inst.RType.rs]) > 0)
1450 			retAddr = GetBranchDest((InstFmt *)instPC);
1451 		else
1452 			retAddr = instPC + 8;
1453 		break;
1454 
1455 	case OP_COP1:
1456 		switch (inst.RType.rs) {
1457 		case OP_BCx:
1458 		case OP_BCy:
1459 			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1460 				condition = fpcCSR & MACH_FPC_COND_BIT;
1461 			else
1462 				condition = !(fpcCSR & MACH_FPC_COND_BIT);
1463 			if (condition)
1464 				retAddr = GetBranchDest((InstFmt *)instPC);
1465 			else
1466 				retAddr = instPC + 8;
1467 			break;
1468 
1469 		default:
1470 			if (!allowNonBranch)
1471 				panic("MachEmulateBranch: Bad coproc branch instruction");
1472 			retAddr = instPC + 4;
1473 		}
1474 		break;
1475 
1476 	default:
1477 		if (!allowNonBranch)
1478 			panic("MachEmulateBranch: Non-branch instruction");
1479 		retAddr = instPC + 4;
1480 	}
1481 #if 0
1482 	printf("Target addr=%x\n", retAddr);
1483 #endif
1484 	return (retAddr);
1485 }
1486 
1487 unsigned
1488 GetBranchDest(InstPtr)
1489 	InstFmt *InstPtr;
1490 {
1491 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1492 }
1493 
1494 /*
1495  * This routine is called by procxmt() to single step one instruction.
1496  * We do this by storing a break instruction after the current instruction,
1497  * resuming execution, and then restoring the old instruction.
1498  */
1499 cpu_singlestep(p)
1500 	register struct proc *p;
1501 {
1502 	register unsigned va;
1503 	register int *locr0 = p->p_md.md_regs;
1504 	int i;
1505 
1506 	/* compute next address after current location */
1507 	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
1508 	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
1509 	    !useracc((caddr_t)va, 4, B_READ)) {
1510 		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
1511 			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
1512 		return (EFAULT);
1513 	}
1514 	p->p_md.md_ss_addr = va;
1515 	p->p_md.md_ss_instr = fuiword((caddr_t)va);
1516 	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1517 	if (i < 0) {
1518 		vm_offset_t sa, ea;
1519 		int rv;
1520 
1521 		sa = trunc_page((vm_offset_t)va);
1522 		ea = round_page((vm_offset_t)va+sizeof(int)-1);
1523 		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
1524 			VM_PROT_DEFAULT, FALSE);
1525 		if (rv == KERN_SUCCESS) {
1526 			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1527 			(void) vm_map_protect(&p->p_vmspace->vm_map,
1528 				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
1529 		}
1530 	}
1531 	if (i < 0)
1532 		return (EFAULT);
1533 	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
1534 		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
1535 		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
1536 	return (0);
1537 }
1538 
1539 #ifdef DEBUG
/*
 * Fetch a word from a kernel virtual address for the debugger /
 * stack tracer.  Returns -1 for an unaligned address.
 *
 * Fix: declare the parameter explicitly instead of relying on the
 * K&R implicit-int default.
 */
kdbpeek(addr)
	int addr;	/* kernel virtual address to read */
{
	if (addr & 3) {
		printf("kdbpeek: unaligned address %x\n", addr);
		return (-1);
	}
	return (*(int *)addr);
}
1548 
1549 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1550 
1551 /*
1552  * Print a stack backtrace.
1553  */
1554 void
1555 stacktrace()
1556 {
1557 	unsigned pc, sp, fp, ra, va, subr;
1558 	int a0, a1, a2, a3;
1559 	unsigned instr, mask;
1560 	InstFmt i;
1561 	int more, stksize;
1562 	int regs[8];
1563 	extern setsoftclock();
1564 	extern char start[], edata[];
1565 
1566 	cpu_getregs(regs);
1567 
1568 	/* get initial values from the exception frame */
1569 	sp = regs[0];
1570 	pc = regs[2];
1571 	ra = 0;
1572 	a0 = regs[3];
1573 	a1 = regs[4];
1574 	a2 = regs[5];
1575 	a3 = regs[6];
1576 	fp = regs[7];
1577 
1578 loop:
1579 	/* check for current PC in the kernel interrupt handler code */
1580 	if (pc >= (unsigned)MachKernIntr && pc < (unsigned)MachUserIntr) {
1581 		/* NOTE: the offsets depend on the code in locore.s */
1582 		printf("interrupt\n");
1583 		a0 = kdbpeek(sp + 36);
1584 		a1 = kdbpeek(sp + 40);
1585 		a2 = kdbpeek(sp + 44);
1586 		a3 = kdbpeek(sp + 48);
1587 		pc = kdbpeek(sp + 20);
1588 		ra = kdbpeek(sp + 92);
1589 		sp = kdbpeek(sp + 100);
1590 		fp = kdbpeek(sp + 104);
1591 	}
1592 
1593 	/* check for current PC in the exception handler code */
1594 	if (pc >= 0x80000000 && pc < (unsigned)setsoftclock) {
1595 		ra = 0;
1596 		subr = 0;
1597 		goto done;
1598 	}
1599 
1600 	/* check for bad PC */
1601 	if (pc & 3 || pc < 0x80000000 || pc >= (unsigned)edata) {
1602 		printf("PC 0x%x: not in kernel\n", pc);
1603 		ra = 0;
1604 		subr = 0;
1605 		goto done;
1606 	}
1607 
1608 	/*
1609 	 * Find the beginning of the current subroutine by scanning backwards
1610 	 * from the current PC for the end of the previous subroutine.
1611 	 */
1612 	va = pc - sizeof(int);
1613 	while ((instr = kdbpeek(va)) != MIPS_JR_RA)
1614 		va -= sizeof(int);
1615 	va += 2 * sizeof(int);	/* skip back over branch & delay slot */
1616 	/* skip over nulls which might separate .o files */
1617 	while ((instr = kdbpeek(va)) == 0)
1618 		va += sizeof(int);
1619 	subr = va;
1620 
1621 	/* scan forwards to find stack size and any saved registers */
1622 	stksize = 0;
1623 	more = 3;
1624 	mask = 0;
1625 	for (; more; va += sizeof(int), more = (more == 3) ? 3 : more - 1) {
1626 		/* stop if hit our current position */
1627 		if (va >= pc)
1628 			break;
1629 		instr = kdbpeek(va);
1630 		i.word = instr;
1631 		switch (i.JType.op) {
1632 		case OP_SPECIAL:
1633 			switch (i.RType.func) {
1634 			case OP_JR:
1635 			case OP_JALR:
1636 				more = 2; /* stop after next instruction */
1637 				break;
1638 
1639 			case OP_SYSCALL:
1640 			case OP_BREAK:
1641 				more = 1; /* stop now */
1642 			};
1643 			break;
1644 
1645 		case OP_BCOND:
1646 		case OP_J:
1647 		case OP_JAL:
1648 		case OP_BEQ:
1649 		case OP_BNE:
1650 		case OP_BLEZ:
1651 		case OP_BGTZ:
1652 			more = 2; /* stop after next instruction */
1653 			break;
1654 
1655 		case OP_COP0:
1656 		case OP_COP1:
1657 		case OP_COP2:
1658 		case OP_COP3:
1659 			switch (i.RType.rs) {
1660 			case OP_BCx:
1661 			case OP_BCy:
1662 				more = 2; /* stop after next instruction */
1663 			};
1664 			break;
1665 
1666 		case OP_SW:
1667 			/* look for saved registers on the stack */
1668 			if (i.IType.rs != 29)
1669 				break;
1670 			/* only restore the first one */
1671 			if (mask & (1 << i.IType.rt))
1672 				break;
1673 			mask |= 1 << i.IType.rt;
1674 			switch (i.IType.rt) {
1675 			case 4: /* a0 */
1676 				a0 = kdbpeek(sp + (short)i.IType.imm);
1677 				break;
1678 
1679 			case 5: /* a1 */
1680 				a1 = kdbpeek(sp + (short)i.IType.imm);
1681 				break;
1682 
1683 			case 6: /* a2 */
1684 				a2 = kdbpeek(sp + (short)i.IType.imm);
1685 				break;
1686 
1687 			case 7: /* a3 */
1688 				a3 = kdbpeek(sp + (short)i.IType.imm);
1689 				break;
1690 
1691 			case 30: /* fp */
1692 				fp = kdbpeek(sp + (short)i.IType.imm);
1693 				break;
1694 
1695 			case 31: /* ra */
1696 				ra = kdbpeek(sp + (short)i.IType.imm);
1697 			}
1698 			break;
1699 
1700 		case OP_ADDI:
1701 		case OP_ADDIU:
1702 			/* look for stack pointer adjustment */
1703 			if (i.IType.rs != 29 || i.IType.rt != 29)
1704 				break;
1705 			stksize = (short)i.IType.imm;
1706 		}
1707 	}
1708 
1709 done:
1710 	printf("%x+%x (%x,%x,%x,%x) ra %x sz %d\n",
1711 		subr, pc - subr, a0, a1, a2, a3, ra, stksize);
1712 
1713 	if (ra) {
1714 		if (pc == ra && stksize == 0)
1715 			printf("stacktrace: loop!\n");
1716 		else {
1717 			pc = ra;
1718 			sp -= stksize;
1719 			goto loop;
1720 		}
1721 	}
1722 }
1723 #endif /* DEBUG */
1724