xref: /original-bsd/sys/pmax/pmax/trap.c (revision f7851764)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.15 (Berkeley) 03/23/93
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29 
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36 
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49 
50 #include <pmax/stand/dec_prom.h>
51 
52 #include <asc.h>
53 #include <sii.h>
54 #include <le.h>
55 #include <dc.h>
56 
57 /*
58  * This is a kludge to allow X windows to work.
59  */
60 #define X_KLUGE
61 
62 #ifdef X_KLUGE
63 #define USER_MAP_ADDR	0x4000
64 #define NPTES 550
65 static pt_entry_t UserMapPtes[NPTES];
66 static unsigned nUserMapPtes;
67 static pid_t UserMapPid;
68 #endif
69 
70 struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */
71 
72 extern void MachKernGenException();
73 extern void MachUserGenException();
74 extern void MachKernIntr();
75 extern void MachUserIntr();
76 extern void MachTLBModException();
77 extern void MachTLBMissException();
78 extern unsigned MachEmulateBranch();
79 
/*
 * Dispatch table of exception handlers, indexed by the exception code
 * taken from the cause register: the first 16 entries are used for traps
 * taken in kernel mode, the second 16 for traps taken from user mode.
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers (entries parallel the kernel half above).
 */
	MachUserIntr,			/* external interrupt */
	MachUserGenException,		/* TLB modification */
	MachUserGenException,		/* TLB miss (load or instr. fetch) */
	MachUserGenException,		/* TLB miss (store) */
	MachUserGenException,		/* address error (load or I-fetch) */
	MachUserGenException,		/* address error (store) */
	MachUserGenException,		/* bus error (I-fetch) */
	MachUserGenException,		/* bus error (load or store) */
	MachUserGenException,		/* system call */
	MachUserGenException,		/* breakpoint */
	MachUserGenException,		/* reserved instruction */
	MachUserGenException,		/* coprocessor unusable */
	MachUserGenException,		/* arithmetic overflow */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
};
120 
/*
 * Printable names for each exception code, indexed like the kernel half
 * of machExceptionTable (used for diagnostic messages).
 */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};
139 
#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* status register at trap time */
	u_int	cause;		/* cause register at trap time */
	u_int	vadr;		/* faulting address (0 for interrupts) */
	u_int	pc;		/* program counter at trap time */
	u_int	ra;		/* saved return address, when available */
	u_int	code;		/* syscall number tag (see trap()), else 0 */
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* trp: next free slot */
#endif
151 
152 static void pmax_errintr();
153 static void kn02_errintr(), kn02ba_errintr();
154 #ifdef DS5000_240
155 static void kn03_errintr();
156 #endif
157 static unsigned kn02ba_recover_erradr();
158 extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
159 extern u_long kmin_tc3_imask, xine_tc3_imask;
160 extern const struct callback *callv;
161 #ifdef DS5000_240
162 extern u_long kn03_tc3_imask;
163 #endif
164 int (*pmax_hardware_intr)() = (int (*)())0;
165 extern volatile struct chiptime *Mach_clock_addr;
166 
167 /*
168  * Handle an exception.
169  * Called from MachKernGenException() or MachUserGenException()
170  * when a processor trap occurs.
171  * In the case of a kernel trap, we return the pc where to resume if
172  * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
173  */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occured on */
	unsigned pc;		/* program counter where to continue */
{
	/*
	 * NOTE(review): `args' is an undeclared K&R parameter (defaults to
	 * int); &args marks the start of the saved-register frame on the
	 * kernel stack.  Word 19 appears to be the saved RA -- confirm
	 * against the locore register-save layout.
	 */
	register int type, i;
	unsigned ucode = 0;
	register struct proc *p = curproc;
	u_quad_t sticks;
	vm_prot_t ftype;
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* record this trap in the circular trap history buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	/* extract the exception code from the cause register */
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		sticks = p->p_sticks;	/* for profiling; see SPROFIL below */
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
#ifndef ATTR
			register vm_offset_t pa;
#endif

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* mark the page modified and refresh the TLB entry */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= PG_FRAME;
			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
				entry, MachTLBUpdate(vadr, entry)); /* XXX */
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
#else
			pa = entry & PG_FRAME;
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: kmod");
			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pmap_hash_t hp;
#ifndef ATTR
		vm_offset_t pa;
#endif
#ifdef DIAGNOSTIC
		extern pmap_hash_t zero_pmap_hash;
		extern pmap_t cur_pmap;

		if (cur_pmap->pm_hash == zero_pmap_hash ||
		    cur_pmap->pm_hash == (pmap_hash_t)0)
			panic("tlbmod");
#endif
		/* find which of the two hash slots maps the faulting page */
		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
		if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0)
			i = 0;
		else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0)
			i = 1;
		else
			panic("trap: tlb umod not found");
		if (hp->pmh_pte[i].low & PG_RO) {
			/* write to a read-only page: take the fault path */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		hp->pmh_pte[i].low |= PG_M;
		printf("trap: TLBupdate hi %x lo %x i %x\n",
			hp->pmh_pte[i].high, hp->pmh_pte[i].low,
			MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low)); /* XXX */
#ifdef ATTR
		pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |=
			PMAP_ATTR_MOD;
#else
		pa = hp->pmh_pte[i].low & PG_FRAME;
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: umod");
		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* resume at the copyin/copyout recovery point, if set */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
			goto err;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (i == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map = &vm->vm_map;
		int rv;

#ifdef X_KLUGE
		/* pre-wired mapping for the registered X server process */
		if (p->p_pid == UserMapPid &&
		    (va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
			register pt_entry_t *pte;

			pte = &UserMapPtes[va];
			MachTLBWriteRandom((vadr & PG_FRAME) |
				(vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT),
				pte->pt_entry);
			return (pc);
		}
#endif
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv != KERN_SUCCESS) {
			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
				map, va, ftype, rv, vadr, pc,
				!USERMODE(statusReg) ? ((int *)&args)[19] :
					p->p_md.md_regs[RA]); /* XXX */
			printf("\tpid %d %s PC %x RA %x SP %x\n", p->p_pid,
				p->p_comm, p->p_md.md_regs[PC],
				p->p_md.md_regs[RA],
				p->p_md.md_regs[SP]); /* XXX */
		}
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			/* kernel fault: try the copyin/copyout recovery point */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		/* note: this local `args' shadows the trap() parameter */
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		if ((int)causeReg < 0)	/* syscall is in a branch delay slot */
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		/* processes marked MDP_ULTRIX use the Ultrix syscall table */
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];
		switch (code) {
		case SYS_indir:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			if (i > 3) {
				/* remaining arguments are on the user stack */
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					/* copyin failed: return the error */
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___indir:
			/*
			 * Like indir, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				/* args beyond the four registers are on the stack */
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* tag the previous trapdebug entry with the syscall number */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		/* set return registers; A3 != 0 signals an error to libc */
		switch (i) {
		case 0:
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			locr0[PC] = pc;	/* re-execute the syscall */
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)	/* break is in a branch delay slot */
			va += 4;

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* anything but our own single-step breakpoint -> SIGTRAP */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP  */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			vm_offset_t sa, ea;
			int rv;

			/* text page is write protected: open it up briefly */
			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily hand the FPU over to this process */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		extern struct pcb kdbpcb;

		/* build a register snapshot for the kernel debugger */
		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		psig(i);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while ((i = CURSIG(p)) != 0)
			psig(i);
	}

	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
	}

	curpri = p->p_pri;
	return (pc);
}
741 
742 /*
743  * Handle an interrupt.
744  * Called from MachKernIntr() or MachUserIntr()
745  * Note: curproc might be NULL.
746  */
747 interrupt(statusReg, causeReg, pc)
748 	unsigned statusReg;	/* status register at time of the exception */
749 	unsigned causeReg;	/* cause register at time of exception */
750 	unsigned pc;		/* program counter where to continue */
751 {
752 	register unsigned mask;
753 	struct clockframe cf;
754 
755 #ifdef DEBUG
756 	trp->status = statusReg;
757 	trp->cause = causeReg;
758 	trp->vadr = 0;
759 	trp->pc = pc;
760 	trp->ra = 0;
761 	trp->code = 0;
762 	if (++trp == &trapdebug[TRAPSIZE])
763 		trp = trapdebug;
764 #endif
765 
766 	cnt.v_intr++;
767 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
768 	if (pmax_hardware_intr)
769 		splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
770 	if (mask & MACH_INT_MASK_5) {
771 		if (!USERMODE(statusReg)) {
772 #ifdef DEBUG
773 			trapDump("fpintr");
774 #else
775 			printf("FPU interrupt: PC %x CR %x SR %x\n",
776 				pc, causeReg, statusReg);
777 #endif
778 		} else
779 			MachFPInterrupt(statusReg, causeReg, pc);
780 	}
781 	if (mask & MACH_SOFT_INT_MASK_0) {
782 		clearsoftclock();
783 		cnt.v_soft++;
784 		softclock();
785 	}
786 	/* process network interrupt if we trapped or will very soon */
787 	if ((mask & MACH_SOFT_INT_MASK_1) ||
788 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
789 		clearsoftnet();
790 		cnt.v_soft++;
791 #ifdef INET
792 		if (netisr & (1 << NETISR_ARP)) {
793 			netisr &= ~(1 << NETISR_ARP);
794 			arpintr();
795 		}
796 		if (netisr & (1 << NETISR_IP)) {
797 			netisr &= ~(1 << NETISR_IP);
798 			ipintr();
799 		}
800 #endif
801 #ifdef NS
802 		if (netisr & (1 << NETISR_NS)) {
803 			netisr &= ~(1 << NETISR_NS);
804 			nsintr();
805 		}
806 #endif
807 #ifdef ISO
808 		if (netisr & (1 << NETISR_ISO)) {
809 			netisr &= ~(1 << NETISR_ISO);
810 			clnlintr();
811 		}
812 #endif
813 	}
814 }
815 
816 /*
817  * Handle pmax (DECstation 2100/3100) interrupts.
818  */
819 pmax_intr(mask, pc, statusReg, causeReg)
820 	unsigned mask;
821 	unsigned pc;
822 	unsigned statusReg;
823 	unsigned causeReg;
824 {
825 	register volatile struct chiptime *c = Mach_clock_addr;
826 	struct clockframe cf;
827 	int temp;
828 
829 	/* handle clock interrupts ASAP */
830 	if (mask & MACH_INT_MASK_3) {
831 		temp = c->regc;	/* XXX clear interrupt bits */
832 		cf.pc = pc;
833 		cf.sr = statusReg;
834 		hardclock(&cf);
835 		/* keep clock interrupts enabled */
836 		causeReg &= ~MACH_INT_MASK_3;
837 	}
838 	/* Re-enable clock interrupts */
839 	splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
840 #if NSII > 0
841 	if (mask & MACH_INT_MASK_0)
842 		siiintr(0);
843 #endif
844 #if NLE > 0
845 	if (mask & MACH_INT_MASK_1)
846 		leintr(0);
847 #endif
848 #if NDC > 0
849 	if (mask & MACH_INT_MASK_2)
850 		dcintr(0);
851 #endif
852 	if (mask & MACH_INT_MASK_4)
853 		pmax_errintr();
854 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
855 		MACH_SR_INT_ENA_CUR);
856 }
857 
858 /*
859  * Handle hardware interrupts for the KN02. (DECstation 5000/200)
860  * Returns spl value.
861  */
862 kn02_intr(mask, pc, statusReg, causeReg)
863 	unsigned mask;
864 	unsigned pc;
865 	unsigned statusReg;
866 	unsigned causeReg;
867 {
868 	register unsigned i, m;
869 	register volatile struct chiptime *c = Mach_clock_addr;
870 	register unsigned csr;
871 	int temp;
872 	struct clockframe cf;
873 	static int warned = 0;
874 
875 	/* handle clock interrupts ASAP */
876 	if (mask & MACH_INT_MASK_1) {
877 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
878 		if ((csr & KN02_CSR_PSWARN) && !warned) {
879 			warned = 1;
880 			printf("WARNING: power supply is overheating!\n");
881 		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
882 			warned = 0;
883 			printf("WARNING: power supply is OK again\n");
884 		}
885 
886 		temp = c->regc;	/* XXX clear interrupt bits */
887 		cf.pc = pc;
888 		cf.sr = statusReg;
889 		hardclock(&cf);
890 
891 		/* keep clock interrupts enabled */
892 		causeReg &= ~MACH_INT_MASK_1;
893 	}
894 	/* Re-enable clock interrupts */
895 	splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
896 	if (mask & MACH_INT_MASK_0) {
897 
898 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
899 		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
900 #if 0
901 		*(unsigned *)MACHPHYS_TO_UNCACHED(KN02_SYS_CSR) =
902 			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
903 			(m << KN02_CSR_IOINTEN_SHIFT);
904 #endif
905 		for (i = 0; m; i++, m >>= 1) {
906 			if (!(m & 1))
907 				continue;
908 			if (tc_slot_info[i].intr)
909 				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
910 			else
911 				printf("spurious interrupt %d\n", i);
912 		}
913 #if 0
914 		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
915 			csr & ~(KN02_CSR_WRESERVED | 0xFF);
916 #endif
917 	}
918 	if (mask & MACH_INT_MASK_3)
919 		kn02_errintr();
920 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
921 		MACH_SR_INT_ENA_CUR);
922 }
923 
924 /*
925  * 3min hardware interrupts. (DECstation 5000/1xx)
926  */
927 kmin_intr(mask, pc, statusReg, causeReg)
928 	unsigned mask;
929 	unsigned pc;
930 	unsigned statusReg;
931 	unsigned causeReg;
932 {
933 	register u_int intr;
934 	register volatile struct chiptime *c = Mach_clock_addr;
935 	volatile u_int *imaskp =
936 		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
937 	volatile u_int *intrp =
938 		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
939 	unsigned int old_mask;
940 	struct clockframe cf;
941 	int temp;
942 	static int user_warned = 0;
943 
944 	old_mask = *imaskp & kmin_tc3_imask;
945 	*imaskp = old_mask;
946 
947 	if (mask & MACH_INT_MASK_4)
948 		(*callv->halt)((int *)0, 0);
949 	if (mask & MACH_INT_MASK_3) {
950 		intr = *intrp;
951 		/* masked interrupts are still observable */
952 		intr &= old_mask;
953 
954 		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
955 			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
956 #ifdef notdef
957 			asc_dma_intr();
958 #endif
959 		}
960 
961 		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
962 			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);
963 
964 		if (intr & KMIN_INTR_LANCE_READ_E)
965 			*intrp &= ~KMIN_INTR_LANCE_READ_E;
966 
967 		if (intr & KMIN_INTR_TIMEOUT)
968 			kn02ba_errintr();
969 
970 		if (intr & KMIN_INTR_CLOCK) {
971 			temp = c->regc;	/* XXX clear interrupt bits */
972 			cf.pc = pc;
973 			cf.sr = statusReg;
974 			hardclock(&cf);
975 		}
976 
977 		if ((intr & KMIN_INTR_SCC_0) &&
978 			tc_slot_info[KMIN_SCC0_SLOT].intr)
979 			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
980 			(tc_slot_info[KMIN_SCC0_SLOT].unit);
981 
982 		if ((intr & KMIN_INTR_SCC_1) &&
983 			tc_slot_info[KMIN_SCC1_SLOT].intr)
984 			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
985 			(tc_slot_info[KMIN_SCC1_SLOT].unit);
986 
987 		if ((intr & KMIN_INTR_SCSI) &&
988 			tc_slot_info[KMIN_SCSI_SLOT].intr)
989 			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
990 			(tc_slot_info[KMIN_SCSI_SLOT].unit);
991 
992 		if ((intr & KMIN_INTR_LANCE) &&
993 			tc_slot_info[KMIN_LANCE_SLOT].intr)
994 			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
995 			(tc_slot_info[KMIN_LANCE_SLOT].unit);
996 
997 		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
998 			printf("%s\n", "Power supply ok now.");
999 			user_warned = 0;
1000 		}
1001 		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
1002 			user_warned++;
1003 			printf("%s\n", "Power supply overheating");
1004 		}
1005 	}
1006 	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
1007 		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
1008 	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
1009 		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
1010 	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
1011 		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
1012 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
1013 		MACH_SR_INT_ENA_CUR);
1014 }
1015 
1016 /*
1017  * Maxine hardware interrupts. (Personal DECstation 5000/xx)
1018  */
1019 xine_intr(mask, pc, statusReg, causeReg)
1020 	unsigned mask;
1021 	unsigned pc;
1022 	unsigned statusReg;
1023 	unsigned causeReg;
1024 {
1025 	register u_int intr;
1026 	register volatile struct chiptime *c = Mach_clock_addr;
1027 	volatile u_int *imaskp = (volatile u_int *)
1028 		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
1029 	volatile u_int *intrp = (volatile u_int *)
1030 		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
1031 	u_int old_mask;
1032 	struct clockframe cf;
1033 	int temp;
1034 
1035 	old_mask = *imaskp & xine_tc3_imask;
1036 	*imaskp = old_mask;
1037 
1038 	if (mask & MACH_INT_MASK_4)
1039 		(*callv->halt)((int *)0, 0);
1040 
1041 	/* handle clock interrupts ASAP */
1042 	if (mask & MACH_INT_MASK_1) {
1043 		temp = c->regc;	/* XXX clear interrupt bits */
1044 		cf.pc = pc;
1045 		cf.sr = statusReg;
1046 		hardclock(&cf);
1047 		causeReg &= ~MACH_INT_MASK_1;
1048 		/* reenable clock interrupts */
1049 		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
1050 	}
1051 	if (mask & MACH_INT_MASK_3) {
1052 		intr = *intrp;
1053 		/* masked interrupts are still observable */
1054 		intr &= old_mask;
1055 
1056 		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
1057 			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
1058 #ifdef notdef
1059 			asc_dma_intr();
1060 #endif
1061 		}
1062 
1063 		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
1064 			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);
1065 
1066 		if (intr & XINE_INTR_LANCE_READ_E)
1067 			*intrp &= ~XINE_INTR_LANCE_READ_E;
1068 
1069 		if ((intr & XINE_INTR_SCC_0) &&
1070 			tc_slot_info[XINE_SCC0_SLOT].intr)
1071 			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
1072 			(tc_slot_info[XINE_SCC0_SLOT].unit);
1073 
1074 		if ((intr & XINE_INTR_DTOP_RX) &&
1075 			tc_slot_info[XINE_DTOP_SLOT].intr)
1076 			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
1077 			(tc_slot_info[XINE_DTOP_SLOT].unit);
1078 
1079 		if ((intr & XINE_INTR_FLOPPY) &&
1080 			tc_slot_info[XINE_FLOPPY_SLOT].intr)
1081 			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
1082 			(tc_slot_info[XINE_FLOPPY_SLOT].unit);
1083 
1084 		if ((intr & XINE_INTR_TC_0) &&
1085 			tc_slot_info[0].intr)
1086 			(*(tc_slot_info[0].intr))
1087 			(tc_slot_info[0].unit);
1088 
1089 		if ((intr & XINE_INTR_TC_1) &&
1090 			tc_slot_info[1].intr)
1091 			(*(tc_slot_info[1].intr))
1092 			(tc_slot_info[1].unit);
1093 
1094 		if ((intr & XINE_INTR_ISDN) &&
1095 			tc_slot_info[XINE_ISDN_SLOT].intr)
1096 			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
1097 			(tc_slot_info[XINE_ISDN_SLOT].unit);
1098 
1099 		if ((intr & XINE_INTR_SCSI) &&
1100 			tc_slot_info[XINE_SCSI_SLOT].intr)
1101 			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
1102 			(tc_slot_info[XINE_SCSI_SLOT].unit);
1103 
1104 		if ((intr & XINE_INTR_LANCE) &&
1105 			tc_slot_info[XINE_LANCE_SLOT].intr)
1106 			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
1107 			(tc_slot_info[XINE_LANCE_SLOT].unit);
1108 
1109 	}
1110 	if (mask & MACH_INT_MASK_2)
1111 		kn02ba_errintr();
1112 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
1113 		MACH_SR_INT_ENA_CUR);
1114 }
1115 
1116 #ifdef DS5000_240
1117 /*
1118  * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
1119  */
kn03_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending CPU hard-interrupt bits to service */
	unsigned pc;		/* PC at the time of the interrupt */
	unsigned statusReg;	/* CPU status register at the time of the interrupt */
	unsigned causeReg;	/* CPU cause register at the time of the interrupt */
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	/* IOASIC interrupt-mask and pending-interrupt registers (uncached) */
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* power-supply warnings printed so far */

	/* restrict the IOASIC mask to sources we have handlers for */
	old_mask = *imaskp & kn03_tc3_imask;
	*imaskp = old_mask;

	/* halt button / reset request: hand control back to the PROM */
	if (mask & MACH_INT_MASK_4)
		(*callv->halt)((int *)0, 0);

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_0) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		/* ack SCSI DMA pointer-load; DMA handler not enabled yet */
		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* ack (otherwise unhandled) SCSI/LANCE DMA error conditions */
		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);

		if (intr & KN03_INTR_LANCE_READ_E)
			*intrp &= ~KN03_INTR_LANCE_READ_E;

		/* dispatch each pending device to its configured slot handler */
		if ((intr & KN03_INTR_SCC_0) &&
			tc_slot_info[KN03_SCC0_SLOT].intr)
			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
			(tc_slot_info[KN03_SCC0_SLOT].unit);

		if ((intr & KN03_INTR_SCC_1) &&
			tc_slot_info[KN03_SCC1_SLOT].intr)
			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
			(tc_slot_info[KN03_SCC1_SLOT].unit);

		if ((intr & KN03_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & KN03_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & KN03_INTR_TC_2) &&
			tc_slot_info[2].intr)
			(*(tc_slot_info[2].intr))
			(tc_slot_info[2].unit);

		if ((intr & KN03_INTR_SCSI) &&
			tc_slot_info[KN03_SCSI_SLOT].intr)
			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
			(tc_slot_info[KN03_SCSI_SLOT].unit);

		if ((intr & KN03_INTR_LANCE) &&
			tc_slot_info[KN03_LANCE_SLOT].intr)
			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
			(tc_slot_info[KN03_LANCE_SLOT].unit);

		/* power-supply warning: report recovery, warn at most 3 times */
		if (user_warned && ((intr & KN03_INTR_PSWARN) == 0)) {
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KN03_INTR_PSWARN) && (user_warned < 3)) {
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	if (mask & MACH_INT_MASK_3)
		kn03_errintr();	/* memory/bus error */
	/* re-enable hard interrupts that were enabled and are not still pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1220 #endif /* DS5000_240 */
1221 
1222 /*
1223  * This is called from MachUserIntr() if astpending is set.
1224  * This is very similar to the tail of trap().
1225  */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	int sig;

	cnt.v_soft++;	/* statistics: count software interrupts */
	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;
	astpending = 0;	/* AST is being serviced right now */
	/* charge deferred profiling ticks now that we are at a safe point */
	if (p->p_flag & SOWEUPC) {
		p->p_flag &= ~SOWEUPC;
		ADDUPROF(p);
	}
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;	/* involuntary context switch */
		swtch();
		splx(s);
		/* signals may have arrived while we were switched out */
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}
	curpri = p->p_pri;
}
1264 
1265 #ifdef DEBUG
1266 trapDump(msg)
1267 	char *msg;
1268 {
1269 	register int i;
1270 	int s;
1271 
1272 	s = splhigh();
1273 	printf("trapDump(%s)\n", msg);
1274 	for (i = 0; i < TRAPSIZE; i++) {
1275 		if (trp == trapdebug)
1276 			trp = &trapdebug[TRAPSIZE - 1];
1277 		else
1278 			trp--;
1279 		if (trp->cause == 0)
1280 			break;
1281 		printf("%s: ADR %x PC %x CR %x SR %x\n",
1282 			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
1283 				MACH_CR_EXC_CODE_SHIFT],
1284 			trp->vadr, trp->pc, trp->cause, trp->status);
1285 		printf("   RA %x code %d\n", trp-> ra, trp->code);
1286 	}
1287 	bzero(trapdebug, sizeof(trapdebug));
1288 	trp = trapdebug;
1289 	splx(s);
1290 }
1291 #endif
1292 
1293 #ifdef X_KLUGE
1294 /*
1295  * This is a kludge to allow X windows to work.
1296  */
1297 caddr_t
1298 vmUserMap(size, pa)
1299 	int size;
1300 	unsigned pa;
1301 {
1302 	register caddr_t v;
1303 	unsigned off, entry;
1304 
1305 	if (nUserMapPtes == 0)
1306 		UserMapPid = curproc->p_pid;
1307 	else if (UserMapPid != curproc->p_pid)
1308 		return ((caddr_t)0);
1309 	off = pa & PGOFSET;
1310 	size = btoc(off + size);
1311 	if (nUserMapPtes + size > NPTES)
1312 		return ((caddr_t)0);
1313 	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
1314 	entry = (pa & 0x9ffff000) | PG_V | PG_M;
1315 	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
1316 		entry |= PG_N;
1317 	while (size > 0) {
1318 		UserMapPtes[nUserMapPtes].pt_entry = entry;
1319 		entry += NBPG;
1320 		nUserMapPtes++;
1321 		size--;
1322 	}
1323 	return (v);
1324 }
1325 
1326 vmUserUnmap()
1327 {
1328 	int id;
1329 
1330 	nUserMapPtes = 0;
1331 	if (UserMapPid == curproc->p_pid) {
1332 		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
1333 		if (id >= 0)
1334 			MachTLBFlushPID(id);
1335 	}
1336 	UserMapPid = 0;
1337 }
1338 #endif
1339 
1340 /*
1341  *----------------------------------------------------------------------
1342  *
 * MemErrorInterrupts --
 *   pmax_errintr - for the DS2100/DS3100
 *   kn02_errintr - for the DS5000/200
 *   kn02ba_errintr - for the DS5000/1xx and DS5000/xx
 *   kn03_errintr - for the DS5000/240
 *
 *	Handle an interrupt from the system control register.
1349  *
1350  * Results:
1351  *	None.
1352  *
1353  * Side effects:
1354  *	None.
1355  *
1356  *----------------------------------------------------------------------
1357  */
1358 static void
1359 pmax_errintr()
1360 {
1361 	volatile u_short *sysCSRPtr =
1362 		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
1363 	u_short csr;
1364 
1365 	csr = *sysCSRPtr;
1366 
1367 	if (csr & KN01_CSR_MERR) {
1368 		printf("Memory error at 0x%x\n",
1369 			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
1370 		panic("Mem error interrupt");
1371 	}
1372 	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
1373 }
1374 
1375 static void
1376 kn02_errintr()
1377 {
1378 
1379 	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR));
1380 	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
1381 	MachEmptyWriteBuffer();
1382 }
1383 
1384 #ifdef DS5000_240
1385 static void
1386 kn03_errintr()
1387 {
1388 
1389 	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR));
1390 	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR) = 0;
1391 	MachEmptyWriteBuffer();
1392 }
1393 #endif /* DS5000_240 */
1394 
1395 static void
1396 kn02ba_errintr()
1397 {
1398 	register int mer, adr, siz;
1399 	static int errintr_cnt = 0;
1400 
1401 	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
1402 	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
1403 	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);
1404 
1405 	/* clear interrupt bit */
1406 	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;
1407 
1408 	errintr_cnt++;
1409 	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
1410 	       "Bad memory chip at phys ",
1411 	       kn02ba_recover_erradr(adr, mer),
1412 	       mer, siz, adr);
1413 }
1414 
1415 static unsigned
1416 kn02ba_recover_erradr(phys, mer)
1417 	register unsigned phys, mer;
1418 {
1419 	/* phys holds bits 28:2, mer knows which byte */
1420 	switch (mer & KMIN_MER_LASTBYTE) {
1421 	case KMIN_LASTB31:
1422 		mer = 3; break;
1423 	case KMIN_LASTB23:
1424 		mer = 2; break;
1425 	case KMIN_LASTB15:
1426 		mer = 1; break;
1427 	case KMIN_LASTB07:
1428 		mer = 0; break;
1429 	}
1430 	return ((phys & KMIN_AER_ADDR_MASK) | mer);
1431 }
1432 
1433 /*
1434  * Return the resulting PC as if the branch was executed.
1435  */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;	/* saved registers, indexed by register number */
	unsigned instPC;	/* address of the instruction to decode */
	unsigned fpcCSR;	/* FP control/status, for coprocessor-1 branches */
	int allowNonBranch;	/* if set, non-branches return instPC + 4 instead of panicking */
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* register jump: target comes from rs */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		/* branches comparing rs against zero; rt encodes the condition */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;	/* skip branch + delay slot */
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* absolute jump within the current 256MB region */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		/* FP branch: taken-ness depends on the FP condition bit */
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}
1558 
1559 unsigned
1560 GetBranchDest(InstPtr)
1561 	InstFmt *InstPtr;
1562 {
1563 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1564 }
1565 
1566 /*
1567  * This routine is called by procxmt() to single step one instruction.
1568  * We do this by storing a break instruction after the current instruction,
1569  * resuming execution, and then restoring the old instruction.
1570  */
cpu_singlestep(p)
	register struct proc *p;	/* process being stepped (called from procxmt) */
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;	/* saved user register frame */
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): the first clause makes the second redundant
	 * (if md_ss_addr is nonzero the || already fires); possibly
	 * "md_ss_addr && md_ss_addr == va" was intended -- confirm.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	/* remember the original instruction, then plant the break */
	p->p_md.md_ss_addr = va;
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		/* text is write-protected: open it up, retry, re-protect */
		vm_offset_t sa, ea;
		int rv;

		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}
1610 
1611 #ifdef DEBUG
/*
 * Fetch one word of kernel memory for the debugger's stack tracer.
 * Returns -1 (after complaining) if addr is not word aligned,
 * otherwise the word at addr.  Fix: declare the parameter instead of
 * relying on K&R implicit int, matching the rest of this file.
 */
kdbpeek(addr)
	int addr;	/* kernel virtual address to read */
{
	if (addr & 3) {
		printf("kdbpeek: unaligned address %x\n", addr);
		return (-1);
	}
	return (*(int *)addr);
}
1620 
1621 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1622 
1623 /*
1624  * Print a stack backtrace.
1625  */
1626 void
1627 stacktrace()
1628 {
1629 	unsigned pc, sp, fp, ra, va, subr;
1630 	int a0, a1, a2, a3;
1631 	unsigned instr, mask;
1632 	InstFmt i;
1633 	int more, stksize;
1634 	int regs[8];
1635 	extern setsoftclock();
1636 	extern char start[], edata[];
1637 
1638 	cpu_getregs(regs);
1639 
1640 	/* get initial values from the exception frame */
1641 	sp = regs[0];
1642 	pc = regs[2];
1643 	ra = 0;
1644 	a0 = regs[3];
1645 	a1 = regs[4];
1646 	a2 = regs[5];
1647 	a3 = regs[6];
1648 	fp = regs[7];
1649 
1650 loop:
1651 	/* check for current PC in the kernel interrupt handler code */
1652 	if (pc >= (unsigned)MachKernIntr && pc < (unsigned)MachUserIntr) {
1653 		/* NOTE: the offsets depend on the code in locore.s */
1654 		printf("interrupt\n");
1655 		a0 = kdbpeek(sp + 36);
1656 		a1 = kdbpeek(sp + 40);
1657 		a2 = kdbpeek(sp + 44);
1658 		a3 = kdbpeek(sp + 48);
1659 		pc = kdbpeek(sp + 20);
1660 		ra = kdbpeek(sp + 92);
1661 		sp = kdbpeek(sp + 100);
1662 		fp = kdbpeek(sp + 104);
1663 	}
1664 
1665 	/* check for current PC in the exception handler code */
1666 	if (pc >= 0x80000000 && pc < (unsigned)setsoftclock) {
1667 		ra = 0;
1668 		subr = 0;
1669 		goto done;
1670 	}
1671 
1672 	/* check for bad PC */
1673 	if (pc & 3 || pc < 0x80000000 || pc >= (unsigned)edata) {
1674 		printf("PC 0x%x: not in kernel\n", pc);
1675 		ra = 0;
1676 		subr = 0;
1677 		goto done;
1678 	}
1679 
1680 	/*
1681 	 * Find the beginning of the current subroutine by scanning backwards
1682 	 * from the current PC for the end of the previous subroutine.
1683 	 */
1684 	va = pc - sizeof(int);
1685 	while ((instr = kdbpeek(va)) != MIPS_JR_RA)
1686 		va -= sizeof(int);
1687 	va += 2 * sizeof(int);	/* skip back over branch & delay slot */
1688 	/* skip over nulls which might separate .o files */
1689 	while ((instr = kdbpeek(va)) == 0)
1690 		va += sizeof(int);
1691 	subr = va;
1692 
1693 	/* scan forwards to find stack size and any saved registers */
1694 	stksize = 0;
1695 	more = 3;
1696 	mask = 0;
1697 	for (; more; va += sizeof(int), more = (more == 3) ? 3 : more - 1) {
1698 		/* stop if hit our current position */
1699 		if (va >= pc)
1700 			break;
1701 		instr = kdbpeek(va);
1702 		i.word = instr;
1703 		switch (i.JType.op) {
1704 		case OP_SPECIAL:
1705 			switch (i.RType.func) {
1706 			case OP_JR:
1707 			case OP_JALR:
1708 				more = 2; /* stop after next instruction */
1709 				break;
1710 
1711 			case OP_SYSCALL:
1712 			case OP_BREAK:
1713 				more = 1; /* stop now */
1714 			};
1715 			break;
1716 
1717 		case OP_BCOND:
1718 		case OP_J:
1719 		case OP_JAL:
1720 		case OP_BEQ:
1721 		case OP_BNE:
1722 		case OP_BLEZ:
1723 		case OP_BGTZ:
1724 			more = 2; /* stop after next instruction */
1725 			break;
1726 
1727 		case OP_COP0:
1728 		case OP_COP1:
1729 		case OP_COP2:
1730 		case OP_COP3:
1731 			switch (i.RType.rs) {
1732 			case OP_BCx:
1733 			case OP_BCy:
1734 				more = 2; /* stop after next instruction */
1735 			};
1736 			break;
1737 
1738 		case OP_SW:
1739 			/* look for saved registers on the stack */
1740 			if (i.IType.rs != 29)
1741 				break;
1742 			/* only restore the first one */
1743 			if (mask & (1 << i.IType.rt))
1744 				break;
1745 			mask |= 1 << i.IType.rt;
1746 			switch (i.IType.rt) {
1747 			case 4: /* a0 */
1748 				a0 = kdbpeek(sp + (short)i.IType.imm);
1749 				break;
1750 
1751 			case 5: /* a1 */
1752 				a1 = kdbpeek(sp + (short)i.IType.imm);
1753 				break;
1754 
1755 			case 6: /* a2 */
1756 				a2 = kdbpeek(sp + (short)i.IType.imm);
1757 				break;
1758 
1759 			case 7: /* a3 */
1760 				a3 = kdbpeek(sp + (short)i.IType.imm);
1761 				break;
1762 
1763 			case 30: /* fp */
1764 				fp = kdbpeek(sp + (short)i.IType.imm);
1765 				break;
1766 
1767 			case 31: /* ra */
1768 				ra = kdbpeek(sp + (short)i.IType.imm);
1769 			}
1770 			break;
1771 
1772 		case OP_ADDI:
1773 		case OP_ADDIU:
1774 			/* look for stack pointer adjustment */
1775 			if (i.IType.rs != 29 || i.IType.rt != 29)
1776 				break;
1777 			stksize = (short)i.IType.imm;
1778 		}
1779 	}
1780 
1781 done:
1782 	printf("%x+%x (%x,%x,%x,%x) ra %x sz %d\n",
1783 		subr, pc - subr, a0, a1, a2, a3, ra, stksize);
1784 
1785 	if (ra) {
1786 		if (pc == ra && stksize == 0)
1787 			printf("stacktrace: loop!\n");
1788 		else {
1789 			pc = ra;
1790 			sp -= stksize;
1791 			goto loop;
1792 		}
1793 	}
1794 }
1795 #endif /* DEBUG */
1796