xref: /original-bsd/sys/pmax/pmax/trap.c (revision 4670e840)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.14 (Berkeley) 03/08/93
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29 
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36 
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49 
50 #include <asc.h>
51 #include <sii.h>
52 #include <le.h>
53 #include <dc.h>
54 
55 /*
56  * This is a kludge to allow X windows to work.
57  */
58 #define X_KLUGE
59 
60 #ifdef X_KLUGE
61 #define USER_MAP_ADDR	0x4000
62 #define NPTES 300
63 static pt_entry_t UserMapPtes[NPTES];
64 static unsigned nUserMapPtes;
65 static pid_t UserMapPid;
66 #endif
67 
68 struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */
69 
70 extern void MachKernGenException();
71 extern void MachUserGenException();
72 extern void MachKernIntr();
73 extern void MachUserIntr();
74 extern void MachTLBModException();
75 extern void MachTLBMissException();
76 extern unsigned MachEmulateBranch();
77 
/*
 * Dispatch table indexed by (exception code) for kernel-mode traps and
 * (exception code + 16) for user-mode traps; the low-level assembly
 * vectors through this table.  The first 16 entries mirror the MIPS
 * CP0 cause-register exception codes; the second 16 are the same codes
 * taken while in user mode.
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,			/* external interrupt */
	MachUserGenException,		/* TLB modification */
	MachUserGenException,		/* TLB miss (load or instr. fetch) */
	MachUserGenException,		/* TLB miss (store) */
	MachUserGenException,		/* address error (load or I-fetch) */
	MachUserGenException,		/* address error (store) */
	MachUserGenException,		/* bus error (I-fetch) */
	MachUserGenException,		/* bus error (load or store) */
	MachUserGenException,		/* system call */
	MachUserGenException,		/* breakpoint */
	MachUserGenException,		/* reserved instruction */
	MachUserGenException,		/* coprocessor unusable */
	MachUserGenException,		/* arithmetic overflow */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
};
118 
/*
 * Human-readable names for the 16 MIPS exception codes, indexed by the
 * cause-register exception code (same order as machExceptionTable).
 * Used for diagnostic printfs and panics.
 */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};
137 
#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* status register at time of trap */
	u_int	cause;		/* cause register at time of trap */
	u_int	vadr;		/* faulting virtual address (0 for interrupts) */
	u_int	pc;		/* program counter at time of trap */
	u_int	ra;		/* return-address register (0 for interrupts) */
	u_int	code;		/* syscall number (negated) or 0 */
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* trp: next free slot (circular) */
#endif
149 
150 static void pmax_errintr();
151 static void kn02_errintr(), kn02ba_errintr();
152 #ifdef DS5000_240
153 static void kn03_errintr();
154 #endif
155 static unsigned kn02ba_recover_erradr();
156 extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
157 extern u_long kmin_tc3_imask, xine_tc3_imask;
158 #ifdef DS5000_240
159 extern u_long kn03_tc3_imask;
160 #endif
161 int (*pmax_hardware_intr)() = (int (*)())0;
162 extern volatile struct chiptime *Mach_clock_addr;
163 
164 /*
165  * Handle an exception.
166  * Called from MachKernGenException() or MachUserGenException()
167  * when a processor trap occurs.
168  * In the case of a kernel trap, we return the pc where to resume if
169  * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
170  */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occured on */
	unsigned pc;		/* program counter where to continue */
	/*
	 * NOTE: `args' is deliberately left undeclared (defaults to int);
	 * it marks the start of the register frame saved by the assembly
	 * exception stub, which is accessed below via ((int *)&args)[n].
	 */
{
	register int type, i;
	unsigned ucode = 0;	/* extra code passed to trapsignal() */
	register struct proc *p = curproc;
	u_quad_t sticks;	/* system ticks at entry, for profiling */
	vm_prot_t ftype;	/* access type handed to vm_fault() */
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* Log this trap in the circular trap history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	/* In kernel mode, RA comes from slot 19 of the saved frame. */
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	/* Extract the exception code from the cause register. */
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;	/* select the user-mode switch cases below */
		sticks = p->p_sticks;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (KSEG addresses have the sign bit set) */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
#ifndef ATTR
			register vm_offset_t pa;
#endif

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* Mark the page modified and refresh the TLB entry. */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= PG_FRAME;
			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
				entry, MachTLBUpdate(vadr, entry)); /* XXX */
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
#else
			/* Propagate the modified bit to the VM page. */
			pa = entry & PG_FRAME;
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: kmod");
			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		/* Write to a user page: find its hashed pte and set PG_M. */
		pmap_hash_t hp;
#ifndef ATTR
		vm_offset_t pa;
#endif
#ifdef DIAGNOSTIC
		extern pmap_hash_t zero_pmap_hash;
		extern pmap_t cur_pmap;

		if (cur_pmap->pm_hash == zero_pmap_hash ||
		    cur_pmap->pm_hash == (pmap_hash_t)0)
			panic("tlbmod");
#endif
		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
		/* The hash bucket holds two ptes; pick the one matching vadr. */
		if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0)
			i = 0;
		else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0)
			i = 1;
		else
			panic("trap: tlb umod not found");
		if (hp->pmh_pte[i].low & PG_RO) {
			/* Genuine protection violation: take the fault path. */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		hp->pmh_pte[i].low |= PG_M;
		printf("trap: TLBupdate hi %x lo %x i %x\n",
			hp->pmh_pte[i].high, hp->pmh_pte[i].low,
			MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low)); /* XXX */
#ifdef ATTR
		pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |=
			PMAP_ATTR_MOD;
#else
		pa = hp->pmh_pte[i].low & PG_FRAME;
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: umod");
		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
		/* Kernel-mode entry (fell through from T_TLB_MOD): just resume. */
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* Unresolvable: resume at the registered onfault handler. */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
			goto err;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (i == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map = &vm->vm_map;
		int rv;

#ifdef X_KLUGE
		/*
		 * Special-case the X server's pre-registered frame-buffer
		 * mapping: load the TLB directly from the saved pte array.
		 */
		if (p->p_pid == UserMapPid &&
		    (va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
			register pt_entry_t *pte;

			pte = &UserMapPtes[va];
			MachTLBWriteRandom((vadr & PG_FRAME) |
				(vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT),
				pte->pt_entry);
			return (pc);
		}
#endif
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv != KERN_SUCCESS) {
			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
				map, va, ftype, rv, vadr, pc,
				!USERMODE(statusReg) ? ((int *)&args)[19] :
					p->p_md.md_regs[RA]); /* XXX */
			printf("\tpid %d %s PC %x RA %x SP %x\n", p->p_pid,
				p->p_comm, p->p_md.md_regs[PC],
				p->p_md.md_regs[RA],
				p->p_md.md_regs[SP]); /* XXX */
		}
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		/* Fault could not be resolved. */
		if (!USERMODE(statusReg)) {
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/* User fault: deliver a signal carrying the faulting address. */
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;		/* NOTE: shadows the `args' frame parameter */
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		if ((int)causeReg < 0)	/* BD bit set: syscall in a delay slot */
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		/* Ultrix binaries use the Ultrix syscall table. */
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];	/* syscall number in v0 */
		switch (code) {
		case SYS_indir:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			/* Remaining arguments are on the user stack. */
			if (i > 3) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					/* copyin failed: return the error. */
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___indir:
			/*
			 * Like indir, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			/* Direct syscall: up to 4 args in registers a0-a3. */
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* Tag the trap-history entry just written with the syscall number. */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		/* Log syscall return in the trap history (code negated). */
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		switch (i) {
		case 0:
			/* Success: v0/v1 hold results, a3 clear = no error. */
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			/* Back up to re-execute the syscall instruction. */
			locr0[PC] = pc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			/* Error: errno in v0, a3 set to flag failure. */
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)	/* BD bit: break is in a delay slot */
			va += 4;

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* Not our single-step breakpoint: deliver SIGTRAP to the process. */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP  */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			/*
			 * Text page is write-protected; temporarily make it
			 * writable, patch the instruction back, then restore
			 * the protection.
			 */
			vm_offset_t sa, ea;
			int rv;

			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;	/* single-step breakpoint cleared */
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		/* Only coprocessor 1 (the FPU) may be used from user mode. */
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* Lazily switch FPU state to this process and enable cop. 1. */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		/* Kernel fault with a registered onfault handler: resume there. */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		/*
		 * Hand the trap to the kernel debugger: build a pcb snapshot
		 * of the register state (from the saved frame when in kernel
		 * mode) for kdb to inspect.
		 */
		extern struct pcb kdbpcb;

		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	/* Deliver the signal chosen above to the current process. */
	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		psig(i);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		/* signals may have arrived while we were switched out */
		while ((i = CURSIG(p)) != 0)
			psig(i);
	}

	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
	}

	curpri = p->p_pri;
	return (pc);
}
738 
739 /*
740  * Handle an interrupt.
741  * Called from MachKernIntr() or MachUserIntr()
742  * Note: curproc might be NULL.
743  */
744 interrupt(statusReg, causeReg, pc)
745 	unsigned statusReg;	/* status register at time of the exception */
746 	unsigned causeReg;	/* cause register at time of exception */
747 	unsigned pc;		/* program counter where to continue */
748 {
749 	register unsigned mask;
750 	struct clockframe cf;
751 
752 #ifdef DEBUG
753 	trp->status = statusReg;
754 	trp->cause = causeReg;
755 	trp->vadr = 0;
756 	trp->pc = pc;
757 	trp->ra = 0;
758 	trp->code = 0;
759 	if (++trp == &trapdebug[TRAPSIZE])
760 		trp = trapdebug;
761 #endif
762 
763 	cnt.v_intr++;
764 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
765 	if (pmax_hardware_intr)
766 		splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
767 	if (mask & MACH_INT_MASK_5) {
768 		if (!USERMODE(statusReg)) {
769 #ifdef DEBUG
770 			trapDump("fpintr");
771 #else
772 			printf("FPU interrupt: PC %x CR %x SR %x\n",
773 				pc, causeReg, statusReg);
774 #endif
775 		} else
776 			MachFPInterrupt(statusReg, causeReg, pc);
777 	}
778 	if (mask & MACH_SOFT_INT_MASK_0) {
779 		clearsoftclock();
780 		cnt.v_soft++;
781 		softclock();
782 	}
783 	/* process network interrupt if we trapped or will very soon */
784 	if ((mask & MACH_SOFT_INT_MASK_1) ||
785 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
786 		clearsoftnet();
787 		cnt.v_soft++;
788 #ifdef INET
789 		if (netisr & (1 << NETISR_ARP)) {
790 			netisr &= ~(1 << NETISR_ARP);
791 			arpintr();
792 		}
793 		if (netisr & (1 << NETISR_IP)) {
794 			netisr &= ~(1 << NETISR_IP);
795 			ipintr();
796 		}
797 #endif
798 #ifdef NS
799 		if (netisr & (1 << NETISR_NS)) {
800 			netisr &= ~(1 << NETISR_NS);
801 			nsintr();
802 		}
803 #endif
804 #ifdef ISO
805 		if (netisr & (1 << NETISR_ISO)) {
806 			netisr &= ~(1 << NETISR_ISO);
807 			clnlintr();
808 		}
809 #endif
810 	}
811 }
812 
813 /*
814  * Handle pmax (DECstation 2100/3100) interrupts.
815  */
pmax_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending interrupts & enable mask */
	unsigned pc;		/* program counter at time of interrupt */
	unsigned statusReg;	/* status register at time of interrupt */
	unsigned causeReg;	/* cause register at time of interrupt */
{
	register volatile struct chiptime *c = Mach_clock_addr;
	struct clockframe cf;
	int temp;

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_3) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		/* keep clock interrupts enabled */
		causeReg &= ~MACH_INT_MASK_3;
	}
	/* Re-enable clock interrupts */
	splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
#if NSII > 0
	/* SCSI controller interrupt */
	if (mask & MACH_INT_MASK_0)
		siiintr(0);
#endif
#if NLE > 0
	/* LANCE ethernet interrupt */
	if (mask & MACH_INT_MASK_1)
		leintr(0);
#endif
#if NDC > 0
	/* DC7085 serial interrupt */
	if (mask & MACH_INT_MASK_2)
		dcintr(0);
#endif
	/* memory/system error */
	if (mask & MACH_INT_MASK_4)
		pmax_errintr();
	/* return new spl: mask everything still pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
854 
855 /*
856  * Handle hardware interrupts for the KN02. (DECstation 5000/200)
857  * Returns spl value.
858  */
kn02_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending interrupts & enable mask */
	unsigned pc;		/* program counter at time of interrupt */
	unsigned statusReg;	/* status register at time of interrupt */
	unsigned causeReg;	/* cause register at time of interrupt */
{
	register unsigned i, m;
	register volatile struct chiptime *c = Mach_clock_addr;
	register unsigned csr;
	int temp;
	struct clockframe cf;
	static int warned = 0;	/* power-supply warning already printed */

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		/* Check the power-supply warning bit while we are here. */
		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		if ((csr & KN02_CSR_PSWARN) && !warned) {
			warned = 1;
			printf("WARNING: power supply is overheating!\n");
		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
			warned = 0;
			printf("WARNING: power supply is OK again\n");
		}

		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);

		/* keep clock interrupts enabled */
		causeReg &= ~MACH_INT_MASK_1;
	}
	/* Re-enable clock interrupts */
	splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	if (mask & MACH_INT_MASK_0) {

		/*
		 * I/O interrupt: dispatch each pending-and-enabled
		 * TURBOchannel slot interrupt to its registered handler.
		 */
		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
#if 0
		/*
		 * NOTE(review): dead code -- `MACHPHYS_TO_UNCACHED' below is
		 * missing an underscore (should be MACH_PHYS_TO_UNCACHED);
		 * would not compile if this #if 0 were ever enabled.
		 */
		*(unsigned *)MACHPHYS_TO_UNCACHED(KN02_SYS_CSR) =
			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
			(m << KN02_CSR_IOINTEN_SHIFT);
#endif
		for (i = 0; m; i++, m >>= 1) {
			if (!(m & 1))
				continue;
			if (tc_slot_info[i].intr)
				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
			else
				printf("spurious interrupt %d\n", i);
		}
#if 0
		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
			csr & ~(KN02_CSR_WRESERVED | 0xFF);
#endif
	}
	/* memory/system error */
	if (mask & MACH_INT_MASK_3)
		kn02_errintr();
	/* return new spl: mask everything still pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
920 
921 /*
922  * 3min hardware interrupts. (DECstation 5000/1xx)
923  */
kmin_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending interrupts & enable mask */
	unsigned pc;		/* program counter at time of interrupt */
	unsigned statusReg;	/* status register at time of interrupt */
	unsigned causeReg;	/* cause register at time of interrupt */
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
	volatile u_int *intrp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
	unsigned int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* power-supply warning count (max 3) */

	/* Restrict the ASIC interrupt mask to the configured set. */
	old_mask = *imaskp & kmin_tc3_imask;
	*imaskp = old_mask;

	if (mask & MACH_INT_MASK_3) {
		/* System-slot (IOASIC) interrupt: read and decode sources. */
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* Acknowledge (but otherwise ignore) SCSI/LANCE DMA errors. */
		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);

		if (intr & KMIN_INTR_LANCE_READ_E)
			*intrp &= ~KMIN_INTR_LANCE_READ_E;

		if (intr & KMIN_INTR_TIMEOUT)
			kn02ba_errintr();

		if (intr & KMIN_INTR_CLOCK) {
			temp = c->regc;	/* XXX clear interrupt bits */
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
		}

		/* Dispatch pseudo-slot device interrupts to their handlers. */
		if ((intr & KMIN_INTR_SCC_0) &&
			tc_slot_info[KMIN_SCC0_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
			(tc_slot_info[KMIN_SCC0_SLOT].unit);

		if ((intr & KMIN_INTR_SCC_1) &&
			tc_slot_info[KMIN_SCC1_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
			(tc_slot_info[KMIN_SCC1_SLOT].unit);

		if ((intr & KMIN_INTR_SCSI) &&
			tc_slot_info[KMIN_SCSI_SLOT].intr)
			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
			(tc_slot_info[KMIN_SCSI_SLOT].unit);

		if ((intr & KMIN_INTR_LANCE) &&
			tc_slot_info[KMIN_LANCE_SLOT].intr)
			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
			(tc_slot_info[KMIN_LANCE_SLOT].unit);

		/* Power-supply warning: limit the message to three repeats. */
		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
			*imaskp = 0;
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
			*imaskp = 0;
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	/* TURBOchannel option slot interrupts. */
	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
	/* return new spl: mask everything still pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1012 
1013 /*
1014  * Maxine hardware interrupts. (Personal DECstation 5000/xx)
1015  */
xine_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending interrupts & enable mask */
	unsigned pc;		/* program counter at time of interrupt */
	unsigned statusReg;	/* status register at time of interrupt */
	unsigned causeReg;	/* cause register at time of interrupt */
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;

	/* Restrict the ASIC interrupt mask to the configured set. */
	old_mask = *imaskp & xine_tc3_imask;
	*imaskp = old_mask;

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3) {
		/* System-slot (IOASIC) interrupt: read and decode sources. */
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* Acknowledge (but otherwise ignore) SCSI/LANCE DMA errors. */
		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);

		if (intr & XINE_INTR_LANCE_READ_E)
			*intrp &= ~XINE_INTR_LANCE_READ_E;

		/* Dispatch pseudo-slot device interrupts to their handlers. */
		if ((intr & XINE_INTR_FLOPPY) &&
			tc_slot_info[XINE_FLOPPY_SLOT].intr)
			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
			(tc_slot_info[XINE_FLOPPY_SLOT].unit);

		if ((intr & XINE_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & XINE_INTR_ISDN) &&
			tc_slot_info[XINE_ISDN_SLOT].intr)
			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
			(tc_slot_info[XINE_ISDN_SLOT].unit);

		if ((intr & XINE_INTR_SCSI) &&
			tc_slot_info[XINE_SCSI_SLOT].intr)
			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
			(tc_slot_info[XINE_SCSI_SLOT].unit);

		if ((intr & XINE_INTR_LANCE) &&
			tc_slot_info[XINE_LANCE_SLOT].intr)
			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
			(tc_slot_info[XINE_LANCE_SLOT].unit);

		if ((intr & XINE_INTR_SCC_0) &&
			tc_slot_info[XINE_SCC0_SLOT].intr)
			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
			(tc_slot_info[XINE_SCC0_SLOT].unit);

		if ((intr & XINE_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & XINE_INTR_DTOP_RX) &&
			tc_slot_info[XINE_DTOP_SLOT].intr)
			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
			(tc_slot_info[XINE_DTOP_SLOT].unit);

	}
	/* memory/system error */
	if (mask & MACH_INT_MASK_2)
		kn02ba_errintr();
	/* return new spl: mask everything still pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1109 
1110 #ifdef DS5000_240
1111 /*
1112  * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
1113  */
kn03_intr(mask, pc, statusReg, causeReg)
	unsigned mask;		/* pending hard-interrupt bits to service */
	unsigned pc;		/* program counter at the time of the interrupt */
	unsigned statusReg;	/* status register at the time of the interrupt */
	unsigned causeReg;	/* cause register at the time of the interrupt */
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;

	/* restrict the ASIC interrupt mask to the currently enabled set */
	old_mask = *imaskp & kn03_tc3_imask;
	*imaskp = old_mask;

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;	/* clock has been serviced */
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_0) {
		/* I/O ASIC interrupt: fetch the request bits */
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		/* SCSI DMA pointer loaded: acknowledge (DMA code disabled) */
		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* DMA error conditions are simply acknowledged */
		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);

		if (intr & KN03_INTR_LANCE_READ_E)
			*intrp &= ~KN03_INTR_LANCE_READ_E;

		/* dispatch each pending source to its registered handler */
		if ((intr & KN03_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & KN03_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & KN03_INTR_TC_2) &&
			tc_slot_info[2].intr)
			(*(tc_slot_info[2].intr))
			(tc_slot_info[2].unit);

		if ((intr & KN03_INTR_SCSI) &&
			tc_slot_info[KN03_SCSI_SLOT].intr)
			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
			(tc_slot_info[KN03_SCSI_SLOT].unit);

		if ((intr & KN03_INTR_LANCE) &&
			tc_slot_info[KN03_LANCE_SLOT].intr)
			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
			(tc_slot_info[KN03_LANCE_SLOT].unit);

		if ((intr & KN03_INTR_SCC_0) &&
			tc_slot_info[KN03_SCC0_SLOT].intr)
			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
			(tc_slot_info[KN03_SCC0_SLOT].unit);

		if ((intr & KN03_INTR_SCC_1) &&
			tc_slot_info[KN03_SCC1_SLOT].intr)
			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
			(tc_slot_info[KN03_SCC1_SLOT].unit);

	}
	/* memory error interrupt */
	if (mask & MACH_INT_MASK_3)
		kn03_errintr();
	/* new SR: re-enable hard interrupts that are not still pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1202 #endif /* DS5000_240 */
1203 
1204 /*
1205  * This is called from MachUserIntr() if astpending is set.
1206  * This is very similar to the tail of trap().
1207  */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	int sig;

	cnt.v_soft++;	/* account a software interrupt */
	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;	/* drop back to normal user priority */
	astpending = 0;
	if (p->p_flag & SOWEUPC) {
		/* deferred profiling tick: charge it to the user PC now */
		p->p_flag &= ~SOWEUPC;
		ADDUPROF(p);
	}
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;	/* involuntary context switch */
		swtch();
		splx(s);
		/* signals may have been posted while we were switched out */
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}
	curpri = p->p_pri;
}
1246 
1247 #ifdef DEBUG
/*
 * Dump the trap-history ring buffer, most recent entry first, then
 * clear it.  Runs at splhigh so the log cannot change underneath us.
 */
trapDump(msg)
	char *msg;	/* tag identifying who requested the dump */
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* step backwards through the circular buffer */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;	/* empty slot: no older entries */
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf("   RA %x code %d\n", trp-> ra, trp->code);
	}
	/* reset the log so stale entries are not reported twice */
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
1273 #endif
1274 
1275 #ifdef X_KLUGE
1276 /*
1277  * This is a kludge to allow X windows to work.
1278  */
1279 caddr_t
1280 vmUserMap(size, pa)
1281 	int size;
1282 	unsigned pa;
1283 {
1284 	register caddr_t v;
1285 	unsigned off, entry;
1286 
1287 	if (nUserMapPtes == 0)
1288 		UserMapPid = curproc->p_pid;
1289 	else if (UserMapPid != curproc->p_pid)
1290 		return ((caddr_t)0);
1291 	off = pa & PGOFSET;
1292 	size = btoc(off + size);
1293 	if (nUserMapPtes + size > NPTES)
1294 		return ((caddr_t)0);
1295 	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
1296 	entry = (pa & 0x9ffff000) | PG_V | PG_M;
1297 	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
1298 		entry |= PG_N;
1299 	while (size > 0) {
1300 		UserMapPtes[nUserMapPtes].pt_entry = entry;
1301 		entry += NBPG;
1302 		nUserMapPtes++;
1303 		size--;
1304 	}
1305 	return (v);
1306 }
1307 
1308 vmUserUnmap()
1309 {
1310 	int id;
1311 
1312 	nUserMapPtes = 0;
1313 	if (UserMapPid == curproc->p_pid) {
1314 		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
1315 		if (id >= 0)
1316 			MachTLBFlushPID(id);
1317 	}
1318 	UserMapPid = 0;
1319 }
1320 #endif
1321 
1322 /*
1323  *----------------------------------------------------------------------
1324  *
1325  * MemErrorInterrupts --
1326  *   pmax_errintr - for the DS2100/DS3100
1327  *   kn02_errintr - for the DS5000/200
1328  *   kn02ba_errintr - for the DS5000/1xx and DS5000/xx
1329  *
 *	Handle an interrupt from the memory error/control register.
1331  *
1332  * Results:
1333  *	None.
1334  *
1335  * Side effects:
1336  *	None.
1337  *
1338  *----------------------------------------------------------------------
1339  */
/*
 * Memory error interrupt for the DS2100/DS3100: a latched memory
 * error is fatal; otherwise just clear the error state in the CSR.
 */
static void
pmax_errintr()
{
	volatile u_short *sysCSRPtr =
		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
	u_short csr;

	csr = *sysCSRPtr;

	if (csr & KN01_CSR_MERR) {
		printf("Memory error at 0x%x\n",
			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
		panic("Mem error interrupt");
	}
	/* write back with MBZ bits clear; low byte presumably acks the
	   error state -- see KN01 CSR description to confirm */
	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
}
1356 
/*
 * Memory error interrupt for the DS5000/200 (3max): report the
 * failing address and reset the error address register.
 */
static void
kn02_errintr()
{

	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR));
	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
	MachEmptyWriteBuffer();		/* push the register write out now */
}
1365 
1366 #ifdef DS5000_240
/*
 * Memory error interrupt for the DS5000/240 (3max+): report the
 * failing address and reset the error address register.
 */
static void
kn03_errintr()
{

	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR));
	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR) = 0;
	MachEmptyWriteBuffer();		/* push the register write out now */
}
1375 #endif /* DS5000_240 */
1376 
/*
 * Memory error interrupt for the DS5000/1xx (3min) and DS5000/xx
 * (MAXine): log the failing physical address, reconstructed from the
 * address error and memory error registers.
 */
static void
kn02ba_errintr()
{
	register int mer, adr, siz;
	static int errintr_cnt = 0;	/* errors seen since boot */

	/* snapshot the error state registers */
	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);

	/* clear interrupt bit */
	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;

	errintr_cnt++;
	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
	       "Bad memory chip at phys ",
	       kn02ba_recover_erradr(adr, mer),
	       mer, siz, adr);
}
1396 
1397 static unsigned
1398 kn02ba_recover_erradr(phys, mer)
1399 	register unsigned phys, mer;
1400 {
1401 	/* phys holds bits 28:2, mer knows which byte */
1402 	switch (mer & KMIN_MER_LASTBYTE) {
1403 	case KMIN_LASTB31:
1404 		mer = 3; break;
1405 	case KMIN_LASTB23:
1406 		mer = 2; break;
1407 	case KMIN_LASTB15:
1408 		mer = 1; break;
1409 	case KMIN_LASTB07:
1410 		mer = 0; break;
1411 	}
1412 	return ((phys & KMIN_AER_ADDR_MASK) | mer);
1413 }
1414 
1415 /*
1416  * Return the resulting PC as if the branch was executed.
1417  */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;	/* saved registers, indexed by register number */
	unsigned instPC;	/* address of the instruction to decode */
	unsigned fpcCSR;	/* FP control/status, for coprocessor branches */
	int allowNonBranch;	/* if 0, panic when instruction is not a branch */
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* jump through register: target comes from rs */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;	/* not a branch: fall through */
			break;
		}
		break;

	case OP_BCOND:
		/* branches on the sign of rs; rt encodes which variant */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;	/* skip branch + delay slot */
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* absolute jump within the current 256MB region */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			/* branch on FP condition bit, true or false sense */
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}
1540 
1541 unsigned
1542 GetBranchDest(InstPtr)
1543 	InstFmt *InstPtr;
1544 {
1545 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1546 }
1547 
1548 /*
1549  * This routine is called by procxmt() to single step one instruction.
1550  * We do this by storing a break instruction after the current instruction,
1551  * resuming execution, and then restoring the old instruction.
1552  */
cpu_singlestep(p)
	register struct proc *p;	/* process being single-stepped */
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;	/* saved user registers */
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): the second clause below is redundant -- given the
	 * first clause it can only matter when both values are zero.  The
	 * intent was presumably just "a breakpoint is already set";
	 * confirm before changing.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	p->p_md.md_ss_addr = va;
	/* save the original instruction, then plant the break instruction */
	p->p_md.md_ss_instr = fuiword((caddr_t)va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		/* text is write-protected: open it up temporarily */
		vm_offset_t sa, ea;
		int rv;

		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}
1592 
1593 #ifdef DEBUG
/*
 * Fetch a word from a kernel address for the debugger/backtrace code.
 * Complains and returns -1 if the address is not word aligned.
 */
kdbpeek(addr)
{
	if ((addr & 3) == 0)
		return (*(int *)addr);
	printf("kdbpeek: unaligned address %x\n", addr);
	return (-1);
}
1602 
1603 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1604 
1605 /*
1606  * Print a stack backtrace.
1607  */
1608 void
1609 stacktrace()
1610 {
1611 	unsigned pc, sp, fp, ra, va, subr;
1612 	int a0, a1, a2, a3;
1613 	unsigned instr, mask;
1614 	InstFmt i;
1615 	int more, stksize;
1616 	int regs[8];
1617 	extern setsoftclock();
1618 	extern char start[], edata[];
1619 
1620 	cpu_getregs(regs);
1621 
1622 	/* get initial values from the exception frame */
1623 	sp = regs[0];
1624 	pc = regs[2];
1625 	ra = 0;
1626 	a0 = regs[3];
1627 	a1 = regs[4];
1628 	a2 = regs[5];
1629 	a3 = regs[6];
1630 	fp = regs[7];
1631 
1632 loop:
1633 	/* check for current PC in the kernel interrupt handler code */
1634 	if (pc >= (unsigned)MachKernIntr && pc < (unsigned)MachUserIntr) {
1635 		/* NOTE: the offsets depend on the code in locore.s */
1636 		printf("interrupt\n");
1637 		a0 = kdbpeek(sp + 36);
1638 		a1 = kdbpeek(sp + 40);
1639 		a2 = kdbpeek(sp + 44);
1640 		a3 = kdbpeek(sp + 48);
1641 		pc = kdbpeek(sp + 20);
1642 		ra = kdbpeek(sp + 92);
1643 		sp = kdbpeek(sp + 100);
1644 		fp = kdbpeek(sp + 104);
1645 	}
1646 
1647 	/* check for current PC in the exception handler code */
1648 	if (pc >= 0x80000000 && pc < (unsigned)setsoftclock) {
1649 		ra = 0;
1650 		subr = 0;
1651 		goto done;
1652 	}
1653 
1654 	/* check for bad PC */
1655 	if (pc & 3 || pc < 0x80000000 || pc >= (unsigned)edata) {
1656 		printf("PC 0x%x: not in kernel\n", pc);
1657 		ra = 0;
1658 		subr = 0;
1659 		goto done;
1660 	}
1661 
1662 	/*
1663 	 * Find the beginning of the current subroutine by scanning backwards
1664 	 * from the current PC for the end of the previous subroutine.
1665 	 */
1666 	va = pc - sizeof(int);
1667 	while ((instr = kdbpeek(va)) != MIPS_JR_RA)
1668 		va -= sizeof(int);
1669 	va += 2 * sizeof(int);	/* skip back over branch & delay slot */
1670 	/* skip over nulls which might separate .o files */
1671 	while ((instr = kdbpeek(va)) == 0)
1672 		va += sizeof(int);
1673 	subr = va;
1674 
1675 	/* scan forwards to find stack size and any saved registers */
1676 	stksize = 0;
1677 	more = 3;
1678 	mask = 0;
1679 	for (; more; va += sizeof(int), more = (more == 3) ? 3 : more - 1) {
1680 		/* stop if hit our current position */
1681 		if (va >= pc)
1682 			break;
1683 		instr = kdbpeek(va);
1684 		i.word = instr;
1685 		switch (i.JType.op) {
1686 		case OP_SPECIAL:
1687 			switch (i.RType.func) {
1688 			case OP_JR:
1689 			case OP_JALR:
1690 				more = 2; /* stop after next instruction */
1691 				break;
1692 
1693 			case OP_SYSCALL:
1694 			case OP_BREAK:
1695 				more = 1; /* stop now */
1696 			};
1697 			break;
1698 
1699 		case OP_BCOND:
1700 		case OP_J:
1701 		case OP_JAL:
1702 		case OP_BEQ:
1703 		case OP_BNE:
1704 		case OP_BLEZ:
1705 		case OP_BGTZ:
1706 			more = 2; /* stop after next instruction */
1707 			break;
1708 
1709 		case OP_COP0:
1710 		case OP_COP1:
1711 		case OP_COP2:
1712 		case OP_COP3:
1713 			switch (i.RType.rs) {
1714 			case OP_BCx:
1715 			case OP_BCy:
1716 				more = 2; /* stop after next instruction */
1717 			};
1718 			break;
1719 
1720 		case OP_SW:
1721 			/* look for saved registers on the stack */
1722 			if (i.IType.rs != 29)
1723 				break;
1724 			/* only restore the first one */
1725 			if (mask & (1 << i.IType.rt))
1726 				break;
1727 			mask |= 1 << i.IType.rt;
1728 			switch (i.IType.rt) {
1729 			case 4: /* a0 */
1730 				a0 = kdbpeek(sp + (short)i.IType.imm);
1731 				break;
1732 
1733 			case 5: /* a1 */
1734 				a1 = kdbpeek(sp + (short)i.IType.imm);
1735 				break;
1736 
1737 			case 6: /* a2 */
1738 				a2 = kdbpeek(sp + (short)i.IType.imm);
1739 				break;
1740 
1741 			case 7: /* a3 */
1742 				a3 = kdbpeek(sp + (short)i.IType.imm);
1743 				break;
1744 
1745 			case 30: /* fp */
1746 				fp = kdbpeek(sp + (short)i.IType.imm);
1747 				break;
1748 
1749 			case 31: /* ra */
1750 				ra = kdbpeek(sp + (short)i.IType.imm);
1751 			}
1752 			break;
1753 
1754 		case OP_ADDI:
1755 		case OP_ADDIU:
1756 			/* look for stack pointer adjustment */
1757 			if (i.IType.rs != 29 || i.IType.rt != 29)
1758 				break;
1759 			stksize = (short)i.IType.imm;
1760 		}
1761 	}
1762 
1763 done:
1764 	printf("%x+%x (%x,%x,%x,%x) ra %x sz %d\n",
1765 		subr, pc - subr, a0, a1, a2, a3, ra, stksize);
1766 
1767 	if (ra) {
1768 		if (pc == ra && stksize == 0)
1769 			printf("stacktrace: loop!\n");
1770 		else {
1771 			pc = ra;
1772 			sp -= stksize;
1773 			goto loop;
1774 		}
1775 	}
1776 }
1777 #endif /* DEBUG */
1778