xref: /original-bsd/sys/pmax/pmax/trap.c (revision 67bfb13e)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.12 (Berkeley) 12/20/92
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29 
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36 
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49 
50 #include <asc.h>
51 #include <sii.h>
52 #include <le.h>
53 #include <dc.h>
54 
55 /*
56  * This is a kludge to allow X windows to work.
57  */
58 #define X_KLUGE
59 
60 #ifdef X_KLUGE
61 #define USER_MAP_ADDR	0x4000
62 #define NPTES 300
63 static pt_entry_t UserMapPtes[NPTES];
64 static unsigned nUserMapPtes;
65 static pid_t UserMapPid;
66 #endif
67 
68 struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */
69 
70 extern void MachKernGenException();
71 extern void MachUserGenException();
72 extern void MachKernIntr();
73 extern void MachUserIntr();
74 extern void MachTLBModException();
75 extern void MachTLBMissException();
76 extern unsigned MachEmulateBranch();
77 
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 * Indexed by the cause-register exception code (0-15); only interrupts
 * and TLB misses get dedicated fast handlers, everything else funnels
 * through the general exception handler.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 * Same exception codes, offset by 16 for traps taken from user mode.
 */
	MachUserIntr,			/* external interrupt */
	MachUserGenException,		/* TLB modification */
	MachUserGenException,		/* TLB miss (load or instr. fetch) */
	MachUserGenException,		/* TLB miss (store) */
	MachUserGenException,		/* address error (load or I-fetch) */
	MachUserGenException,		/* address error (store) */
	MachUserGenException,		/* bus error (I-fetch) */
	MachUserGenException,		/* bus error (load or store) */
	MachUserGenException,		/* system call */
	MachUserGenException,		/* breakpoint */
	MachUserGenException,		/* reserved instruction */
	MachUserGenException,		/* coprocessor unusable */
	MachUserGenException,		/* arithmetic overflow */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
	MachUserGenException,		/* reserved */
};
118 
/*
 * Printable names for the exception codes above, indexed the same way
 * (cause-register exception code, without the T_USER offset).
 */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};
137 
#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* status register at trap/interrupt time */
	u_int	cause;		/* cause register at trap/interrupt time */
	u_int	vadr;		/* faulting virtual address (0 if none) */
	u_int	pc;		/* program counter at trap time */
	u_int	ra;		/* return-address register (0 for interrupts) */
	u_int	code;		/* negated syscall code, or 0 for other traps */
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* trp cycles through the ring */
#endif
149 
150 static void pmax_errintr();
151 static void kn02_errintr(), kn02ba_errintr();
152 #ifdef DS5000_240
153 static void kn03_errintr();
154 #endif
155 static unsigned kn02ba_recover_erradr();
156 extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
157 extern u_long kmin_tc3_imask, xine_tc3_imask;
158 #ifdef DS5000_240
159 extern u_long kn03_tc3_imask;
160 #endif
161 int (*pmax_hardware_intr)() = (int (*)())0;
162 extern volatile struct chiptime *Mach_clock_addr;
163 
164 /*
165  * Handle an exception.
166  * Called from MachKernGenException() or MachUserGenException()
167  * when a processor trap occurs.
168  * In the case of a kernel trap, we return the pc where to resume if
169  * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
170  */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occurred on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;	/* fault code handed to trapsignal() */
	register struct proc *p = curproc;
	u_quad_t sticks;	/* system time at entry, for profiling */
	vm_prot_t ftype;	/* access type passed to vm_fault() */
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* log this trap in the circular trap history buffer */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	/* args[19] is RA in the kernel exception frame laid down by locore */
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	/* extract the exception code from the cause register */
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		sticks = p->p_sticks;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (kernel space is sign-negative) */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
#ifndef ATTR
			register vm_offset_t pa;
#endif

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* mark the page dirty and push the change to the TLB */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= PG_FRAME;
			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
				entry, MachTLBUpdate(vadr, entry)); /* XXX */
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
#else
			pa = entry & PG_FRAME;
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: kmod");
			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pmap_hash_t hp;
#ifndef ATTR
		vm_offset_t pa;
#endif
#ifdef DIAGNOSTIC
		extern pmap_hash_t zero_pmap_hash;
		extern pmap_t cur_pmap;

		if (cur_pmap->pm_hash == zero_pmap_hash ||
		    cur_pmap->pm_hash == (pmap_hash_t)0)
			panic("tlbmod");
#endif
		/* look up the fault address in the two-way pmap hash bucket */
		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
		if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0)
			i = 0;
		else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0)
			i = 1;
		else
			panic("trap: tlb umod not found");
		if (hp->pmh_pte[i].low & PG_RO) {
			/* write to a read-only user page: take a real fault */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		hp->pmh_pte[i].low |= PG_M;
		printf("trap: TLBupdate hi %x lo %x i %x\n",
			hp->pmh_pte[i].high, hp->pmh_pte[i].low,
			MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low)); /* XXX */
#ifdef ATTR
		pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |=
			PMAP_ATTR_MOD;
#else
		pa = hp->pmh_pte[i].low & PG_FRAME;
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: umod");
		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* resume at the copyin/copyout recovery point, if set */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
			goto err;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (i == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map = &vm->vm_map;
		int rv;

#ifdef X_KLUGE
		/* pre-wired TLB mappings for the X server process */
		if (p->p_pid == UserMapPid &&
		    (va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
			register pt_entry_t *pte;

			pte = &UserMapPtes[va];
			MachTLBWriteRandom((vadr & PG_FRAME) |
				(vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT),
				pte->pt_entry);
			return (pc);
		}
#endif
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv != KERN_SUCCESS) {
			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
				map, va, ftype, rv, vadr, pc,
				!USERMODE(statusReg) ? ((int *)&args)[19] :
					p->p_md.md_regs[RA]); /* XXX */
			printf("\tpid %d %s PC %x RA %x\n", p->p_pid,
				p->p_comm, p->p_md.md_regs[PC],
				p->p_md.md_regs[RA]); /* XXX */
			trapDump("vm_fault");
		}
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		/* fault could not be resolved: kernel recovers via onfault,
		   user mode gets a signal */
		if (!USERMODE(statusReg)) {
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		if ((int)causeReg < 0)
			/* trap was in a branch delay slot (BD bit set) */
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		/* Ultrix binaries use a different system call table */
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];
		switch (code) {
		case SYS_indir:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			if (i > 3) {
				/* remaining args come from the user stack */
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					/* copyin failed: return the errno */
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___indir:
			/*
			 * Like indir, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			/* normal syscall: first four args in A0-A3,
			   the rest on the user stack */
			if (code >= numsys)
				callp = &systab[SYS_indir]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* tag the most recent trapdebug entry with the syscall code */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;	/* negated to mark syscall return */
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		switch (i) {
		case 0:
			/* success: return values in V0/V1, A3 clear */
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			/* back up so the syscall instruction re-executes */
			locr0[PC] = pc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			/* error: errno in V0, A3 set */
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)
			va += 4;	/* break was in a branch delay slot */

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* a break that is not our single-step breakpoint is the
		   process's own: deliver SIGTRAP */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP  */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			/* text page is write-protected: temporarily make it
			   writable to patch the instruction back */
			vm_offset_t sa, ea;
			int rv;

			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily switch FPU state to this process */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		/* hand the machine state to the kernel debugger */
		extern struct pcb kdbpcb;

		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			/* build a register snapshot from the kernel
			   exception frame (args[] layout set by locore) */
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		psig(i);
	p->p_pri = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		/* signals may have arrived while we were switched out */
		while ((i = CURSIG(p)) != 0)
			psig(i);
	}

	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
	}

	curpri = p->p_pri;
	return (pc);
}
738 
739 /*
740  * Handle an interrupt.
741  * Called from MachKernIntr() or MachUserIntr()
742  * Note: curproc might be NULL.
743  */
744 interrupt(statusReg, causeReg, pc)
745 	unsigned statusReg;	/* status register at time of the exception */
746 	unsigned causeReg;	/* cause register at time of exception */
747 	unsigned pc;		/* program counter where to continue */
748 {
749 	register unsigned mask;
750 	struct clockframe cf;
751 
752 #ifdef DEBUG
753 	trp->status = statusReg;
754 	trp->cause = causeReg;
755 	trp->vadr = 0;
756 	trp->pc = pc;
757 	trp->ra = 0;
758 	trp->code = 0;
759 	if (++trp == &trapdebug[TRAPSIZE])
760 		trp = trapdebug;
761 #endif
762 
763 	cnt.v_intr++;
764 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
765 	if (pmax_hardware_intr)
766 		splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
767 	if (mask & MACH_INT_MASK_5) {
768 		if (!USERMODE(statusReg)) {
769 #ifdef DEBUG
770 			trapDump("fpintr");
771 #else
772 			printf("FPU interrupt: PC %x CR %x SR %x\n",
773 				pc, causeReg, statusReg);
774 #endif
775 		} else
776 			MachFPInterrupt(statusReg, causeReg, pc);
777 	}
778 	if (mask & MACH_SOFT_INT_MASK_0) {
779 		clearsoftclock();
780 		cnt.v_soft++;
781 		softclock();
782 	}
783 	/* process network interrupt if we trapped or will very soon */
784 	if ((mask & MACH_SOFT_INT_MASK_1) ||
785 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
786 		clearsoftnet();
787 		cnt.v_soft++;
788 #ifdef INET
789 		if (netisr & (1 << NETISR_ARP)) {
790 			netisr &= ~(1 << NETISR_ARP);
791 			arpintr();
792 		}
793 		if (netisr & (1 << NETISR_IP)) {
794 			netisr &= ~(1 << NETISR_IP);
795 			ipintr();
796 		}
797 #endif
798 #ifdef NS
799 		if (netisr & (1 << NETISR_NS)) {
800 			netisr &= ~(1 << NETISR_NS);
801 			nsintr();
802 		}
803 #endif
804 #ifdef ISO
805 		if (netisr & (1 << NETISR_ISO)) {
806 			netisr &= ~(1 << NETISR_ISO);
807 			clnlintr();
808 		}
809 #endif
810 	}
811 }
812 
813 /*
814  * Handle pmax (DECstation 2100/3100) interrupts.
815  */
816 pmax_intr(mask, pc, statusReg, causeReg)
817 	unsigned mask;
818 	unsigned pc;
819 	unsigned statusReg;
820 	unsigned causeReg;
821 {
822 	register volatile struct chiptime *c = Mach_clock_addr;
823 	struct clockframe cf;
824 	int temp;
825 
826 	/* handle clock interrupts ASAP */
827 	if (mask & MACH_INT_MASK_3) {
828 		temp = c->regc;	/* XXX clear interrupt bits */
829 		cf.pc = pc;
830 		cf.sr = statusReg;
831 		hardclock(&cf);
832 		causeReg &= ~MACH_INT_MASK_3;	/* reenable clock interrupts */
833 		splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
834 	}
835 #if NSII > 0
836 	if (mask & MACH_INT_MASK_0)
837 		siiintr(0);
838 #endif
839 #if NLE > 0
840 	if (mask & MACH_INT_MASK_1)
841 		leintr(0);
842 #endif
843 #if NDC > 0
844 	if (mask & MACH_INT_MASK_2)
845 		dcintr(0);
846 #endif
847 	if (mask & MACH_INT_MASK_4)
848 		pmax_errintr();
849 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
850 		MACH_SR_INT_ENA_CUR);
851 }
852 
853 /*
854  * Handle hardware interrupts for the KN02. (DECstation 5000/200)
855  * Returns spl value.
856  */
857 kn02_intr(mask, pc, statusReg, causeReg)
858 	unsigned mask;
859 	unsigned pc;
860 	unsigned statusReg;
861 	unsigned causeReg;
862 {
863 	register unsigned i, m;
864 	register volatile struct chiptime *c = Mach_clock_addr;
865 	register unsigned csr;
866 	int temp;
867 	struct clockframe cf;
868 	static int warned = 0;
869 
870 	/* handle clock interrupts ASAP */
871 	if (mask & MACH_INT_MASK_1) {
872 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
873 		if ((csr & KN02_CSR_PSWARN) && !warned) {
874 			warned = 1;
875 			printf("WARNING: power supply is overheating!\n");
876 		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
877 			warned = 0;
878 			printf("WARNING: power supply is OK again\n");
879 		}
880 
881 		temp = c->regc;	/* XXX clear interrupt bits */
882 		cf.pc = pc;
883 		cf.sr = statusReg;
884 		hardclock(&cf);
885 
886 		/* Re-enable clock interrupts */
887 		causeReg &= ~MACH_INT_MASK_1;
888 		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
889 	}
890 	if (mask & MACH_INT_MASK_0) {
891 
892 		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
893 		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
894 #if 0
895 		*(unsigned *)MACHPHYS_TO_UNCACHED(KN02_SYS_CSR) =
896 			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
897 			(m << KN02_CSR_IOINTEN_SHIFT);
898 #endif
899 		for (i = 0; m; i++, m >>= 1) {
900 			if (!(m & 1))
901 				continue;
902 			if (tc_slot_info[i].intr)
903 				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
904 			else
905 				printf("spurious interrupt %d\n", i);
906 		}
907 #if 0
908 		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
909 			csr & ~(KN02_CSR_WRESERVED | 0xFF);
910 #endif
911 	}
912 	if (mask & MACH_INT_MASK_3)
913 		kn02_errintr();
914 	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
915 		MACH_SR_INT_ENA_CUR);
916 }
917 
918 /*
919  * 3min hardware interrupts. (DECstation 5000/1xx)
920  */
kmin_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
	volatile u_int *intrp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
	unsigned int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* power-supply warnings issued so far */

	/* restrict the ASIC interrupt mask to the configured set */
	old_mask = *imaskp & kmin_tc3_imask;
	*imaskp = old_mask;

	if (mask & MACH_INT_MASK_3) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge (and otherwise ignore) DMA error conditions */
		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);

		if (intr & KMIN_INTR_LANCE_READ_E)
			*intrp &= ~KMIN_INTR_LANCE_READ_E;

		if (intr & KMIN_INTR_TIMEOUT)
			kn02ba_errintr();

		if (intr & KMIN_INTR_CLOCK) {
			temp = c->regc;	/* XXX clear interrupt bits */
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
		}

		/* dispatch the on-board device interrupts to their drivers */
		if ((intr & KMIN_INTR_SCC_0) &&
			tc_slot_info[KMIN_SCC0_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
			(tc_slot_info[KMIN_SCC0_SLOT].unit);

		if ((intr & KMIN_INTR_SCC_1) &&
			tc_slot_info[KMIN_SCC1_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
			(tc_slot_info[KMIN_SCC1_SLOT].unit);

		if ((intr & KMIN_INTR_SCSI) &&
			tc_slot_info[KMIN_SCSI_SLOT].intr)
			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
			(tc_slot_info[KMIN_SCSI_SLOT].unit);

		if ((intr & KMIN_INTR_LANCE) &&
			tc_slot_info[KMIN_LANCE_SLOT].intr)
			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
			(tc_slot_info[KMIN_LANCE_SLOT].unit);

		/* power-supply warnings, rate-limited to three messages */
		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
			*imaskp = 0;
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
			*imaskp = 0;
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	/* TURBOchannel option slot interrupts */
	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
	/* new spl: everything enabled except interrupts still pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1009 
1010 /*
1011  * Maxine hardwark interrupts. (Personal DECstation 5000/xx)
1012  */
xine_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;

	/* restrict the ASIC interrupt mask to the configured set */
	old_mask = *imaskp & xine_tc3_imask;
	*imaskp = old_mask;

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge (and otherwise ignore) DMA error conditions */
		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);

		if (intr & XINE_INTR_LANCE_READ_E)
			*intrp &= ~XINE_INTR_LANCE_READ_E;

		/* dispatch each pending device interrupt to its driver */
		if ((intr & XINE_INTR_FLOPPY) &&
			tc_slot_info[XINE_FLOPPY_SLOT].intr)
			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
			(tc_slot_info[XINE_FLOPPY_SLOT].unit);

		if ((intr & XINE_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & XINE_INTR_ISDN) &&
			tc_slot_info[XINE_ISDN_SLOT].intr)
			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
			(tc_slot_info[XINE_ISDN_SLOT].unit);

		if ((intr & XINE_INTR_SCSI) &&
			tc_slot_info[XINE_SCSI_SLOT].intr)
			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
			(tc_slot_info[XINE_SCSI_SLOT].unit);

		if ((intr & XINE_INTR_LANCE) &&
			tc_slot_info[XINE_LANCE_SLOT].intr)
			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
			(tc_slot_info[XINE_LANCE_SLOT].unit);

		if ((intr & XINE_INTR_SCC_0) &&
			tc_slot_info[XINE_SCC0_SLOT].intr)
			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
			(tc_slot_info[XINE_SCC0_SLOT].unit);

		if ((intr & XINE_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & XINE_INTR_DTOP_RX) &&
			tc_slot_info[XINE_DTOP_SLOT].intr)
			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
			(tc_slot_info[XINE_DTOP_SLOT].unit);

	}
	if (mask & MACH_INT_MASK_2)
		kn02ba_errintr();
	/* new spl: everything enabled except interrupts still pending */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1106 
#ifdef DS5000_240
/*
 * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
 *
 * mask      - pending hard-interrupt bits from the cause register
 * pc        - program counter at the time of the interrupt
 * statusReg - CPU status register at the time of the interrupt
 * causeReg  - CPU cause register at the time of the interrupt
 *
 * Acknowledges the clock first, then dispatches IOASIC/TurboChannel
 * device interrupts to the handlers registered in tc_slot_info[].
 * Returns the status-register value to resume with: the hard interrupt
 * bits that are enabled and not still pending, with interrupts enabled
 * (MACH_SR_INT_ENA_CUR).
 */
kn03_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	/* IOASIC interrupt-mask and interrupt-request registers */
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;

	/* restrict the IOASIC mask to the currently permitted interrupts */
	old_mask = *imaskp & kn03_tc3_imask;
	*imaskp = old_mask;

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	/* IOASIC (device) interrupts */
	if (mask & MACH_INT_MASK_0) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		/* SCSI DMA pointer-load: ack by clearing the request bit */
		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* DMA error conditions: acknowledge only, no handler */
		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);

		if (intr & KN03_INTR_LANCE_READ_E)
			*intrp &= ~KN03_INTR_LANCE_READ_E;

		/* dispatch TurboChannel option slots and baseboard devices */
		if ((intr & KN03_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & KN03_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & KN03_INTR_TC_2) &&
			tc_slot_info[2].intr)
			(*(tc_slot_info[2].intr))
			(tc_slot_info[2].unit);

		if ((intr & KN03_INTR_SCSI) &&
			tc_slot_info[KN03_SCSI_SLOT].intr)
			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
			(tc_slot_info[KN03_SCSI_SLOT].unit);

		if ((intr & KN03_INTR_LANCE) &&
			tc_slot_info[KN03_LANCE_SLOT].intr)
			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
			(tc_slot_info[KN03_LANCE_SLOT].unit);

		if ((intr & KN03_INTR_SCC_0) &&
			tc_slot_info[KN03_SCC0_SLOT].intr)
			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
			(tc_slot_info[KN03_SCC0_SLOT].unit);

		if ((intr & KN03_INTR_SCC_1) &&
			tc_slot_info[KN03_SCC1_SLOT].intr)
			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
			(tc_slot_info[KN03_SCC1_SLOT].unit);

	}
	/* memory/bus error interrupt */
	if (mask & MACH_INT_MASK_3)
		kn03_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
#endif /* DS5000_240 */
1200 
1201 /*
1202  * This is called from MachUserIntr() if astpending is set.
1203  * This is very similar to the tail of trap().
1204  */
1205 softintr(statusReg, pc)
1206 	unsigned statusReg;	/* status register at time of the exception */
1207 	unsigned pc;		/* program counter where to continue */
1208 {
1209 	register struct proc *p = curproc;
1210 	int sig;
1211 
1212 	cnt.v_soft++;
1213 	/* take pending signals */
1214 	while ((sig = CURSIG(p)) != 0)
1215 		psig(sig);
1216 	p->p_pri = p->p_usrpri;
1217 	astpending = 0;
1218 	if (p->p_flag & SOWEUPC) {
1219 		p->p_flag &= ~SOWEUPC;
1220 		ADDUPROF(p);
1221 	}
1222 	if (want_resched) {
1223 		int s;
1224 
1225 		/*
1226 		 * Since we are curproc, clock will normally just change
1227 		 * our priority without moving us from one queue to another
1228 		 * (since the running process is not on a queue.)
1229 		 * If that happened after we setrq ourselves but before we
1230 		 * swtch()'ed, we might not be on the queue indicated by
1231 		 * our priority.
1232 		 */
1233 		s = splstatclock();
1234 		setrq(p);
1235 		p->p_stats->p_ru.ru_nivcsw++;
1236 		swtch();
1237 		splx(s);
1238 		while ((sig = CURSIG(p)) != 0)
1239 			psig(sig);
1240 	}
1241 	curpri = p->p_pri;
1242 }
1243 
#ifdef DEBUG
/*
 * Print the trap-history ring buffer (trapdebug[]), newest entry
 * first, stopping at the first unused (cause == 0) slot, then clear
 * the buffer.  Runs at splhigh() so the buffer is stable while we
 * walk it.
 *
 * msg - caller-supplied tag printed in the header line
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* trp points at the next free slot; step backwards, wrapping */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf("   RA %x code %d\n", trp-> ra, trp->code);
	}
	/* reset the ring so old entries are not re-dumped */
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif
1271 
1272 #ifdef X_KLUGE
1273 /*
1274  * This is a kludge to allow X windows to work.
1275  */
1276 caddr_t
1277 vmUserMap(size, pa)
1278 	int size;
1279 	unsigned pa;
1280 {
1281 	register caddr_t v;
1282 	unsigned off, entry;
1283 
1284 	if (nUserMapPtes == 0)
1285 		UserMapPid = curproc->p_pid;
1286 	else if (UserMapPid != curproc->p_pid)
1287 		return ((caddr_t)0);
1288 	off = pa & PGOFSET;
1289 	size = btoc(off + size);
1290 	if (nUserMapPtes + size > NPTES)
1291 		return ((caddr_t)0);
1292 	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
1293 	entry = (pa & 0x9ffff000) | PG_V | PG_M;
1294 	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
1295 		entry |= PG_N;
1296 	while (size > 0) {
1297 		UserMapPtes[nUserMapPtes].pt_entry = entry;
1298 		entry += NBPG;
1299 		nUserMapPtes++;
1300 		size--;
1301 	}
1302 	return (v);
1303 }
1304 
1305 vmUserUnmap()
1306 {
1307 	int id;
1308 
1309 	nUserMapPtes = 0;
1310 	if (UserMapPid == curproc->p_pid) {
1311 		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
1312 		if (id >= 0)
1313 			MachTLBFlushPID(id);
1314 	}
1315 	UserMapPid = 0;
1316 }
1317 #endif
1318 
1319 /*
1320  *----------------------------------------------------------------------
1321  *
1322  * MemErrorInterrupts --
1323  *   pmax_errintr - for the DS2100/DS3100
1324  *   kn02_errintr - for the DS5000/200
1325  *   kn02ba_errintr - for the DS5000/1xx and DS5000/xx
1326  *
 *	Handle an interrupt for the control register.
1328  *
1329  * Results:
1330  *	None.
1331  *
1332  * Side effects:
1333  *	None.
1334  *
1335  *----------------------------------------------------------------------
1336  */
1337 static void
1338 pmax_errintr()
1339 {
1340 	volatile u_short *sysCSRPtr =
1341 		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
1342 	u_short csr;
1343 
1344 	csr = *sysCSRPtr;
1345 
1346 	if (csr & KN01_CSR_MERR) {
1347 		printf("Memory error at 0x%x\n",
1348 			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
1349 		panic("Mem error interrupt");
1350 	}
1351 	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
1352 }
1353 
1354 static void
1355 kn02_errintr()
1356 {
1357 
1358 	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR));
1359 	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
1360 	MachEmptyWriteBuffer();
1361 }
1362 
#ifdef DS5000_240
/*
 * DS5000/240 memory-error interrupt: report the error address
 * register, then zero it to acknowledge the interrupt.
 */
static void
kn03_errintr()
{
	register unsigned *erradrp;

	erradrp = (unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR);
	printf("erradr %x\n", *erradrp);
	*erradrp = 0;
	MachEmptyWriteBuffer();
}
#endif /* DS5000_240 */
1373 
1374 static void
1375 kn02ba_errintr()
1376 {
1377 	register int mer, adr, siz;
1378 	static int errintr_cnt = 0;
1379 
1380 	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
1381 	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
1382 	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);
1383 
1384 	/* clear interrupt bit */
1385 	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;
1386 
1387 	errintr_cnt++;
1388 	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
1389 	       "Bad memory chip at phys ",
1390 	       kn02ba_recover_erradr(adr, mer),
1391 	       mer, siz, adr);
1392 }
1393 
1394 static unsigned
1395 kn02ba_recover_erradr(phys, mer)
1396 	register unsigned phys, mer;
1397 {
1398 	/* phys holds bits 28:2, mer knows which byte */
1399 	switch (mer & KMIN_MER_LASTBYTE) {
1400 	case KMIN_LASTB31:
1401 		mer = 3; break;
1402 	case KMIN_LASTB23:
1403 		mer = 2; break;
1404 	case KMIN_LASTB15:
1405 		mer = 1; break;
1406 	case KMIN_LASTB07:
1407 		mer = 0; break;
1408 	}
1409 	return ((phys & KMIN_AER_ADDR_MASK) | mer);
1410 }
1411 
1412 /*
1413  * Return the resulting PC as if the branch was executed.
1414  */
1415 unsigned
1416 MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
1417 	unsigned *regsPtr;
1418 	unsigned instPC;
1419 	unsigned fpcCSR;
1420 	int allowNonBranch;
1421 {
1422 	InstFmt inst;
1423 	unsigned retAddr;
1424 	int condition;
1425 	extern unsigned GetBranchDest();
1426 
1427 #if 0
1428 	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
1429 		*instPC, fpcCSR);
1430 #endif
1431 
1432 	inst = *(InstFmt *)instPC;
1433 	switch ((int)inst.JType.op) {
1434 	case OP_SPECIAL:
1435 		switch ((int)inst.RType.func) {
1436 		case OP_JR:
1437 		case OP_JALR:
1438 			retAddr = regsPtr[inst.RType.rs];
1439 			break;
1440 
1441 		default:
1442 			if (!allowNonBranch)
1443 				panic("MachEmulateBranch: Non-branch");
1444 			retAddr = instPC + 4;
1445 			break;
1446 		}
1447 		break;
1448 
1449 	case OP_BCOND:
1450 		switch ((int)inst.IType.rt) {
1451 		case OP_BLTZ:
1452 		case OP_BLTZAL:
1453 			if ((int)(regsPtr[inst.RType.rs]) < 0)
1454 				retAddr = GetBranchDest((InstFmt *)instPC);
1455 			else
1456 				retAddr = instPC + 8;
1457 			break;
1458 
1459 		case OP_BGEZAL:
1460 		case OP_BGEZ:
1461 			if ((int)(regsPtr[inst.RType.rs]) >= 0)
1462 				retAddr = GetBranchDest((InstFmt *)instPC);
1463 			else
1464 				retAddr = instPC + 8;
1465 			break;
1466 
1467 		default:
1468 			panic("MachEmulateBranch: Bad branch cond");
1469 		}
1470 		break;
1471 
1472 	case OP_J:
1473 	case OP_JAL:
1474 		retAddr = (inst.JType.target << 2) |
1475 			((unsigned)instPC & 0xF0000000);
1476 		break;
1477 
1478 	case OP_BEQ:
1479 		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1480 			retAddr = GetBranchDest((InstFmt *)instPC);
1481 		else
1482 			retAddr = instPC + 8;
1483 		break;
1484 
1485 	case OP_BNE:
1486 		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1487 			retAddr = GetBranchDest((InstFmt *)instPC);
1488 		else
1489 			retAddr = instPC + 8;
1490 		break;
1491 
1492 	case OP_BLEZ:
1493 		if ((int)(regsPtr[inst.RType.rs]) <= 0)
1494 			retAddr = GetBranchDest((InstFmt *)instPC);
1495 		else
1496 			retAddr = instPC + 8;
1497 		break;
1498 
1499 	case OP_BGTZ:
1500 		if ((int)(regsPtr[inst.RType.rs]) > 0)
1501 			retAddr = GetBranchDest((InstFmt *)instPC);
1502 		else
1503 			retAddr = instPC + 8;
1504 		break;
1505 
1506 	case OP_COP1:
1507 		switch (inst.RType.rs) {
1508 		case OP_BCx:
1509 		case OP_BCy:
1510 			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1511 				condition = fpcCSR & MACH_FPC_COND_BIT;
1512 			else
1513 				condition = !(fpcCSR & MACH_FPC_COND_BIT);
1514 			if (condition)
1515 				retAddr = GetBranchDest((InstFmt *)instPC);
1516 			else
1517 				retAddr = instPC + 8;
1518 			break;
1519 
1520 		default:
1521 			if (!allowNonBranch)
1522 				panic("MachEmulateBranch: Bad coproc branch instruction");
1523 			retAddr = instPC + 4;
1524 		}
1525 		break;
1526 
1527 	default:
1528 		if (!allowNonBranch)
1529 			panic("MachEmulateBranch: Non-branch instruction");
1530 		retAddr = instPC + 4;
1531 	}
1532 #if 0
1533 	printf("Target addr=%x\n", retAddr);
1534 #endif
1535 	return (retAddr);
1536 }
1537 
1538 unsigned
1539 GetBranchDest(InstPtr)
1540 	InstFmt *InstPtr;
1541 {
1542 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1543 }
1544 
1545 /*
1546  * This routine is called by procxmt() to single step one instruction.
1547  * We do this by storing a break instruction after the current instruction,
1548  * resuming execution, and then restoring the old instruction.
1549  */
1550 cpu_singlestep(p)
1551 	register struct proc *p;
1552 {
1553 	register unsigned va;
1554 	register int *locr0 = p->p_md.md_regs;
1555 	int i;
1556 
1557 	/* compute next address after current location */
1558 	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
1559 	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
1560 	    !useracc((caddr_t)va, 4, B_READ)) {
1561 		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
1562 			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
1563 		return (EFAULT);
1564 	}
1565 	p->p_md.md_ss_addr = va;
1566 	p->p_md.md_ss_instr = fuiword((caddr_t)va);
1567 	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1568 	if (i < 0) {
1569 		vm_offset_t sa, ea;
1570 		int rv;
1571 
1572 		sa = trunc_page((vm_offset_t)va);
1573 		ea = round_page((vm_offset_t)va+sizeof(int)-1);
1574 		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
1575 			VM_PROT_DEFAULT, FALSE);
1576 		if (rv == KERN_SUCCESS) {
1577 			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1578 			(void) vm_map_protect(&p->p_vmspace->vm_map,
1579 				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
1580 		}
1581 	}
1582 	if (i < 0)
1583 		return (EFAULT);
1584 	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
1585 		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
1586 		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
1587 	return (0);
1588 }
1589