xref: /original-bsd/sys/news3400/news3400/trap.c (revision 4670e840)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department, Ralph Campbell, Sony Corp. and Kazumasa Utashiro
9  * of Software Research Associates, Inc.
10  *
11  * %sccs.include.redist.c%
12  *
13  * from: Utah $Hdr: trap.c 1.32 91/04/06$
14  *
15  *	@(#)trap.c	7.7 (Berkeley) 03/09/93
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/proc.h>
21 #include <sys/kernel.h>
22 #include <sys/signalvar.h>
23 #include <sys/syscall.h>
24 #include <sys/user.h>
25 #include <sys/buf.h>
26 #ifdef KTRACE
27 #include <sys/ktrace.h>
28 #endif
29 #include <net/netisr.h>
30 
31 #include <machine/trap.h>
32 #include <machine/psl.h>
33 #include <machine/reg.h>
34 #include <machine/cpu.h>
35 #include <machine/pte.h>
36 #include <machine/mips_opcode.h>
37 #include <machine/adrsmap.h>
38 
39 #include <vm/vm.h>
40 #include <vm/vm_kern.h>
41 #include <vm/vm_page.h>
42 
43 #include "lp.h"
44 #include "bm.h"
45 #include "ms.h"
46 #include "en.h"
47 #include <news3400/hbdev/dmac_0448.h>
48 #include <news3400/sio/scc.h>
49 
50 /*
51  * This is a kludge to allow X windows to work.
52  */
53 #undef X_KLUGE
54 
55 #ifdef X_KLUGE
56 #define USER_MAP_ADDR	0x4000
57 #define NPTES 300
58 static pt_entry_t UserMapPtes[NPTES];
59 static unsigned nUserMapPtes;
60 static pid_t UserMapPid;
61 #endif
62 
63 struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */
64 
65 extern void MachKernGenException();
66 extern void MachUserGenException();
67 extern void MachKernIntr();
68 extern void MachUserIntr();
69 extern void MachTLBModException();
70 extern void MachTLBMissException();
71 extern unsigned MachEmulateBranch();
72 
73 void (*machExceptionTable[])() = {
74 /*
75  * The kernel exception handlers.
76  */
77 	MachKernIntr,			/* external interrupt */
78 	MachKernGenException,		/* TLB modification */
79 	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
80 	MachTLBMissException,		/* TLB miss (store) */
81 	MachKernGenException,		/* address error (load or I-fetch) */
82 	MachKernGenException,		/* address error (store) */
83 	MachKernGenException,		/* bus error (I-fetch) */
84 	MachKernGenException,		/* bus error (load or store) */
85 	MachKernGenException,		/* system call */
86 	MachKernGenException,		/* breakpoint */
87 	MachKernGenException,		/* reserved instruction */
88 	MachKernGenException,		/* coprocessor unusable */
89 	MachKernGenException,		/* arithmetic overflow */
90 	MachKernGenException,		/* reserved */
91 	MachKernGenException,		/* reserved */
92 	MachKernGenException,		/* reserved */
93 /*
94  * The user exception handlers.
95  */
96 	MachUserIntr,
97 	MachUserGenException,
98 	MachUserGenException,
99 	MachUserGenException,
100 	MachUserGenException,
101 	MachUserGenException,
102 	MachUserGenException,
103 	MachUserGenException,
104 	MachUserGenException,
105 	MachUserGenException,
106 	MachUserGenException,
107 	MachUserGenException,
108 	MachUserGenException,
109 	MachUserGenException,
110 	MachUserGenException,
111 	MachUserGenException,
112 };
113 
114 char	*trap_type[] = {
115 	"external interrupt",
116 	"TLB modification",
117 	"TLB miss (load or instr. fetch)",
118 	"TLB miss (store)",
119 	"address error (load or I-fetch)",
120 	"address error (store)",
121 	"bus error (I-fetch)",
122 	"bus error (load or store)",
123 	"system call",
124 	"breakpoint",
125 	"reserved instruction",
126 	"coprocessor unusable",
127 	"arithmetic overflow",
128 	"reserved 13",
129 	"reserved 14",
130 	"reserved 15",
131 };
132 
133 #ifdef DEBUG
134 #define TRAPSIZE	10
135 struct trapdebug {		/* trap history buffer for debugging */
136 	u_int	status;
137 	u_int	cause;
138 	u_int	vadr;
139 	u_int	pc;
140 	u_int	ra;
141 	u_int	code;
142 } trapdebug[TRAPSIZE], *trp = trapdebug;
143 #endif
144 
145 /*
146  * Handle an exception.
147  * Called from MachKernGenException() or MachUserGenException()
148  * when a processor trap occurs.
149  * In the case of a kernel trap, we return the pc at which to resume if
150  * ((struct pcb *)UADDR)->pcb_onfault is set; otherwise we return the old pc.
151  */
152 unsigned
153 trap(statusReg, causeReg, vadr, pc, args)
154 	unsigned statusReg;	/* status register at time of the exception */
155 	unsigned causeReg;	/* cause register at time of exception */
156 	unsigned vadr;		/* address (if any) the fault occurred on */
157 	unsigned pc;		/* program counter where to continue */
158 {
159 	register int type, i;
160 	unsigned ucode = 0;
161 	register struct proc *p = curproc;
162 	u_quad_t sticks;
163 	vm_prot_t ftype;
164 	extern unsigned onfault_table[];
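	/*
	 * pcb_onfault, when non-zero, is an index into onfault_table[]
	 * giving the pc at which to resume when a fault is taken while
	 * the kernel is accessing user space (copyin()/copyout() and
	 * friends); entry 4 is reserved for fuswintr()/suswintr().
	 */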
165 
166 #ifdef DEBUG
167 	trp->status = statusReg;
168 	trp->cause = causeReg;
169 	trp->vadr = vadr;
170 	trp->pc = pc;
171 	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
172 		p->p_md.md_regs[RA];
173 	trp->code = 0;
174 	if (++trp == &trapdebug[TRAPSIZE])
175 		trp = trapdebug;
176 #endif
177 
178 	cnt.v_trap++;
179 	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
180 	if (USERMODE(statusReg)) {
181 		type |= T_USER;
182 		sticks = p->p_sticks;
183 	}
184 
185 	/*
186 	 * Enable hardware interrupts if they were on before.
187 	 * We only respond to software interrupts when returning to user mode.
188 	 */
189 	if (statusReg & MACH_SR_INT_ENA_PREV)
190 		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);
191 
192 	switch (type) {
193 	case T_TLB_MOD:
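		/*
		 * A store hit a valid TLB entry whose dirty (PG_M) bit is
		 * clear.  For kernel addresses (sign bit set) we update the
		 * kernel PTE and the TLB directly and mark the physical page
		 * modified, unless the page is read-only, in which case this
		 * is a genuine protection fault.
		 */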
194 		/* check for kernel address */
195 		if ((int)vadr < 0) {
196 			register pt_entry_t *pte;
197 			register unsigned entry;
198 #ifndef ATTR
199 			register vm_offset_t pa;
200 #endif
201 
202 			pte = kvtopte(vadr);
203 			entry = pte->pt_entry;
204 			if (entry & PG_RO) {
205 				/* write to read only page in the kernel */
206 				ftype = VM_PROT_WRITE;
207 				goto kernel_fault;
208 			}
209 			entry |= PG_M;
210 			pte->pt_entry = entry;
211 			vadr &= PG_FRAME;
212 			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
213 				entry, MachTLBUpdate(vadr, entry)); /* XXX */
214 #ifdef ATTR
215 			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
216 #else
217 			pa = entry & PG_FRAME;
218 			if (!IS_VM_PHYSADDR(pa))
219 				panic("trap: kmod");
220 			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
221 #endif
222 			return (pc);
223 		}
224 		/* FALLTHROUGH */
225 
226 	case T_TLB_MOD+T_USER:
227 	    {
228 		pmap_hash_t hp;
229 #ifndef ATTR
230 		vm_offset_t pa;
231 #endif
232 #ifdef DIAGNOSTIC
233 		extern pmap_hash_t zero_pmap_hash;
234 		extern pmap_t cur_pmap;
235 
236 		if (cur_pmap->pm_hash == zero_pmap_hash ||
237 		    cur_pmap->pm_hash == (pmap_hash_t)0)
238 			panic("tlbmod");
239 #endif
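		/*
		 * User mappings live in a two-slot software hash bucket
		 * selected by the virtual address; find the slot whose high
		 * (VPN) word matches and set its modified bit, unless it is
		 * read-only, in which case take the normal fault path.
		 */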
240 		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
241 		if (((hp->pmh_pte[0].high ^ vadr) & ~PGOFSET) == 0)
242 			i = 0;
243 		else if (((hp->pmh_pte[1].high ^ vadr) & ~PGOFSET) == 0)
244 			i = 1;
245 		else
246 			panic("trap: tlb umod not found");
247 		if (hp->pmh_pte[i].low & PG_RO) {
248 			ftype = VM_PROT_WRITE;
249 			goto dofault;
250 		}
251 		hp->pmh_pte[i].low |= PG_M;
252 		printf("trap: TLBupdate hi %x lo %x i %x\n",
253 			hp->pmh_pte[i].high, hp->pmh_pte[i].low,
254 			MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low)); /* XXX */
255 #ifdef ATTR
256 		pmap_attributes[atop(hp->pmh_pte[i].low - KERNBASE)] |=
257 			PMAP_ATTR_MOD;
258 #else
259 		pa = hp->pmh_pte[i].low & PG_FRAME;
260 		if (!IS_VM_PHYSADDR(pa))
261 			panic("trap: umod");
262 		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
263 #endif
264 		if (!USERMODE(statusReg))
265 			return (pc);
266 		goto out;
267 	    }
268 
269 	case T_TLB_LD_MISS:
270 	case T_TLB_ST_MISS:
271 		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
272 		/* check for kernel address */
273 		if ((int)vadr < 0) {
274 			register vm_offset_t va;
275 			int rv;
276 
277 		kernel_fault:
278 			va = trunc_page((vm_offset_t)vadr);
279 			rv = vm_fault(kernel_map, va, ftype, FALSE);
280 			if (rv == KERN_SUCCESS)
281 				return (pc);
282 			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
283 				((struct pcb *)UADDR)->pcb_onfault = 0;
284 				return (onfault_table[i]);
285 			}
286 			goto err;
287 		}
288 		/*
289 		 * It is an error for the kernel to access user space except
290 		 * through the copyin/copyout routines.
291 		 */
292 		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
293 			goto err;
294 		/* check for fuswintr() or suswintr() getting a page fault */
295 		if (i == 4)
296 			return (onfault_table[i]);
297 		goto dofault;
298 
299 	case T_TLB_LD_MISS+T_USER:
300 		ftype = VM_PROT_READ;
301 		goto dofault;
302 
303 	case T_TLB_ST_MISS+T_USER:
304 		ftype = VM_PROT_WRITE;
305 	dofault:
306 	    {
307 		register vm_offset_t va;
308 		register struct vmspace *vm = p->p_vmspace;
309 		register vm_map_t map = &vm->vm_map;
310 		int rv;
311 
312 #ifdef X_KLUGE
313 		if (p->p_pid == UserMapPid &&
314 		    (va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
315 			register pt_entry_t *pte;
316 
317 			pte = &UserMapPtes[va];
318 			MachTLBWriteRandom((vadr & PG_FRAME) |
319 				(vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT),
320 				pte->pt_entry);
321 			return (pc);
322 		}
323 #endif
324 		va = trunc_page((vm_offset_t)vadr);
325 		rv = vm_fault(map, va, ftype, FALSE);
326 		if (rv != KERN_SUCCESS) {
327 			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
328 				map, va, ftype, rv, vadr, pc,
329 				!USERMODE(statusReg) ? ((int *)&args)[19] :
330 					p->p_md.md_regs[RA]); /* XXX */
331 			printf("\tpid %d %s PC %x RA %x\n", p->p_pid,
332 				p->p_comm, p->p_md.md_regs[PC],
333 				p->p_md.md_regs[RA]); /* XXX */
334 #ifdef DEBUG
335 			trapDump("vm_fault");
336 #endif
337 		}
338 		/*
339 		 * If this was a stack access we keep track of the maximum
340 		 * accessed stack size.  Also, if vm_fault gets a protection
341 		 * failure it is due to accessing the stack region outside
342 		 * the current limit and we need to reflect that as an access
343 		 * error.
344 		 */
345 		if ((caddr_t)va >= vm->vm_maxsaddr) {
346 			if (rv == KERN_SUCCESS) {
347 				unsigned nss;
348 
349 				nss = clrnd(btoc(USRSTACK-(unsigned)va));
350 				if (nss > vm->vm_ssize)
351 					vm->vm_ssize = nss;
352 			} else if (rv == KERN_PROTECTION_FAILURE)
353 				rv = KERN_INVALID_ADDRESS;
354 		}
355 		if (rv == KERN_SUCCESS) {
356 			if (!USERMODE(statusReg))
357 				return (pc);
358 			goto out;
359 		}
360 		if (!USERMODE(statusReg)) {
361 			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
362 				((struct pcb *)UADDR)->pcb_onfault = 0;
363 				return (onfault_table[i]);
364 			}
365 			goto err;
366 		}
367 		ucode = vadr;
368 		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
369 		break;
370 	    }
371 
372 	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
373 	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
374 	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
375 	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
376 		i = SIGSEGV;
377 		break;
378 
379 	case T_SYSCALL+T_USER:
380 	    {
381 		register int *locr0 = p->p_md.md_regs;
382 		register struct sysent *callp;
383 		unsigned int code;
384 		int numsys;
385 		struct args {
386 			int i[8];
387 		} args;
388 		int rval[2];
389 		struct sysent *systab;
390 		extern int nsysent;
391 #ifdef COMPAT_NEWSOS
392 		extern int nnewssys;
393 		extern struct sysent newssys[];
394 #endif
395 
396 		cnt.v_syscall++;
397 		/* compute next PC after syscall instruction */
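		/*
		 * A negative cause register means the BD (branch delay) bit
		 * is set: the syscall sits in a branch delay slot and the
		 * next pc must be found by emulating the branch.
		 */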
398 		if ((int)causeReg < 0)
399 			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
400 		else
401 			locr0[PC] += 4;
402 		systab = sysent;
403 		numsys = nsysent;
404 		code = locr0[V0];
405 #ifdef COMPAT_NEWSOS
406 		if (code >= 1000) {
407 			code -= 1000;
408 			systab = newssys;
409 			numsys = nnewssys;
410 		}
411 #endif
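		/*
		 * Syscall convention: the number is passed in v0, the first
		 * four arguments in a0-a3, and any further arguments on the
		 * user stack above the four-word register-argument save area
		 * (hence the copyin()s from SP + 4 * sizeof(int)).  Results
		 * return in v0/v1, with a3 non-zero on error.
		 */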
412 		switch (code) {
413 		case SYS_indir:
414 			/*
415 			 * Code is first argument, followed by actual args.
416 			 */
417 			code = locr0[A0];
418 #ifdef COMPAT_NEWSOS
419 			if (code >= 1000) {
420 				code -= 1000;
421 				systab = newssys;
422 				numsys = nnewssys;
423 			}
424 #endif
425 			if (code >= numsys)
426 				callp = &systab[SYS_indir]; /* (illegal) */
427 			else
428 				callp = &systab[code];
429 			i = callp->sy_narg;
430 			args.i[0] = locr0[A1];
431 			args.i[1] = locr0[A2];
432 			args.i[2] = locr0[A3];
433 			if (i > 3) {
434 				i = copyin((caddr_t)(locr0[SP] +
435 						4 * sizeof(int)),
436 					(caddr_t)&args.i[3],
437 					(u_int)(i - 3) * sizeof(int));
438 				if (i) {
439 					locr0[V0] = i;
440 					locr0[A3] = 1;
441 #ifdef KTRACE
442 					if (KTRPOINT(p, KTR_SYSCALL))
443 						ktrsyscall(p->p_tracep, code,
444 							callp->sy_narg, args.i);
445 #endif
446 					goto done;
447 				}
448 			}
449 			break;
450 
451 		case SYS___indir:
452 			/*
453 			 * Like indir, but code is a quad, so as to maintain
454 			 * quad alignment for the rest of the arguments.
455 			 */
456 			code = locr0[A0 + _QUAD_LOWWORD];
457 			if (code >= numsys)
458 				callp = &systab[SYS_indir]; /* (illegal) */
459 			else
460 				callp = &systab[code];
461 			i = callp->sy_narg;
462 			args.i[0] = locr0[A2];
463 			args.i[1] = locr0[A3];
464 			if (i > 2) {
465 				i = copyin((caddr_t)(locr0[SP] +
466 						4 * sizeof(int)),
467 					(caddr_t)&args.i[2],
468 					(u_int)(i - 2) * sizeof(int));
469 				if (i) {
470 					locr0[V0] = i;
471 					locr0[A3] = 1;
472 #ifdef KTRACE
473 					if (KTRPOINT(p, KTR_SYSCALL))
474 						ktrsyscall(p->p_tracep, code,
475 							callp->sy_narg, args.i);
476 #endif
477 					goto done;
478 				}
479 			}
480 			break;
481 
482 		default:
483 			if (code >= numsys)
484 				callp = &systab[SYS_indir]; /* (illegal) */
485 			else
486 				callp = &systab[code];
487 			i = callp->sy_narg;
488 			args.i[0] = locr0[A0];
489 			args.i[1] = locr0[A1];
490 			args.i[2] = locr0[A2];
491 			args.i[3] = locr0[A3];
492 			if (i > 4) {
493 				i = copyin((caddr_t)(locr0[SP] +
494 						4 * sizeof(int)),
495 					(caddr_t)&args.i[4],
496 					(u_int)(i - 4) * sizeof(int));
497 				if (i) {
498 					locr0[V0] = i;
499 					locr0[A3] = 1;
500 #ifdef KTRACE
501 					if (KTRPOINT(p, KTR_SYSCALL))
502 						ktrsyscall(p->p_tracep, code,
503 							callp->sy_narg, args.i);
504 #endif
505 					goto done;
506 				}
507 			}
508 		}
509 #ifdef KTRACE
510 		if (KTRPOINT(p, KTR_SYSCALL))
511 			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
512 #endif
513 		rval[0] = 0;
514 		rval[1] = locr0[V1];
515 #ifdef DEBUG
516 		if (trp == trapdebug)
517 			trapdebug[TRAPSIZE - 1].code = code;
518 		else
519 			trp[-1].code = code;
520 #endif
521 		i = (*callp->sy_call)(p, &args, rval);
522 		/*
523 		 * Reinitialize proc pointer `p' as it may be different
524 		 * if this is a child returning from a fork syscall.
525 		 */
526 		p = curproc;
527 		locr0 = p->p_md.md_regs;
528 #ifdef DEBUG
529 		{ int s;
530 		s = splhigh();
531 		trp->status = statusReg;
532 		trp->cause = causeReg;
533 		trp->vadr = locr0[SP];
534 		trp->pc = locr0[PC];
535 		trp->ra = locr0[RA];
536 		trp->code = -code;
537 		if (++trp == &trapdebug[TRAPSIZE])
538 			trp = trapdebug;
539 		splx(s);
540 		}
541 #endif
542 		switch (i) {
543 		case 0:
544 			locr0[V0] = rval[0];
545 			locr0[V1] = rval[1];
546 			locr0[A3] = 0;
547 			break;
548 
549 		case ERESTART:
550 			locr0[PC] = pc;
551 			break;
552 
553 		case EJUSTRETURN:
554 			break;	/* nothing to do */
555 
556 		default:
557 			locr0[V0] = i;
558 			locr0[A3] = 1;
559 		}
560 	done:
561 #ifdef KTRACE
562 		if (KTRPOINT(p, KTR_SYSRET))
563 			ktrsysret(p->p_tracep, code, i, rval[0]);
564 #endif
565 
566 		goto out;
567 	    }
568 
569 	case T_BREAK+T_USER:
570 	    {
571 		register unsigned va, instr;
572 
573 		/* compute address of break instruction */
574 		va = pc;
575 		if ((int)causeReg < 0)
576 			va += 4;
577 
578 		/* read break instruction */
579 		instr = fuiword((caddr_t)va);
580 #ifdef KADB
581 		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
582 			goto err;
583 #endif
584 		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
585 			i = SIGTRAP;
586 			break;
587 		}
588 
589 		/* restore original instruction and clear BP  */
590 		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
591 		if (i < 0) {
592 			vm_offset_t sa, ea;
593 			int rv;
594 
595 			sa = trunc_page((vm_offset_t)va);
596 			ea = round_page((vm_offset_t)va+sizeof(int)-1);
597 			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
598 				VM_PROT_DEFAULT, FALSE);
599 			if (rv == KERN_SUCCESS) {
600 				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
601 				(void) vm_map_protect(&p->p_vmspace->vm_map,
602 					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
603 					FALSE);
604 			}
605 		}
606 		if (i < 0) {
607 			i = SIGTRAP;
608 			break;
609 		}
610 		p->p_md.md_ss_addr = 0;
611 		goto out;
612 	    }
613 
614 	case T_RES_INST+T_USER:
615 		i = SIGILL;
616 		break;
617 
618 	case T_COP_UNUSABLE+T_USER:
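		/*
		 * Only coprocessor 1 (the FPU) may be used from user mode.
		 * Lazily switch the FP state from its last owner to this
		 * process and mark CP1 usable in its saved status register.
		 */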
619 		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
620 			i = SIGILL;	/* only FPU instructions allowed */
621 			break;
622 		}
623 		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
624 		machFPCurProcPtr = p;
625 		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
626 		p->p_md.md_flags |= MDP_FPUSED;
627 		goto out;
628 
629 	case T_OVFLOW+T_USER:
630 		i = SIGFPE;
631 		break;
632 
633 	case T_ADDR_ERR_LD:	/* misaligned access */
634 	case T_ADDR_ERR_ST:	/* misaligned access */
635 	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
636 		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
637 			((struct pcb *)UADDR)->pcb_onfault = 0;
638 			return (onfault_table[i]);
639 		}
640 		/* FALLTHROUGH */
641 
642 	default:
643 	err:
644 #ifdef KADB
645 	    {
646 		extern struct pcb kdbpcb;
647 
648 		if (USERMODE(statusReg))
649 			kdbpcb = p->p_addr->u_pcb;
650 		else {
651 			kdbpcb.pcb_regs[ZERO] = 0;
652 			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
653 			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
654 			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
655 			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
656 			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
657 			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
658 			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
659 			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
660 			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
661 			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
662 			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
663 			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
664 			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
665 			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
666 			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
667 			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
668 			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
669 			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
670 			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
671 			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
672 			kdbpcb.pcb_regs[PC] = pc;
673 			kdbpcb.pcb_regs[SR] = statusReg;
674 			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
675 		}
676 		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
677 			return (kdbpcb.pcb_regs[PC]);
678 	    }
679 #else
680 #ifdef DEBUG
681 		printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
682 			p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
683 		trapDump("trap");
684 		traceback();
685 #endif
686 #endif
687 		panic("trap");
688 	}
689 	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
690 		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
691 	trapsignal(p, i, ucode);
692 out:
693 	/*
694 	 * Note: we should only get here if returning to user mode.
695 	 */
696 	/* take pending signals */
697 	while ((i = CURSIG(p)) != 0)
698 		psig(i);
699 	p->p_pri = p->p_usrpri;
700 	astpending = 0;
701 	if (want_resched) {
702 		int s;
703 
704 		/*
705 		 * Since we are curproc, clock will normally just change
706 		 * our priority without moving us from one queue to another
707 		 * (since the running process is not on a queue.)
708 		 * If that happened after we setrq ourselves but before we
709 		 * swtch()'ed, we might not be on the queue indicated by
710 		 * our priority.
711 		 */
712 		s = splstatclock();
713 		setrq(p);
714 		p->p_stats->p_ru.ru_nivcsw++;
715 		swtch();
716 		splx(s);
717 		while ((i = CURSIG(p)) != 0)
718 			psig(i);
719 	}
720 	/*
721 	 * If profiling, charge system time to the trapped pc.
722 	 */
723 	if (p->p_flag & SPROFIL) {
724 		extern int psratio;
725 
726 		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
727 	}
728 	curpri = p->p_pri;
729 	return (pc);
730 }
731 
732 /*
733  * Handle an interrupt.
734  * Called from MachKernIntr() or MachUserIntr()
735  * Note: curproc might be NULL.
736  */
737 interrupt(statusReg, causeReg, pc)
738 	unsigned statusReg;	/* status register at time of the exception */
739 	unsigned causeReg;	/* cause register at time of exception */
740 	unsigned pc;		/* program counter where to continue */
741 {
742 	register unsigned mask;
743 	struct clockframe cf;
744 	int oonfault = ((struct pcb *)UADDR)->pcb_onfault;
745 
746 #ifdef DEBUG
747 	trp->status = statusReg;
748 	trp->cause = causeReg;
749 	trp->vadr = 0;
750 	trp->pc = pc;
751 	trp->ra = 0;
752 	trp->code = 0;
753 	if (++trp == &trapdebug[TRAPSIZE])
754 		trp = trapdebug;
755 #endif
756 
757 	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
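	/*
	 * Each pending level below is serviced with the SPL set to the
	 * corresponding MACH_SPL_MASK_n less any interrupts still pending
	 * in causeReg (MACH_SR_INT_ENA_CUR keeps interrupts enabled); the
	 * level's bit is then cleared from causeReg.
	 */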
758 	if (mask & MACH_INT_MASK_5) {		/* level 5 interrupt */
759 		splx((MACH_SPL_MASK_8 & ~causeReg) | MACH_SR_INT_ENA_CUR);
760 		printf("level 5 interrupt: PC %x CR %x SR %x\n",
761 			pc, causeReg, statusReg);
762 		causeReg &= ~MACH_INT_MASK_5;
763 	}
764 	if (mask & MACH_INT_MASK_4) {		/* level 4 interrupt */
765 		/*
766 		 * asynchronous bus error
767 		 */
768 		splx((MACH_SPL_MASK_7 & ~causeReg) | MACH_SR_INT_ENA_CUR);
769 		printf("level 4 interrupt: PC %x CR %x SR %x\n",
770 			pc, causeReg, statusReg);
771 		*(char *)INTCLR0 = INTCLR0_BERR;
772 		causeReg &= ~MACH_INT_MASK_4;
773 	}
774 	if (mask & MACH_INT_MASK_3) {		/* level 3 interrupt */
775 		/*
776 		 * fp error
777 		 */
778 		splx((MACH_SPL_MASK_6 & ~causeReg) | MACH_SR_INT_ENA_CUR);
779 		if (!USERMODE(statusReg)) {
780 #ifdef DEBUG
781 			trapDump("fpintr");
782 #else
783 			printf("FPU interrupt: PC %x CR %x SR %x\n",
784 				pc, causeReg, statusReg);
785 #endif
786 		} else
787 			MachFPInterrupt(statusReg, causeReg, pc);
788 		causeReg &= ~MACH_INT_MASK_3;
789 	}
790 	if (mask & MACH_INT_MASK_2) {		/* level 2 interrupt */
791 		register int stat;
792 
793 		splx((MACH_SPL_MASK_5 & ~causeReg) | MACH_SR_INT_ENA_CUR);
794 		stat = *(volatile u_char *)INTST0;
795 		if (stat & INTST0_TIMINT) {	/* timer */
796 			static int led_count = 0;
797 
798 			*(volatile u_char *)INTCLR0 = INTCLR0_TIMINT;
799 			cf.pc = pc;
800 			cf.sr = statusReg;
801 			hardclock(&cf);
802 			if (++led_count > hz) {
803 				led_count = 0;
804 				*(volatile u_char *)DEBUG_PORT ^= DP_LED1;
805 			}
806 		}
807 #if NBM > 0
808 		if (stat & INTST0_KBDINT)	/* keyboard */
809 			kbm_rint(SCC_KEYBOARD);
810 #endif
811 #if NMS > 0
812 		if (stat & INTST0_MSINT)	/* mouse */
813 			kbm_rint(SCC_MOUSE);
814 #endif
815 		causeReg &= ~MACH_INT_MASK_2;
816 	}
817 	if (mask & MACH_INT_MASK_1) {		/* level 1 interrupt */
818 		splx((MACH_SPL_MASK_4 & ~causeReg) | MACH_SR_INT_ENA_CUR);
819 		level1_intr();
820 		causeReg &= ~MACH_INT_MASK_1;
821 	}
822 	if (mask & MACH_INT_MASK_0) {		/* level 0 interrupt */
823 		splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR);
824 		level0_intr();
825 		causeReg &= ~MACH_INT_MASK_0;
826 	}
827 	splx((MACH_SPL_MASK_3 & ~causeReg) | MACH_SR_INT_ENA_CUR);
828 
829 	if (mask & MACH_SOFT_INT_MASK_0) {
830 		struct clockframe cf;
831 
832 		clearsoftclock();
833 		cnt.v_soft++;
834 		cf.pc = pc;
835 		cf.sr = statusReg;
836 		softclock();
837 	}
838 	/* process network interrupt if we trapped or will very soon */
839 	if ((mask & MACH_SOFT_INT_MASK_1) ||
840 	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
841 		clearsoftnet();
842 		cnt.v_soft++;
843 #ifdef INET
844 		if (netisr & (1 << NETISR_ARP)) {
845 			netisr &= ~(1 << NETISR_ARP);
846 			arpintr();
847 		}
848 		if (netisr & (1 << NETISR_IP)) {
849 			netisr &= ~(1 << NETISR_IP);
850 			ipintr();
851 		}
852 #endif
853 #ifdef NS
854 		if (netisr & (1 << NETISR_NS)) {
855 			netisr &= ~(1 << NETISR_NS);
856 			nsintr();
857 		}
858 #endif
859 #ifdef ISO
860 		if (netisr & (1 << NETISR_ISO)) {
861 			netisr &= ~(1 << NETISR_ISO);
862 			clnlintr();
863 		}
864 #endif
865 	}
866 	/* restore onfault flag */
867 	((struct pcb *)UADDR)->pcb_onfault = oonfault;
868 }
869 
870 /*
871  * This is called from MachUserIntr() if astpending is set.
872  * This is very similar to the tail of trap().
873  */
874 softintr(statusReg, pc)
875 	unsigned statusReg;	/* status register at time of the exception */
876 	unsigned pc;		/* program counter where to continue */
877 {
878 	register struct proc *p = curproc;
879 	int sig;
880 
881 	cnt.v_soft++;
882 	/* take pending signals */
883 	while ((sig = CURSIG(p)) != 0)
884 		psig(sig);
885 	p->p_pri = p->p_usrpri;
886 	astpending = 0;
887 	if (p->p_flag & SOWEUPC) {
888 		p->p_flag &= ~SOWEUPC;
889 		ADDUPROF(p);
890 	}
891 	if (want_resched) {
892 		int s;
893 
894 		/*
895 		 * Since we are curproc, clock will normally just change
896 		 * our priority without moving us from one queue to another
897 		 * (since the running process is not on a queue.)
898 		 * If that happened after we setrq ourselves but before we
899 		 * swtch()'ed, we might not be on the queue indicated by
900 		 * our priority.
901 		 */
902 		s = splstatclock();
903 		setrq(p);
904 		p->p_stats->p_ru.ru_nivcsw++;
905 		swtch();
906 		splx(s);
907 		while ((sig = CURSIG(p)) != 0)
908 			psig(sig);
909 	}
910 	curpri = p->p_pri;
911 }
912 
913 #ifdef DEBUG
914 trapDump(msg)
915 	char *msg;
916 {
917 	register int i;
918 	int s;
919 
920 	s = splhigh();
921 	printf("trapDump(%s)\n", msg);
922 	for (i = 0; i < TRAPSIZE; i++) {
923 		if (trp == trapdebug)
924 			trp = &trapdebug[TRAPSIZE - 1];
925 		else
926 			trp--;
927 		if (trp->cause == 0)
928 			break;
929 		printf("%s: ADR %x PC %x CR %x SR %x\n",
930 			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
931 				MACH_CR_EXC_CODE_SHIFT],
932 			trp->vadr, trp->pc, trp->cause, trp->status);
933 		printf("   RA %x code %d\n", trp->ra, trp->code);
934 	}
935 	bzero(trapdebug, sizeof(trapdebug));
936 	trp = trapdebug;
937 	splx(s);
938 }
939 #endif
940 
941 #ifdef X_KLUGE
942 /*
943  * This is a kludge to allow X windows to work.
944  */
945 caddr_t
946 vmUserMap(size, pa)
947 	int size;
948 	unsigned pa;
949 {
950 	register caddr_t v;
951 	unsigned off, entry;
952 
953 	if (nUserMapPtes == 0)
954 		UserMapPid = curproc->p_pid;
955 	else if (UserMapPid != curproc->p_pid)
956 		return ((caddr_t)0);
957 	off = pa & PGOFSET;
958 	size = btoc(off + size);
959 	if (nUserMapPtes + size > NPTES)
960 		return ((caddr_t)0);
961 	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
962 	entry = (pa & 0x9ffff000) | PG_V | PG_M;
963 	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
964 		entry |= PG_N;
965 	while (size > 0) {
966 		UserMapPtes[nUserMapPtes].pt_entry = entry;
967 		entry += NBPG;
968 		nUserMapPtes++;
969 		size--;
970 	}
971 	return (v);
972 }
973 
974 vmUserUnmap()
975 {
976 	int id;
977 
978 	nUserMapPtes = 0;
979 	if (UserMapPid == curproc->p_pid) {
980 		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
981 		if (id >= 0)
982 			MachTLBFlushPID(id);
983 	}
984 	UserMapPid = 0;
985 }
986 #endif
987 
988 /*
989  * Return the resulting PC as if the branch was executed.
990  */
991 unsigned
992 MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
993 	unsigned *regsPtr;
994 	unsigned instPC;
995 	unsigned fpcCSR;
996 	int allowNonBranch;
997 {
998 	InstFmt inst;
999 	unsigned retAddr;
1000 	int condition;
1001 	extern unsigned GetBranchDest();
1002 
1003 #if 0
1004 	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
1005 		*(unsigned *)instPC, fpcCSR);
1006 #endif
1007 
1008 	inst = *(InstFmt *)instPC;
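	/*
	 * Conventions used below: a taken branch resolves through
	 * GetBranchDest(); a branch that is not taken resumes at
	 * instPC + 8, skipping the branch and its delay slot; a
	 * non-branch instruction (when allowNonBranch) resumes at
	 * instPC + 4.
	 */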
1009 	switch ((int)inst.JType.op) {
1010 	case OP_SPECIAL:
1011 		switch ((int)inst.RType.func) {
1012 		case OP_JR:
1013 		case OP_JALR:
1014 			retAddr = regsPtr[inst.RType.rs];
1015 			break;
1016 
1017 		default:
1018 			if (!allowNonBranch)
1019 				panic("MachEmulateBranch: Non-branch");
1020 			retAddr = instPC + 4;
1021 			break;
1022 		}
1023 		break;
1024 
1025 	case OP_BCOND:
1026 		switch ((int)inst.IType.rt) {
1027 		case OP_BLTZ:
1028 		case OP_BLTZAL:
1029 			if ((int)(regsPtr[inst.RType.rs]) < 0)
1030 				retAddr = GetBranchDest((InstFmt *)instPC);
1031 			else
1032 				retAddr = instPC + 8;
1033 			break;
1034 
1035 		case OP_BGEZAL:
1036 		case OP_BGEZ:
1037 			if ((int)(regsPtr[inst.RType.rs]) >= 0)
1038 				retAddr = GetBranchDest((InstFmt *)instPC);
1039 			else
1040 				retAddr = instPC + 8;
1041 			break;
1042 
1043 		default:
1044 			panic("MachEmulateBranch: Bad branch cond");
1045 		}
1046 		break;
1047 
1048 	case OP_J:
1049 	case OP_JAL:
1050 		retAddr = (inst.JType.target << 2) |
1051 			((unsigned)instPC & 0xF0000000);
1052 		break;
1053 
1054 	case OP_BEQ:
1055 		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1056 			retAddr = GetBranchDest((InstFmt *)instPC);
1057 		else
1058 			retAddr = instPC + 8;
1059 		break;
1060 
1061 	case OP_BNE:
1062 		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1063 			retAddr = GetBranchDest((InstFmt *)instPC);
1064 		else
1065 			retAddr = instPC + 8;
1066 		break;
1067 
1068 	case OP_BLEZ:
1069 		if ((int)(regsPtr[inst.RType.rs]) <= 0)
1070 			retAddr = GetBranchDest((InstFmt *)instPC);
1071 		else
1072 			retAddr = instPC + 8;
1073 		break;
1074 
1075 	case OP_BGTZ:
1076 		if ((int)(regsPtr[inst.RType.rs]) > 0)
1077 			retAddr = GetBranchDest((InstFmt *)instPC);
1078 		else
1079 			retAddr = instPC + 8;
1080 		break;
1081 
1082 	case OP_COP1:
1083 		switch (inst.RType.rs) {
1084 		case OP_BCx:
1085 		case OP_BCy:
1086 			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1087 				condition = fpcCSR & MACH_FPC_COND_BIT;
1088 			else
1089 				condition = !(fpcCSR & MACH_FPC_COND_BIT);
1090 			if (condition)
1091 				retAddr = GetBranchDest((InstFmt *)instPC);
1092 			else
1093 				retAddr = instPC + 8;
1094 			break;
1095 
1096 		default:
1097 			if (!allowNonBranch)
1098 				panic("MachEmulateBranch: Bad coproc branch instruction");
1099 			retAddr = instPC + 4;
1100 		}
1101 		break;
1102 
1103 	default:
1104 		if (!allowNonBranch)
1105 			panic("MachEmulateBranch: Non-branch instruction");
1106 		retAddr = instPC + 4;
1107 	}
1108 #if 0
1109 	printf("Target addr=%x\n", retAddr);
1110 #endif
1111 	return (retAddr);
1112 }
1113 
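/*
 * Branch target = address of the delay slot (instruction + 4) plus the
 * sign-extended 16-bit immediate shifted left two bits.
 */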
1114 unsigned
1115 GetBranchDest(InstPtr)
1116 	InstFmt *InstPtr;
1117 {
1118 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1119 }
1120 
1121 /*
1122  * This routine is called by procxmt() to single step one instruction.
1123  * We do this by storing a break instruction after the current instruction,
1124  * resuming execution, and then restoring the old instruction.
1125  */
1126 cpu_singlestep(p)
1127 	register struct proc *p;
1128 {
1129 	register unsigned va;
1130 	register int *locr0 = p->p_md.md_regs;
1131 	int i;
1132 
1133 	/* compute next address after current location */
1134 	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
1135 	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
1136 	    !useracc((caddr_t)va, 4, B_READ)) {
1137 		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
1138 			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
1139 		return (EFAULT);
1140 	}
1141 	p->p_md.md_ss_addr = va;
1142 	p->p_md.md_ss_instr = fuiword((caddr_t)va);
1143 	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
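	/*
	 * If the write failed the text page is presumably write-protected;
	 * temporarily grant write access, retry, then restore read/execute
	 * protection.
	 */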
1144 	if (i < 0) {
1145 		vm_offset_t sa, ea;
1146 		int rv;
1147 
1148 		sa = trunc_page((vm_offset_t)va);
1149 		ea = round_page((vm_offset_t)va+sizeof(int)-1);
1150 		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
1151 			VM_PROT_DEFAULT, FALSE);
1152 		if (rv == KERN_SUCCESS) {
1153 			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1154 			(void) vm_map_protect(&p->p_vmspace->vm_map,
1155 				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
1156 		}
1157 	}
1158 	if (i < 0)
1159 		return (EFAULT);
1160 	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
1161 		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
1162 		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
1163 	return (0);
1164 }
1165 
1166 /*
1167  * news3400 - INT0 service routine.
1168  *
1169  * INTST1 bit	4:	dma
1170  *		3:	slot #1
1171  *		2:	slot #3
1172  *		1:	external #1
1173  *		0:	external #3
1174  */
1175 
1176 #define	LEVEL0_MASK	\
1177 	(INTST1_DMA|INTST1_SLOT1|INTST1_SLOT3|INTST1_EXT1|INTST1_EXT3)
1178 
1179 level0_intr()
1180 {
1181 	register int stat;
1182 
1183 	stat = *(volatile u_char *)INTST1 & LEVEL0_MASK;
1184 	*(u_char *)INTCLR1 = stat;
1185 
1186 	if (stat & INTST1_DMA)
1187 		dma_intr();
1188 	if (stat & INTST1_SLOT1)
1189 		exec_hb_intr2();
1190 #if NEN > 0
1191 	if (stat & INTST1_SLOT3) {
1192 		int s, t;
1193 
1194 		s = splimp();
1195 		t = lance_intr();
1196 		(void) splx(s);
1197 		if (t == 0)
1198 			exec_hb_intr4();
1199 	}
1200 #endif
1201 #if NLE > 0
1202 	if (stat & INTST1_SLOT3) {
1203 		int s;
1204 
1205 		s = splimp();
1206 		leintr(0);
1207 		(void) splx(s);
1208 	}
1209 #endif
1210 	if (stat & INTST1_EXT1)
1211 		print_int_stat("EXT #1");
1212 	if (stat & INTST1_EXT3)
1213 		print_int_stat("EXT #3");
1214 }
1215 
1216 /*
1217  * news3400 - INT1 service routine.
1218  *
1219  * INTST0 bit	1:	centro fault
1220  *		0:	centro busy
1221  * INTST1 bit	7:	beep
1222  *		6:	scc
1223  *		5:	lance
1224  */
1225 
1226 #define LEVEL1_MASK2	(INTST0_CFLT|INTST0_CBSY)
1227 #define LEVEL1_MASK1	(INTST1_BEEP|INTST1_SCC|INTST1_LANCE)
1228 
1229 level1_intr(pc)
1230 	unsigned pc;
1231 {
1232 	register int stat;
1233 	register u_int saved_inten1 = *(u_char *)INTEN1;
1234 
1235 	*(u_char *)INTEN1 = 0;		/* disable intr: beep, lance, scc */
1236 
1237 	stat = *(volatile u_char *)INTST1 & LEVEL1_MASK1;
1238 	*(u_char *)INTCLR1 = stat;
1239 
1240 	stat &= saved_inten1;
1241 
1242 	if (stat & INTST1_BEEP) {
1243 		*(volatile u_char *)INTCLR1 = INTCLR1_BEEP;
1244 		print_int_stat("BEEP");
1245 	}
1246 	if (stat & INTST1_SCC) {
1247 		scc_intr();
1248 		if (saved_inten1 & *(u_char *)INTST1 & INTST1_SCC)
1249 			scc_intr();
1250 	}
1251 #if NEN > 0
1252 	if (stat & INTST1_LANCE)
1253 		lance_intr();
1254 #endif
1255 #if NLE > 0
1256 	if (stat & INTST1_LANCE)
1257 		leintr(0);
1258 #endif
1259 
1260 	*(u_char *)INTEN1 = saved_inten1;
1261 
1262 #if NLP > 0
1263 	/*
1264 	 * The PARK2 cannot detect the centro interrupt correctly.
1265 	 * We must check for it by reading the cpu cause register
1266 	 * while other interrupts are disabled.
1267 	 */
1268 	{
1269 		register int causereg;
1270 		int s = splhigh();
1271 
1272 		causereg = get_causereg();
1273 		(void) splx(s);
1274 
1275 		if ((causereg & CAUSE_IP4) == 0)
1276 			return;
1277 	}
1278 #endif
1279 
1280 	stat = (int)(*(u_char *)INTST0) & LEVEL1_MASK2;
1281 	*(u_char *)INTCLR0 = stat;
1282 
1283 	if (stat & INTST0_CBSY)		/* centro busy */
1284 #if NLP > 0
1285 		lpxint(0);
1286 #else
1287 		printf("stray intr: CBSY\n");
1288 #endif
1289 }
1290 
1291 /*
1292  * DMA interrupt service routine.
1293  */
1294 dma_intr()
1295 {
1296         register volatile u_char *gsp = (u_char *)DMAC_GSTAT;
1297         register u_int gstat = *gsp;
1298         register int mrqb, i;
1299 
1300 	/*
1301 	 * When the DMA interrupt occurs, some data may remain untransferred;
1302 	 * wait for the transfer to complete.
1303 	 */
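	/*
	 * Each channel's MRQ (transfer still in progress) status bit
	 * appears to sit one position above its interrupt bit, hence
	 * the shift by one below.
	 */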
1304 	mrqb = (gstat & (CH0_INT|CH1_INT|CH2_INT|CH3_INT)) << 1;
1305 	if (gstat & mrqb) {
1306 		/*
1307 		 * SHOULD USE DELAY()
1308 		 */
1309 		for (i = 0; i < 50; i++)
1310 			;
1311 		if (*gsp & mrqb)
1312 			printf("dma_intr: MRQ\n");
1313 	}
1314 
1315 	/* SCSI Dispatch */
1316 	if (gstat & CH_INT(CH_SCSI))
1317 		scintr();
1318 
1319 #include "fd.h"
1320 #if NFD > 0
1321         /* FDC Interrupt Dispatch */
1322 	if (gstat & CH_INT(CH_FDC))
1323 		fdc_intr(0);
1324 #endif /* NFD > 0 */
1325 
1326 #include "sb.h"
1327 #if NSB > 0
1328         /* Audio Interface Dispatch */
1329 	sbintr(0);
1330 #endif /* NSB > 0 */
1331 
1332         /* Video I/F Dispatch */
1333 	if (gstat & CH_INT(CH_VIDEO))
1334 		;
1335 }
1336 
1337 /*
1338  * SCC vector interrupt service routine.
1339  */
1340 scc_intr()
1341 {
1342 	int vec;
1343 	extern int scc_xint(), scc_sint(), scc_rint(), scc_cint();
1344 	static int (*func[])() = {
1345 		scc_xint,
1346 		scc_sint,
1347 		scc_rint,
1348 		scc_cint
1349 	};
1350 
1351 	vec = *(volatile u_char *)SCCVECT;
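	/* the vector's type field selects scc_xint/scc_sint/scc_rint/scc_cint */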
1352 	(*func[(vec & SCC_INT_MASK) >> 1])(vec);
1353 }
1354 
1355 print_int_stat(msg)
1356 	char *msg;
1357 {
1358 	int s0 = *(volatile u_char *)INTST0;
1359 	int s1 = *(volatile u_char *)INTST1;
1360 
1361 	if (msg)
1362 		printf("%s: ", msg);
1363 	else
1364 		printf("intr: ");
1365 	printf("INTST0=0x%x, INTST1=0x%x.\n", s0, s1);
1366 }
1367 
1368 traceback()
1369 {
1370 	u_int pc, sp;
1371 
1372 	getpcsp(&pc, &sp);
1373 	backtr(pc, sp);
1374 }
1375 
1376 #define EF_RA   	        92              /* r31: return address */
1377 #define KERN_REG_SIZE		(18 * 4)
1378 #define STAND_FRAME_SIZE	24
1379 #define EF_SIZE			(STAND_FRAME_SIZE + KERN_REG_SIZE + 12)
1380 
1381 extern u_int MachKernGenExceptionEnd[];
1382 extern u_int end[];
1383 #define	ENDOFTXT	(end + 1)
1384 
1385 #define VALID_TEXT(pc)	\
1386 	((u_int *)MACH_CODE_START <= (u_int *)MACH_UNCACHED_TO_CACHED(pc) && \
1387 	 (u_int *)MACH_UNCACHED_TO_CACHED(pc) <= (u_int *)ENDOFTXT)
1388 
1389 #define ExceptionHandler(x) \
1390 	((u_int*)MachKernGenException < (u_int*)MACH_UNCACHED_TO_CACHED(x) && \
1391 	 (u_int*)MACH_UNCACHED_TO_CACHED(x) < (u_int*)MachKernGenExceptionEnd)
1392 
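/*
 * Walk the kernel stack from (pc, sp), printing each return address.
 * Frames whose pc lies inside MachKernGenException are exception frames
 * and are unwound with the fixed exception frame layout above; other
 * frames are unwound heuristically by getra().
 */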
1393 backtr(pc, sp)
1394 	register u_int *pc;
1395 	register caddr_t sp;
1396 {
1397 	int fsize;
1398 	u_int *getra();
1399 	extern int _gp[];
1400 
1401 	printf("start trace back pc=%x, sp=%x, pid=%d[%s]\n",
1402 		pc, sp, curproc->p_pid, curproc->p_comm);
1403 
1404 	while (VALID_TEXT(pc)) {
1405 		if (sp >= (caddr_t)KERNELSTACK || sp < (caddr_t)UADDR) {
1406 			printf("stack exhausted (sp=0x%x)\n", sp);
1407 			break;
1408 		}
1409 		if (ExceptionHandler(pc)) {
1410 			pc = (u_int *)(*((u_int *)&sp[EF_RA]));
1411 			sp += EF_SIZE;
1412 			printf("trapped from pc=%x, sp=%x\n", pc, sp);
1413 		} else {
1414 			pc = getra(pc, sp, &fsize);
1415 			sp += fsize;
1416 			printf("called from pc=%x, sp=%x\n", pc, sp);
1417 		}
1418 	}
1419 	printf("trace back END. pid=%d[%s]\n", curproc->p_pid, curproc->p_comm);
1420 }
1421 
1422 #define	NPCSTOCK	128
1423 
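/*
 * Recover the return address of the frame containing pc by scanning
 * forward through the text: remember the frame size from "addiu sp,sp,N",
 * track registers reloaded from the stack with "lw", follow unconditional
 * "beq zero,zero" branches (guarding against loops), and stop one
 * instruction past a "jr", returning the register it jumps through.
 */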
1424 u_int *
1425 getra(pc, sp, fsize)
1426 	register int *pc;
1427 	register caddr_t sp;
1428 	int *fsize;
1429 {
1430 	u_int regs[32];
1431 	int *opcs[NPCSTOCK];
1432 	register int i, nbpc = 0;
1433 	int printed = 0;
1434 	InstFmt I;
1435 
1436 	*fsize = 0;
1437 	for (i = 0; i < 32; i++) regs[i] = 0;
1438 	for (; (u_int*)MACH_UNCACHED_TO_CACHED(pc) < (u_int*)ENDOFTXT; pc++) {
1439 		I.word = *pc;
1440 		switch (I.IType.op) {
1441 
1442 		case OP_ADDIU:
1443 			/* sp += fsize */
1444 			if (I.IType.rs == SP && I.IType.rt == SP)
1445 				*fsize = (u_short)I.IType.imm;
1446 			break;
1447 
1448 		case OP_LW:
1449 			if (I.IType.rs != SP)
1450 				break;
1451 			regs[I.IType.rt] = *(u_int *)&sp[(short)I.IType.imm];
1452 			break;
1453 
1454 		case OP_BEQ:
1455 			if (I.IType.rs != ZERO || I.IType.rt != ZERO)
1456 				break;
1457 			for (i = 0; i < nbpc; i++)
1458 				if (pc == opcs[i]) {
1459 					/*
1460 					 * Branch constructs an infinite loop.
1461 					 */
1462 					if (!printed) {
1463 						printf("branch loop\n");
1464 						printed = 1;
1465 					}
1466 					break;
1467 				}
1468 			if (i == nbpc) {
1469 				opcs[nbpc] = pc;
1470 				nbpc = imin(nbpc + 1, NPCSTOCK);
1471 				pc = pc + (short)I.IType.imm;
1472 			}
1473 			break;
1474 
1475 		default:
1476 			break;
1477 		}
1478 
1479 		I.word = *(pc - 1);
1480 		if (I.RType.op == OP_SPECIAL && I.RType.func == OP_JR)
1481 			return ((int *)regs[I.RType.rs]);
1482 	}
1483 	printf("pc ran out of TEXT\n");
1484 	return (0);
1485 }
1486