xref: /original-bsd/sys/pmax/pmax/trap.c (revision e59fb703)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.1 (Berkeley) 01/07/92
15  */
16 
17 #include "param.h"
18 #include "systm.h"
19 #include "proc.h"
20 #include "kernel.h"
21 #include "signalvar.h"
22 #include "user.h"
23 #ifdef KTRACE
24 #include "ktrace.h"
25 #endif
26 #include "net/netisr.h"
27 
28 #include "../include/trap.h"
29 #include "../include/psl.h"
30 #include "../include/reg.h"
31 #include "../include/cpu.h"
32 #include "pte.h"
33 #include "clockreg.h"
34 
35 #include "vm/vm.h"
36 #include "vm/vm_kern.h"
37 #include "vm/vm_page.h"
38 
39 /*
40  * This is a kludge to allow X windows to work.
41  */
42 #define X_KLUGE
43 
44 #ifdef X_KLUGE
45 #define USER_MAP_ADDR	0x4000
46 #define NPTES 300
47 static pt_entry_t UserMapPtes[NPTES];
48 static unsigned nUserMapPtes;
49 static pid_t UserMapPid;
50 #endif
51 
/*
 * Lazy FP context switching: the coprocessor still holds the FP state of
 * the proc named here; it is switched in T_COP_UNUSABLE handling in trap().
 */
struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */

/* Assembly-language exception/interrupt entry points (in locore). */
extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
/*
 * NOTE(review): MemErrorInterrupt is defined `static void' later in this
 * file; the extern declaration here conflicts with that -- confirm intent.
 */
extern void MemErrorInterrupt();
61 
/*
 * Exception dispatch table, indexed by the cause-register exception code
 * (0-15), with a second bank of 16 entries for traps taken from user mode.
 * Entry order must match the hardware exception code assignments (and the
 * trap_type[] strings below).
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
};
102 
/*
 * Printable names for the 16 exception codes; indexed the same way as
 * machExceptionTable (used by trapDump()).
 */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};
121 
#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* CPU status register at trap time */
	u_int	cause;		/* CPU cause register at trap time */
	u_int	vadr;		/* faulting virtual address (if any) */
	u_int	pc;		/* program counter at trap time */
	u_int	ra;		/* return-address register at trap time */
	u_int	code;		/* syscall code (negated after return) */
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* circular buffer + cursor */
#endif
133 
/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 *
 * statusReg, causeReg, vadr and pc are the CPU registers captured at
 * exception time; `args' exists only so the saved kernel register frame
 * can be reached through its address (XXX).  Returns the pc at which
 * execution should resume.
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args /* XXX */)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occurred on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;		/* signal code passed to trapsignal() */
	register struct proc *p = curproc;
	struct timeval syst;		/* system time on entry, for profiling */
	vm_prot_t ftype;		/* fault type for vm_fault() */
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* Record this trap in the circular trap history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	/*
	 * NOTE(review): index 19 into the frame at &args is presumably the
	 * saved RA slot of the kernel exception frame -- confirm vs. locore.
	 */
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] : p->p_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	/* Extract the exception code; tag it T_USER for user-mode traps. */
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		syst = p->p_stime;	/* snapshot for profiling at `out' */
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* Kernel-mode write hit a TLB entry without the dirty bit. */
		if ((int)vadr < 0) {
			/* Fault address is in kernel (high) address space. */
			register pt_entry_t *pte;
			register unsigned entry;
#ifndef ATTR
			register vm_offset_t pa;
#endif

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* Mark the page modified and refresh the TLB entry. */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= PG_FRAME;
			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
				entry, MachTLBUpdate(vadr, entry)); /* XXX */
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
#else
			pa = entry & PG_FRAME;
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: kmod");
			PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		/* Write to a clean user page: set dirty bit in the hash PTE. */
		pmap_hash_t hp;
#ifndef ATTR
		vm_offset_t pa;
#endif
#ifdef DIAGNOSTIC
		extern pmap_hash_t zero_pmap_hash;
		extern pmap_t cur_pmap;

		if (cur_pmap->pm_hash == zero_pmap_hash)
			panic("tlbmod");
#endif
		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
		if (hp->low & PG_RO) {
			/* Genuine write-protection fault; go do a VM fault. */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		hp->low |= PG_M;
		printf("trap: TLBupdate hi %x lo %x i %x\n", hp->high, hp->low,
			MachTLBUpdate(hp->high, hp->low)); /* XXX */
#ifdef ATTR
		pmap_attributes[atop(hp->low - KERNBASE)] |= PMAP_ATTR_MOD;
#else
		pa = hp->low & PG_FRAME;
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: umod");
		PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		/* Kernel-mode TLB miss. */
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		if ((int)vadr < 0) {
			/* Miss on a kernel address: fault in kernel_map. */
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* copyin()/copyout() style recovery via onfault. */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/* Kernel miss on a user address (e.g. during copyin). */
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		/* Common page-fault path for user addresses. */
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map = &vm->vm_map;
		int rv;

#ifdef X_KLUGE
		/* Satisfy faults in the X-server kludge region directly. */
		if (p->p_pid == UserMapPid &&
		    (va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
			register pt_entry_t *pte;

			pte = &UserMapPtes[va];
			MachTLBWriteRandom((vadr & PG_FRAME) |
				(vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT),
				pte->pt_entry);
			return (pc);
		}
#endif
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv != KERN_SUCCESS) {
			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
				map, va, ftype, rv, vadr, pc,
				!USERMODE(statusReg) ? ((int *)&args)[19] :
					p->p_regs[RA]); /* XXX */
			printf("\tpid %d %s PC %x RA %x\n", p->p_pid,
				p->p_comm, p->p_regs[PC], p->p_regs[RA]); /* XXX */
			trapDump("vm_fault");
		}
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			/* Kernel fault on user address: try onfault recovery. */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/* Unrecoverable user fault: deliver a signal below. */
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
		if (vadr == KERNBASE) {
			struct args {
				int	i[1];
			} args;
			int rval[2];

			/*
			 * Assume a signal handler is trying to return
			 * (see sendsig() and sigreturn()). We have to
			 * pop the sigframe struct to get the address of
			 * the sigcontext.
			 */
			args.i[0] = p->p_regs[SP] + 4 * sizeof(int);
			(void) sigreturn(curproc, &args, rval);
			goto out;
		}
		/* FALLTHROUGH */

	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		/* System call: decode number/args, dispatch, store results. */
		register int *locr0 = p->p_regs;
		register struct sysent *callp;
		int code, numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern unsigned MachEmulateBranch();
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		if ((int)causeReg < 0)
			/* Syscall was in a branch delay slot (BD bit set). */
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];
		if (code == 0) {			/* indir */
			/* Indirect syscall: real code is in A0, args shift. */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[0];	/* indir (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			if (i > 3) {
				/* Remaining args come from the user stack. */
				i = copyin((caddr_t)(locr0[SP] +
						3 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;	/* error flag */
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		} else {
			if (code >= numsys)
				callp = &systab[0];	/* indir (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				/* Remaining args come from the user stack. */
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;	/* error flag */
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* Tag the previously-logged trap record with the code. */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;	/* negative marks syscall return */
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		if (i == ERESTART)
			locr0[PC] = pc;		/* re-execute the syscall */
		else if (i != EJUSTRETURN) {
			if (i) {
				locr0[V0] = i;	/* errno in V0, A3 set */
				locr0[A3] = 1;
			} else {
				locr0[V0] = rval[0];
				locr0[V1] = rval[1];
				locr0[A3] = 0;
			}
		}
		/* else if (i == EJUSTRETURN) */
			/* nothing to do */
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
		i = SIGTRAP;
		break;

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		/* Only coprocessor 1 (the FPU) may be used from user mode. */
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* Lazy FP switch: give this proc the coprocessor. */
		MachSwitchFPState(machFPCurProcPtr, p->p_regs);
		machFPCurProcPtr = p;
		p->p_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		/* Kernel-mode faults are fatal unless onfault recovery set. */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
		panic("trap");
	}
	/* Deliver the signal chosen above to the faulting process. */
	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	astpending = 0;
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		(void) splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		while (i = CURSIG(p))
			psig(i);
	}
	/* Charge system time spent in the trap to the profiling buffer. */
	if (p->p_stats->p_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &p->p_stime;

		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks)
			addupc(pc, &p->p_stats->p_prof, ticks);
	}
	curpri = p->p_pri;
	return (pc);
}
577 
578 int temp; /*XXX*/
579 
/*
 * Handle an interrupt.
 * Called from MachKernIntr() or MachUserIntr()
 * Note: curproc might be NULL.
 *
 * Dispatches each pending, enabled interrupt line to its handler:
 * lines 0-2 are devices (sii/le/dc), line 3 the clock, line 4 memory
 * errors, line 5 the FPU; then runs pending software interrupts
 * (softclock, network).
 */
interrupt(statusReg, causeReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned pc;		/* program counter where to continue */
{
	register int i;
	register unsigned mask;
	clockframe cf;

#ifdef DEBUG
	/* Record this interrupt in the circular trap history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = 0;
	trp->pc = pc;
	trp->ra = 0;
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_intr++;
	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
	/*
	 * Enable hardware interrupts which were enabled but not pending.
	 * We only respond to software interrupts when returning to spl0.
	 */
	splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
	/*
	 * The first three tests should probably use
	 * some kind of table generated by 'config'.
	 */
	if (mask & MACH_INT_MASK_0)
		siiintr();
	if (mask & MACH_INT_MASK_1)
		leintr();
	if (mask & MACH_INT_MASK_2)
		dcintr();
	if (mask & MACH_INT_MASK_3) {
		/* Clock interrupt. */
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;

		temp = c->regc;	/* clear interrupt bits */
		cf.pc = pc;
		cf.ps = statusReg;
		hardclock(cf);
	}
	if (mask & MACH_INT_MASK_4)
		MemErrorInterrupt();
	if (mask & MACH_INT_MASK_5) {
		printf("FPU interrupt: PC %x CR %x SR %x\n",
			pc, causeReg, statusReg); /* XXX */
		if (!USERMODE(statusReg)) {
			/* FPU trap from kernel mode is unexpected. */
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
	}
	if (mask & MACH_SOFT_INT_MASK_0) {
		clockframe cf;

		clearsoftclock();
		cf.pc = pc;
		cf.ps = statusReg;
		softclock(cf);
	}
	/* process network interrupt if we trapped or will very soon */
	if ((mask & MACH_SOFT_INT_MASK_1) ||
	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
		clearsoftnet();
#ifdef INET
		if (netisr & (1 << NETISR_ARP)) {
			netisr &= ~(1 << NETISR_ARP);
			arpintr();
		}
		if (netisr & (1 << NETISR_IP)) {
			netisr &= ~(1 << NETISR_IP);
			ipintr();
		}
#endif
#ifdef NS
		if (netisr & (1 << NETISR_NS)) {
			netisr &= ~(1 << NETISR_NS);
			nsintr();
		}
#endif
#ifdef ISO
		if (netisr & (1 << NETISR_ISO)) {
			netisr &= ~(1 << NETISR_ISO);
			clnlintr();
		}
#endif
	}
}
683 
/*
 * This is called from MachUserIntr() if astpending is set.
 * This is very similar to the tail of trap(): deliver pending signals,
 * reschedule if requested, and update the scheduling priority before
 * returning to user mode.
 */
softintr(statusReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned pc;		/* program counter where to continue */
{
	register struct proc *p = curproc;
	register int i;

	cnt.v_soft++;
	astpending = 0;
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		(void) splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		while (i = CURSIG(p))
			psig(i);
	}
	curpri = p->p_pri;
}
718 
#ifdef DEBUG
/*
 * Print the trap history buffer, newest entry first, then clear it.
 * `msg' identifies the caller in the output.  Runs at splhigh so the
 * buffer is not modified while being printed.
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* Walk backwards through the circular buffer. */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		if (trp->cause == 0)
			break;	/* unused entry: end of history */
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf("   RA %x code %d\n", trp-> ra, trp->code);
	}
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif
746 
#ifdef X_KLUGE
/*
 * This is a kludge to allow X windows to work.
 *
 * Map `size' bytes of physical memory starting at `pa' into the calling
 * process at USER_MAP_ADDR, allocating PTEs from the UserMapPtes pool.
 * Only one process may own the map at a time; returns the mapped virtual
 * address, or NULL if the pool is exhausted or owned by another process.
 * The TLB entries themselves are filled in lazily by trap() on fault.
 */
caddr_t
vmUserMap(size, pa)
	int size;
	unsigned pa;
{
	register caddr_t v;
	unsigned off, entry;

	/* First caller claims the pool; others are refused. */
	if (nUserMapPtes == 0)
		UserMapPid = curproc->p_pid;
	else if (UserMapPid != curproc->p_pid)
		return ((caddr_t)0);
	off = pa & PGOFSET;
	size = btoc(off + size);	/* round to whole pages */
	if (nUserMapPtes + size > NPTES)
		return ((caddr_t)0);
	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
	/*
	 * NOTE(review): 0x9ffff000 presumably masks pa down to a page
	 * frame within the machine's physical address range -- confirm
	 * against the pmax PTE format.
	 */
	entry = (pa & 0x9ffff000) | PG_V | PG_M;
	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
		entry |= PG_N;	/* mark device space non-cacheable */
	while (size > 0) {
		UserMapPtes[nUserMapPtes].pt_entry = entry;
		entry += NBPG;	/* next physical page */
		nUserMapPtes++;
		size--;
	}
	return (v);
}
779 
780 vmUserUnmap()
781 {
782 	int id;
783 
784 	nUserMapPtes = 0;
785 	if (UserMapPid == curproc->p_pid) {
786 		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
787 		if (id >= 0)
788 			MachTLBFlushPID(id);
789 	}
790 	UserMapPid = 0;
791 }
792 #endif
793 
/*
 *----------------------------------------------------------------------
 *
 * MemErrorInterrupt --
 *
 *	Handle an interrupt from the system control register: panic on
 *	a genuine memory error, otherwise acknowledge and clear the
 *	error bits in the CSR.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Clears the error bits in the system CSR; panics on memory error.
 *
 *----------------------------------------------------------------------
 */
static void
MemErrorInterrupt()
{
	volatile u_short *sysCSRPtr = (u_short *)MACH_SYS_CSR_ADDR;
	u_short csr;

	csr = *sysCSRPtr;

	if (csr & MACH_CSR_MEM_ERR) {
		printf("Memory error at 0x%x\n",
			*(unsigned *)MACH_WRITE_ERROR_ADDR);
		panic("Mem error interrupt");
	}
	/* Acknowledge: write back with the low error bits set to clear. */
	*sysCSRPtr = (csr & ~MACH_CSR_MBZ) | 0xff;
}
824 
825 /* machDis.c -
826  *
827  *     	This contains the routine which disassembles an instruction to find
828  *	the target.
829  *
830  *	Copyright (C) 1989 Digital Equipment Corporation.
831  *	Permission to use, copy, modify, and distribute this software and
832  *	its documentation for any purpose and without fee is hereby granted,
833  *	provided that the above copyright notice appears in all copies.
834  *	Digital Equipment Corporation makes no representations about the
835  *	suitability of this software for any purpose.  It is provided "as is"
836  *	without express or implied warranty.
837  */
838 
#ifndef lint
/* RCS identification string, compiled in unless building for lint. */
static char rcsid[] = "$Header: /sprite/src/kernel/mach/ds3100.md/RCS/machDis.c,v 1.1 89/07/11 17:55:43 nelson Exp $ SPRITE (Berkeley)";
#endif /* not lint */
842 
/*
 * Define the instruction formats.
 *
 * Each struct overlays the 32-bit instruction word; fields are listed
 * low-order first.  NOTE(review): this bitfield layout assumes the
 * compiler allocates bitfields from the least-significant bit, as on
 * little-endian MIPS -- confirm for other targets.
 */
typedef union {
	unsigned word;

	struct {			/* immediate (I-type) format */
		unsigned imm: 16;	/* signed immediate / branch offset */
		unsigned f2: 5;		/* rt */
		unsigned f1: 5;		/* rs */
		unsigned op: 6;
	} IType;

	struct {			/* jump (J-type) format */
		unsigned target: 26;	/* word-aligned jump target */
		unsigned op: 6;
	} JType;

	struct {			/* register (R-type) format */
		unsigned funct: 6;
		unsigned f4: 5;		/* shamt */
		unsigned f3: 5;		/* rd */
		unsigned f2: 5;		/* rt */
		unsigned f1: 5;		/* rs */
		unsigned op: 6;
	} RType;

	struct {			/* floating-point register format */
		unsigned funct: 6;
		unsigned fd: 5;
		unsigned fs: 5;
		unsigned ft: 5;
		unsigned fmt: 4;
		unsigned : 1;		/* always '1' */
		unsigned op: 6;		/* always '0x11' */
	} FRType;
} InstFmt;
880 
/*
 * Opcodes of the branch instructions.
 */
#define OP_SPECIAL	0x00
#define OP_BCOND	0x01
#define OP_J		0x02
#define	OP_JAL		0x03
#define OP_BEQ		0x04
#define OP_BNE		0x05
#define OP_BLEZ		0x06
#define OP_BGTZ		0x07

/*
 * Branch subops of the special opcode.
 */
#define OP_JR		0x08
#define OP_JALR		0x09

/*
 * Sub-ops for OP_BCOND code.
 * (These live in the rt field of the instruction.)
 */
#define OP_BLTZ		0x00
#define OP_BGEZ		0x01
#define OP_BLTZAL	0x10
#define OP_BGEZAL	0x11

/*
 * Coprocessor branch masks.
 * Applied to the rs field to recognize BCzT/BCzF instructions.
 */
#define COPz_BC_MASK	0x1a
#define COPz_BC		0x08
#define COPz_BC_TF_MASK	0x01
#define COPz_BC_TRUE	0x01
#define COPz_BC_FALSE	0x00

/*
 * Coprocessor 1 operation.
 */
#define OP_COP_1	0x11
920 
/*
 * Return the resulting PC as if the branch was executed.
 *
 * regsPtr points at the saved general registers, instPC at the branch
 * instruction itself, and fpcCSR supplies the FP condition bit for
 * coprocessor-1 branches.  If allowNonBranch is set, a non-branch
 * instruction yields instPC + 4 instead of a panic.  A not-taken
 * branch resumes at instPC + 8, skipping the delay slot.
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;
	unsigned instPC;
	unsigned fpcCSR;
	int allowNonBranch;
{
	InstFmt *instPtr;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#ifdef notdef
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*instPC, fpcCSR);
#endif

	instPtr = (InstFmt *)instPC;
	switch ((int)instPtr->JType.op) {
	case OP_SPECIAL:
		switch ((int)instPtr->RType.funct) {
		case OP_JR:
		case OP_JALR:
			/* Jump-register: target comes from register rs. */
			retAddr = regsPtr[instPtr->RType.f1];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		/* Conditional branches on rs vs. zero; subop is in rt. */
		switch ((int)instPtr->IType.f2) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[instPtr->RType.f1]) < 0)
				retAddr = GetBranchDest(instPtr);
			else
				retAddr = instPC + 8;
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[instPtr->RType.f1]) >= 0)
				retAddr = GetBranchDest(instPtr);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* Absolute jump within the current 256MB segment. */
		retAddr = (instPtr->JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[instPtr->RType.f1] == regsPtr[instPtr->RType.f2])
			retAddr = GetBranchDest(instPtr);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[instPtr->RType.f1] != regsPtr[instPtr->RType.f2])
			retAddr = GetBranchDest(instPtr);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[instPtr->RType.f1]) <= 0)
			retAddr = GetBranchDest(instPtr);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[instPtr->RType.f1]) > 0)
			retAddr = GetBranchDest(instPtr);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP_1:
		/* FP branch (BC1T/BC1F): test the FP condition bit. */
		if ((instPtr->RType.f1 & COPz_BC_MASK) == COPz_BC) {
			if ((instPtr->RType.f2 & COPz_BC_TF_MASK) ==
			    COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest(instPtr);
			else
				retAddr = instPC + 8;
		} else if (allowNonBranch)
			retAddr = instPC + 4;
		else
			panic("MachEmulateBranch: Bad coproc branch instruction");
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#ifdef notdef
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}
1042 
1043 unsigned
1044 GetBranchDest(InstPtr)
1045 	InstFmt *InstPtr;
1046 {
1047 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1048 }
1049