xref: /original-bsd/sys/pmax/pmax/trap.c (revision fcafb5d3)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.5 (Berkeley) 03/23/92
15  */
16 
17 #include "param.h"
18 #include "systm.h"
19 #include "proc.h"
20 #include "kernel.h"
21 #include "signalvar.h"
22 #include "user.h"
23 #include "buf.h"
24 #ifdef KTRACE
25 #include "ktrace.h"
26 #endif
27 #include "net/netisr.h"
28 
29 #include "../include/trap.h"
30 #include "../include/psl.h"
31 #include "../include/reg.h"
32 #include "../include/cpu.h"
33 #include "../include/pte.h"
34 #include "../include/mips_opcode.h"
35 #include "clockreg.h"
36 
37 #include "vm/vm.h"
38 #include "vm/vm_kern.h"
39 #include "vm/vm_page.h"
40 
/*
 * This is a kludge to allow X windows to work.
 */
#define X_KLUGE

#ifdef X_KLUGE
/*
 * State for vmUserMap()/vmUserUnmap() below: a fixed window of user
 * virtual address space mapped by a private PTE array, usable by at
 * most one process at a time (identified by UserMapPid).
 */
#define USER_MAP_ADDR	0x4000
#define NPTES 300
static pt_entry_t UserMapPtes[NPTES];	/* PTEs backing the user window */
static unsigned nUserMapPtes;		/* number of valid entries in UserMapPtes */
static pid_t UserMapPid;		/* pid of the single process using the map */
#endif
53 
struct	proc *machFPCurProcPtr;		/* pointer to last proc to use FP */

/* Low-level exception entry points and helpers (assembly, see locore). */
extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
extern void MemErrorInterrupt();
extern unsigned MachEmulateBranch();

/*
 * Exception dispatch table, indexed by the cause-register exception
 * code (0-15), with a second bank of 16 entries for traps taken from
 * user mode (see the T_USER offset used in trap() below).
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
};
105 
/*
 * Printable names for the 16 exception codes, indexed by
 * (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT
 * (used by trapDump() below).
 */
char	*trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};
124 
#ifdef DEBUG
#define TRAPSIZE	10
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* status register at trap time */
	u_int	cause;		/* cause register at trap time */
	u_int	vadr;		/* faulting virtual address, if any */
	u_int	pc;		/* program counter at trap time */
	u_int	ra;		/* saved return-address register */
	u_int	code;		/* syscall code (negated) or 0 */
} trapdebug[TRAPSIZE], *trp = trapdebug;	/* circular buffer; trp = next slot */
#endif
136 
/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 *
 * Note: the trailing `args' parameter has no declaration; it marks the
 * start of the register frame saved on the kernel stack, and is indexed
 * via ((int *)&args)[n] to reach saved registers (index 19 is the saved
 * RA, matching the KADB register-copy code below).
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occured on */
	unsigned pc;		/* program counter where to continue */
{
	register int type, i;
	unsigned ucode = 0;	/* extra code delivered with a signal */
	register struct proc *p = curproc;
	struct timeval syst;	/* system time on entry, for profiling below */
	vm_prot_t ftype;	/* access type handed to vm_fault() */
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* Log this trap in the circular history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	/* RA comes from the kernel frame or the user register save area. */
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		/* Select the user-mode half of the switch below. */
		type |= T_USER;
		syst = p->p_stime;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* Negative vadr means a kernel (KSEG2) address. */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
#ifndef ATTR
			register vm_offset_t pa;
#endif

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* Mark the page dirty and refresh the TLB entry. */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= PG_FRAME;
			printf("trap: TLBupdate hi %x lo %x i %x\n", vadr,
				entry, MachTLBUpdate(vadr, entry)); /* XXX */
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] |= PMAP_ATTR_MOD;
#else
			pa = entry & PG_FRAME;
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: kmod");
			/* Tell the VM system the page has been modified. */
			PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pmap_hash_t hp;
#ifndef ATTR
		vm_offset_t pa;
#endif
#ifdef DIAGNOSTIC
		extern pmap_hash_t zero_pmap_hash;
		extern pmap_t cur_pmap;

		if (cur_pmap->pm_hash == zero_pmap_hash)
			panic("tlbmod");
#endif
		/* Look up the software TLB (pmap hash) entry for vadr. */
		hp = &((pmap_hash_t)PMAP_HASH_UADDR)[PMAP_HASH(vadr)];
		if (hp->low & PG_RO) {
			/* Real write-protection fault; go do a VM fault. */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		hp->low |= PG_M;
		printf("trap: TLBupdate hi %x lo %x i %x\n", hp->high, hp->low,
			MachTLBUpdate(hp->high, hp->low)); /* XXX */
#ifdef ATTR
		pmap_attributes[atop(hp->low - KERNBASE)] |= PMAP_ATTR_MOD;
#else
		pa = hp->low & PG_FRAME;
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: umod");
		PHYS_TO_VM_PAGE(pa)->clean = FALSE;
#endif
		/* Reached via fallthrough from T_TLB_MOD for user addresses. */
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* Negative vadr means a kernel address: fault on kernel_map. */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/*
			 * If a copyin/copyout-style routine registered a
			 * fault handler, resume there instead of panicking.
			 */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map = &vm->vm_map;
		int rv;

#ifdef X_KLUGE
		/* Fast path for the special X-server mapping (vmUserMap). */
		if (p->p_pid == UserMapPid &&
		    (va = pmax_btop(vadr - USER_MAP_ADDR)) < nUserMapPtes) {
			register pt_entry_t *pte;

			pte = &UserMapPtes[va];
			MachTLBWriteRandom((vadr & PG_FRAME) |
				(vm->vm_pmap.pm_tlbpid << VMMACH_TLB_PID_SHIFT),
				pte->pt_entry);
			return (pc);
		}
#endif
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv != KERN_SUCCESS) {
			printf("vm_fault(%x, %x, %x, 0) -> %x ADR %x PC %x RA %x\n",
				map, va, ftype, rv, vadr, pc,
				!USERMODE(statusReg) ? ((int *)&args)[19] :
					p->p_md.md_regs[RA]); /* XXX */
			printf("\tpid %d %s PC %x RA %x\n", p->p_pid,
				p->p_comm, p->p_md.md_regs[PC],
				p->p_md.md_regs[RA]); /* XXX */
			trapDump("vm_fault");
		}
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		/* Fault could not be resolved. */
		if (!USERMODE(statusReg)) {
			/* Kernel-mode fault: honor pcb_onfault, else panic. */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
		if (vadr == KERNBASE) {
			struct args {
				int	i[1];
			} args;
			int rval[2];

			/*
			 * Assume a signal handler is trying to return
			 * (see sendsig() and sigreturn()). We have to
			 * pop the sigframe struct to get the address of
			 * the sigcontext.
			 */
			args.i[0] = p->p_md.md_regs[SP] + 4 * sizeof(int);
			(void) sigreturn(curproc, &args, rval);
			goto out;
		}
		/* FALLTHROUGH */

	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		/* (negative cause: syscall was in a branch delay slot,
		 * so the next PC must be computed by emulating the branch) */
		if ((int)causeReg < 0)
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		/* ULTRIX binaries use their own syscall table. */
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];
		if (code == 0) {			/* indir */
			/* syscall(2): real syscall number is in A0,
			 * its arguments shift down one register. */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[0];	/* indir (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			if (i > 3) {
				/*
				 * NOTE(review): remaining args are fetched
				 * from SP + 12; with a 16-byte register home
				 * area the first stack argument of the indir
				 * call would live at SP + 16 — confirm this
				 * offset against the libc syscall() stub.
				 */
				i = copyin((caddr_t)(locr0[SP] +
						3 * sizeof(int)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3) * sizeof(int));
				if (i) {
					/* copyin failed: return the error. */
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		} else {
			if (code >= numsys)
				callp = &systab[0];	/* indir (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_narg;
			/* First four args come in registers A0-A3 ... */
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4) {
				/* ... the rest above the register save area. */
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(int)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4) * sizeof(int));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_narg, args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		/* Tag the most recent history entry with the syscall code. */
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;	/* negative marks a syscall return */
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		if (i == ERESTART)
			/* Re-execute the syscall instruction. */
			locr0[PC] = pc;
		else if (i != EJUSTRETURN) {
			/* Return error in V0 with A3 set, or results with
			 * A3 clear (the MIPS syscall return convention). */
			if (i) {
				locr0[V0] = i;
				locr0[A3] = 1;
			} else {
				locr0[V0] = rval[0];
				locr0[V1] = rval[1];
				locr0[A3] = 0;
			}
		}
		/* else if (i == EJUSTRETURN) */
			/* nothing to do */
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		/* (negative cause: break was in a branch delay slot) */
		if ((int)causeReg < 0)
			va += 4;

		/* read break instruction */
		instr = fuiword(va);
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* Not our single-step breakpoint: deliver SIGTRAP. */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP  */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			vm_offset_t sa, ea;
			int rv;

			/* Page is write-protected (text): temporarily make
			 * it writable, patch, then restore protection. */
			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0) {
			i = SIGTRAP;
			break;
		}
		p->p_md.md_ss_addr = 0;
		goto out;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* Lazily hand the FPU to this process. */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		/* Kernel-mode fault: recover via pcb_onfault if registered. */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		extern struct pcb kdbpcb;

		/*
		 * Build a pcb snapshot for the kernel debugger, either from
		 * the user register save area or from the saved register
		 * frame on the kernel stack (indexed through `args').
		 */
		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#endif
		panic("trap");
	}
	/* Deliver the signal chosen above to the current process. */
	printf("trap: pid %d %s sig %d adr %x pc %x ra %x\n", p->p_pid,
		p->p_comm, i, vadr, pc, p->p_md.md_regs[RA]); /* XXX */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	astpending = 0;
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	/* Charge profiling ticks for the system time consumed by the trap. */
	if (p->p_stats->p_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &p->p_stime;

		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks)
			addupc(pc, &p->p_stats->p_prof, ticks);
	}
	curpri = p->p_pri;
	return (pc);
}
662 
#ifdef DS5000
/* Per-slot I/O interrupt dispatch table (func, unit), used by interrupt(). */
struct	intr_tab intr_tab[8];
#endif

int temp; /* XXX ULTRIX compiler bug with -O */
668 
/*
 * Handle an interrupt.
 * Called from MachKernIntr() or MachUserIntr()
 * Note: curproc might be NULL.
 */
interrupt(statusReg, causeReg, pc)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned pc;		/* program counter where to continue */
{
	register unsigned mask;
	clockframe cf;

#ifdef DEBUG
	/* Log this interrupt in the circular trap history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = 0;
	trp->pc = pc;
	trp->ra = 0;
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_intr++;
	mask = causeReg & statusReg;	/* pending interrupts & enable mask */
#ifdef DS3100
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_3) {
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;

		temp = c->regc;	/* clear interrupt bits */
		cf.pc = pc;
		cf.ps = statusReg;
		hardclock(cf);
		causeReg &= ~MACH_INT_MASK_3;	/* reenable clock interrupts */
	}
	/*
	 * Enable hardware interrupts which were enabled but not pending.
	 * We only respond to software interrupts when returning to spl0.
	 */
	splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
	/* Dispatch each pending device interrupt line. */
	if (mask & MACH_INT_MASK_0)
		siiintr(0);		/* SCSI */
	if (mask & MACH_INT_MASK_1)
		leintr(0);		/* ethernet */
	if (mask & MACH_INT_MASK_2)
		dcintr(0);		/* serial lines */
	if (mask & MACH_INT_MASK_4)
		MemErrorInterrupt();
#endif /* DS3100 */
#ifdef DS5000
	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		register volatile struct chiptime *c =
			(volatile struct chiptime *)MACH_CLOCK_ADDR;
		register unsigned csr;
		static int warned = 0;

		/* Piggyback a power-supply overheat check on clock ticks,
		 * warning only on state changes. */
		csr = *(unsigned *)MACH_SYS_CSR_ADDR;
		if ((csr & MACH_CSR_PSWARN) && !warned) {
			warned = 1;
			printf("WARNING: power supply is overheating!\n");
		} else if (warned && !(csr & MACH_CSR_PSWARN)) {
			warned = 0;
			printf("WARNING: power supply is OK again\n");
		}

		temp = c->regc;	/* clear interrupt bits */
		cf.pc = pc;
		cf.ps = statusReg;
		hardclock(cf);
		causeReg &= ~MACH_INT_MASK_1;	/* reenable clock interrupts */
	}
	if (mask & MACH_INT_MASK_0) {
		register unsigned csr;
		register unsigned i, m;

		/* I/O interrupt: pending-and-enabled slots from the CSR. */
		csr = *(unsigned *)MACH_SYS_CSR_ADDR;
		m = csr & (csr >> MACH_CSR_IOINTEN_SHIFT) & MACH_CSR_IOINT_MASK;
#if 0
		*(unsigned *)MACH_SYS_CSR_ADDR =
			(csr & ~(MACH_CSR_MBZ | 0xFF)) |
			(m << MACH_CSR_IOINTEN_SHIFT);
#endif
		/*
		 * Enable hardware interrupts which were enabled but not
		 * pending. We only respond to software interrupts when
		 * returning to spl0.
		 */
		splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
			MACH_SR_INT_ENA_CUR);
		/* Dispatch each pending slot through intr_tab. */
		for (i = 0; m; i++, m >>= 1) {
			if (!(m & 1))
				continue;
			if (intr_tab[i].func)
				(*intr_tab[i].func)(intr_tab[i].unit);
			else
				printf("spurious interrupt %d\n", i);
		}
#if 0
		*(unsigned *)MACH_SYS_CSR_ADDR =
			csr & ~(MACH_CSR_MBZ | 0xFF);
#endif
	} else {
		/*
		 * Enable hardware interrupts which were enabled but not
		 * pending. We only respond to software interrupts when
		 * returning to spl0.
		 */
		splx((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
			MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3)
		MemErrorInterrupt();
#endif /* DS5000 */
	/* FPU interrupt: only meaningful when taken from user mode. */
	if (mask & MACH_INT_MASK_5) {
		if (!USERMODE(statusReg)) {
#ifdef DEBUG
			trapDump("fpintr");
#else
			printf("FPU interrupt: PC %x CR %x SR %x\n",
				pc, causeReg, statusReg);
#endif
		} else
			MachFPInterrupt(statusReg, causeReg, pc);
	}
	/* Software interrupt 0: run softclock with a local clockframe. */
	if (mask & MACH_SOFT_INT_MASK_0) {
		clockframe cf;

		clearsoftclock();
		cf.pc = pc;
		cf.ps = statusReg;
		softclock(cf);
	}
	/* process network interrupt if we trapped or will very soon */
	if ((mask & MACH_SOFT_INT_MASK_1) ||
	    netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
		clearsoftnet();
#ifdef INET
		if (netisr & (1 << NETISR_ARP)) {
			netisr &= ~(1 << NETISR_ARP);
			arpintr();
		}
		if (netisr & (1 << NETISR_IP)) {
			netisr &= ~(1 << NETISR_IP);
			ipintr();
		}
#endif
#ifdef NS
		if (netisr & (1 << NETISR_NS)) {
			netisr &= ~(1 << NETISR_NS);
			nsintr();
		}
#endif
#ifdef ISO
		if (netisr & (1 << NETISR_ISO)) {
			netisr &= ~(1 << NETISR_ISO);
			clnlintr();
		}
#endif
	}
}
834 
835 /*
836  * This is called from MachUserIntr() if astpending is set.
837  * This is very similar to the tail of trap().
838  */
839 softintr(statusReg, pc)
840 	unsigned statusReg;	/* status register at time of the exception */
841 	unsigned pc;		/* program counter where to continue */
842 {
843 	register struct proc *p = curproc;
844 	register int i;
845 
846 	cnt.v_soft++;
847 	astpending = 0;
848 	while (i = CURSIG(p))
849 		psig(i);
850 	p->p_pri = p->p_usrpri;
851 	if (want_resched) {
852 		int s;
853 
854 		/*
855 		 * Since we are curproc, clock will normally just change
856 		 * our priority without moving us from one queue to another
857 		 * (since the running process is not on a queue.)
858 		 * If that happened after we setrq ourselves but before we
859 		 * swtch()'ed, we might not be on the queue indicated by
860 		 * our priority.
861 		 */
862 		s = splclock();
863 		setrq(p);
864 		p->p_stats->p_ru.ru_nivcsw++;
865 		swtch();
866 		splx(s);
867 		while (i = CURSIG(p))
868 			psig(i);
869 	}
870 	curpri = p->p_pri;
871 }
872 
#ifdef DEBUG
/*
 * Print the trap history buffer (most recent entry first), tagged with
 * `msg', then clear it.  Runs at splhigh so entries are not overwritten
 * while being printed.
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	/* Walk backwards from the newest slot, wrapping around the buffer. */
	for (i = 0; i < TRAPSIZE; i++) {
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		/* An all-zero cause marks an unused slot: stop there. */
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf("   RA %x code %d\n", trp-> ra, trp->code);
	}
	/* Reset the history buffer for the next round of logging. */
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif
900 
#ifdef X_KLUGE
/*
 * This is a kludge to allow X windows to work.
 *
 * Map `size' bytes of physical memory starting at `pa' into the fixed
 * user window at USER_MAP_ADDR for the current process, by appending
 * PTEs to UserMapPtes (loaded lazily into the TLB by trap()).  Only one
 * process may hold such mappings at a time.  Returns the user virtual
 * address of the mapping, or 0 if the window is owned by another
 * process or is full.
 */
caddr_t
vmUserMap(size, pa)
	int size;
	unsigned pa;
{
	register caddr_t v;
	unsigned off, entry;

	/* First mapping claims the window; later ones must match the pid. */
	if (nUserMapPtes == 0)
		UserMapPid = curproc->p_pid;
	else if (UserMapPid != curproc->p_pid)
		return ((caddr_t)0);
	off = pa & PGOFSET;
	size = btoc(off + size);	/* size is now a page count */
	if (nUserMapPtes + size > NPTES)
		return ((caddr_t)0);
	v = (caddr_t)(USER_MAP_ADDR + pmax_ptob(nUserMapPtes) + off);
	/* NOTE(review): 0x9ffff000 masks pa into the PTE frame field —
	 * presumably stripping KSEG bits; confirm against pte.h. */
	entry = (pa & 0x9ffff000) | PG_V | PG_M;
	if (pa >= MACH_UNCACHED_MEMORY_ADDR)
		entry |= PG_N;		/* mark mapping non-cacheable */
	/* Fill one PTE per page, advancing the frame address each time. */
	while (size > 0) {
		UserMapPtes[nUserMapPtes].pt_entry = entry;
		entry += NBPG;
		nUserMapPtes++;
		size--;
	}
	return (v);
}
933 
/*
 * Release the X-kludge mappings created by vmUserMap() and, if the
 * current process owns them, flush its TLB entries so the stale
 * translations cannot be reused.
 */
vmUserUnmap()
{
	int id;

	nUserMapPtes = 0;
	if (UserMapPid == curproc->p_pid) {
		id = curproc->p_vmspace->vm_pmap.pm_tlbpid;
		if (id >= 0)
			MachTLBFlushPID(id);
	}
	UserMapPid = 0;
}
#endif
947 
/*
 *----------------------------------------------------------------------
 *
 * MemErrorInterrupt --
 *
 *	Handler an interrupt for the control register.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 * NOTE(review): declared `extern void MemErrorInterrupt();' near the
 * top of this file but defined `static' here — a linkage mismatch that
 * old compilers tolerated; confirm which is intended.
 *
 *----------------------------------------------------------------------
 */
static void
MemErrorInterrupt()
{
#ifdef DS3100
	volatile u_short *sysCSRPtr = (u_short *)MACH_SYS_CSR_ADDR;
	u_short csr;

	csr = *sysCSRPtr;

	if (csr & MACH_CSR_MEM_ERR) {
		/* A real memory error is fatal. */
		printf("Memory error at 0x%x\n",
			*(unsigned *)MACH_WRITE_ERROR_ADDR);
		panic("Mem error interrupt");
	}
	/* Otherwise just acknowledge/clear the error condition in the CSR. */
	*sysCSRPtr = (csr & ~MACH_CSR_MBZ) | 0xff;
#endif /* DS3100 */
#ifdef DS5000
	/* Report and clear the latched error address register. */
	printf("erradr %x\n", *(unsigned *)MACH_ERROR_ADDR);
	*(unsigned *)MACH_ERROR_ADDR = 0;
	MachEmptyWriteBuffer();
#endif /* DS5000 */
}
985 
/*
 * Return the resulting PC as if the branch was executed.
 *
 * regsPtr          - saved general registers (for register operands)
 * instPC           - address of the instruction to emulate
 * fpcCSR           - FP control/status register (for coprocessor branches)
 * allowNonBranch   - if nonzero, a non-branch instruction yields
 *                    instPC + 4 instead of a panic
 *
 * For a taken branch the target comes from GetBranchDest(); for an
 * untaken conditional branch the result is instPC + 8 (skipping the
 * delay slot).
 */
unsigned
MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
	unsigned *regsPtr;
	unsigned instPC;
	unsigned fpcCSR;
	int allowNonBranch;
{
	InstFmt inst;
	unsigned retAddr;
	int condition;
	extern unsigned GetBranchDest();

#if 0
	printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
		*instPC, fpcCSR);
#endif

	inst = *(InstFmt *)instPC;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		/* Register-form jumps: target comes from a register. */
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Non-branch");
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		/* Compare-against-zero branches; rt selects the condition. */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZAL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		case OP_BGEZAL:
		case OP_BGEZ:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			panic("MachEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/* Absolute jump: 26-bit word target within the current
		 * 256MB region of the PC. */
		retAddr = (inst.JType.target << 2) |
			((unsigned)instPC & 0xF0000000);
		break;

	case OP_BEQ:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest((InstFmt *)instPC);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		/* Floating-point condition branches: taken/not-taken is
		 * decided by the FP condition bit in fpcCSR. */
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MACH_FPC_COND_BIT;
			else
				condition = !(fpcCSR & MACH_FPC_COND_BIT);
			if (condition)
				retAddr = GetBranchDest((InstFmt *)instPC);
			else
				retAddr = instPC + 8;
			break;

		default:
			if (!allowNonBranch)
				panic("MachEmulateBranch: Bad coproc branch instruction");
			retAddr = instPC + 4;
		}
		break;

	default:
		if (!allowNonBranch)
			panic("MachEmulateBranch: Non-branch instruction");
		retAddr = instPC + 4;
	}
#if 0
	printf("Target addr=%x\n", retAddr);
#endif
	return (retAddr);
}
1111 
1112 unsigned
1113 GetBranchDest(InstPtr)
1114 	InstFmt *InstPtr;
1115 {
1116 	return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1117 }
1118 
/*
 * This routine is called by procxmt() to single step one instruction.
 * We do this by storing a break instruction after the current instruction,
 * resuming execution, and then restoring the old instruction.
 * Returns 0 on success, EFAULT if the breakpoint cannot be placed.
 * (The breakpoint itself is recognized and removed in the T_BREAK+T_USER
 * case of trap().)
 */
cpu_singlestep(p)
	register struct proc *p;
{
	register unsigned va;
	register int *locr0 = p->p_md.md_regs;
	int i;

	/* compute next address after current location */
	va = MachEmulateBranch(locr0, locr0[PC], 0, 1);
	/*
	 * NOTE(review): with `||', the second clause is reachable only
	 * when md_ss_addr is already 0, so it only rejects va == 0;
	 * `&&' may have been intended here — confirm.
	 */
	if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
	    !useracc((caddr_t)va, 4, B_READ)) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
			p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	/* Save the original instruction and plant the break instruction. */
	p->p_md.md_ss_addr = va;
	p->p_md.md_ss_instr = fuiword(va);
	i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
	if (i < 0) {
		vm_offset_t sa, ea;
		int rv;

		/* Text page is write-protected: temporarily allow writes,
		 * patch the instruction, then restore the protection. */
		sa = trunc_page((vm_offset_t)va);
		ea = round_page((vm_offset_t)va+sizeof(int)-1);
		rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
			VM_PROT_DEFAULT, FALSE);
		if (rv == KERN_SUCCESS) {
			i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
		}
	}
	if (i < 0)
		return (EFAULT);
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x)\n",
		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
		p->p_md.md_ss_instr, locr0[PC]); /* XXX */
	return (0);
}
1163