1 /*
2 * Copyright (c) 1988 University of Utah.
3 * Copyright (c) 1992, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * the Systems Programming Group of the University of Utah Computer
8 * Science Department and Ralph Campbell.
9 *
10 * %sccs.include.redist.c%
11 *
12 * from: Utah $Hdr: trap.c 1.32 91/04/06$
13 *
14 * @(#)trap.c 8.7 (Berkeley) 06/02/95
15 */
16
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/kernel.h>
21 #include <sys/signalvar.h>
22 #include <sys/syscall.h>
23 #include <sys/user.h>
24 #include <sys/buf.h>
25 #ifdef KTRACE
26 #include <sys/ktrace.h>
27 #endif
28 #include <net/netisr.h>
29
30 #include <machine/trap.h>
31 #include <machine/psl.h>
32 #include <machine/reg.h>
33 #include <machine/cpu.h>
34 #include <machine/pte.h>
35 #include <machine/mips_opcode.h>
36
37 #include <vm/vm.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40
41 #include <pmax/pmax/clockreg.h>
42 #include <pmax/pmax/kn01.h>
43 #include <pmax/pmax/kn02.h>
44 #include <pmax/pmax/kmin.h>
45 #include <pmax/pmax/maxine.h>
46 #include <pmax/pmax/kn03.h>
47 #include <pmax/pmax/asic.h>
48 #include <pmax/pmax/turbochannel.h>
49
50 #include <pmax/stand/dec_prom.h>
51
52 #include <asc.h>
53 #include <sii.h>
54 #include <le.h>
55 #include <dc.h>
56
/*
 * Proc whose floating-point register state is currently loaded in the
 * FPU (coprocessor 1); used for lazy FP context switching (see the
 * T_COP_UNUSABLE case in trap() below).
 */
struct proc *machFPCurProcPtr;	/* pointer to last proc to use FP */

/* Locore (assembly) exception entry points. */
extern void MachKernGenException();
extern void MachUserGenException();
extern void MachKernIntr();
extern void MachUserIntr();
extern void MachTLBModException();
extern void MachTLBMissException();
/* Compute the target PC when an exception hit a branch/branch-delay slot. */
extern unsigned MachEmulateBranch();

/*
 * Exception dispatch table, indexed by the CP0 cause-register exception
 * code (0-15), with a second bank of 16 entries used for exceptions taken
 * from user mode.  The locore vector code indexes this table to select a
 * first-level handler.
 */
void (*machExceptionTable[])() = {
/*
 * The kernel exception handlers.
 */
	MachKernIntr,			/* external interrupt */
	MachKernGenException,		/* TLB modification */
	MachTLBMissException,		/* TLB miss (load or instr. fetch) */
	MachTLBMissException,		/* TLB miss (store) */
	MachKernGenException,		/* address error (load or I-fetch) */
	MachKernGenException,		/* address error (store) */
	MachKernGenException,		/* bus error (I-fetch) */
	MachKernGenException,		/* bus error (load or store) */
	MachKernGenException,		/* system call */
	MachKernGenException,		/* breakpoint */
	MachKernGenException,		/* reserved instruction */
	MachKernGenException,		/* coprocessor unusable */
	MachKernGenException,		/* arithmetic overflow */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
	MachKernGenException,		/* reserved */
/*
 * The user exception handlers.
 */
	MachUserIntr,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
	MachUserGenException,
};
107
/*
 * Human-readable names for the 16 exception codes, indexed the same way
 * as machExceptionTable; used for diagnostic printouts.
 */
char *trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"reserved 13",
	"reserved 14",
	"reserved 15",
};
126
#ifdef DEBUG
#define TRAPSIZE	10
/*
 * Circular trap-history buffer for post-mortem debugging; trp always
 * points at the next free slot and wraps at TRAPSIZE entries.
 */
struct trapdebug {		/* trap history buffer for debugging */
	u_int	status;		/* CP0 status register at trap time */
	u_int	cause;		/* CP0 cause register at trap time */
	u_int	vadr;		/* faulting virtual address (if any) */
	u_int	pc;		/* PC at trap time */
	u_int	ra;		/* return-address register at trap time */
	u_int	code;		/* extra info (e.g. negated syscall number) */
} trapdebug[TRAPSIZE], *trp = trapdebug;

u_int intr_level;		/* number of nested interrupts */
#endif
140
/* Forward declarations of the model-specific error-interrupt handlers. */
static void pmax_errintr();
static void kn02_errintr(), kn02ba_errintr();
#ifdef DS5000_240
static void kn03_errintr();
#endif
static unsigned kn02ba_recover_erradr();
/* TURBOchannel option-slot interrupt routing, filled in at autoconfig time. */
extern tc_option_t tc_slot_info[TC_MAX_LOGICAL_SLOTS];
extern u_long kmin_tc3_imask, xine_tc3_imask;
extern const struct callback *callv;	/* DEC PROM callback vector */
#ifdef DS5000_240
extern u_long kn03_tc3_imask;
#endif
/*
 * Model-specific hardware interrupt handler (pmax_intr, kn02_intr, ...),
 * selected during startup once the CPU model is identified; returns the
 * new spl value.  Null if no model-specific handler is installed.
 */
int (*pmax_hardware_intr)() = (int (*)())0;
extern volatile struct chiptime *Mach_clock_addr;
extern long intrcnt[];
156
/*
 * Handle an exception.
 * Called from MachKernGenException() or MachUserGenException()
 * when a processor trap occurs.
 * In the case of a kernel trap, we return the pc where to resume if
 * ((struct pcb *)UADDR)->pcb_onfault is set, otherwise, return old pc.
 */
unsigned
trap(statusReg, causeReg, vadr, pc, args)
	unsigned statusReg;	/* status register at time of the exception */
	unsigned causeReg;	/* cause register at time of exception */
	unsigned vadr;		/* address (if any) the fault occured on */
	unsigned pc;		/* program counter where to continue */
	/*
	 * `args' (implicitly int, K&R) marks the start of the saved
	 * register frame pushed by locore; it is only ever accessed as
	 * ((int *)&args)[n] below — presumably the layout matches the
	 * locore frame; verify against the assembly before changing.
	 */
{
	register int type, i;
	unsigned ucode = 0;	/* signal code passed to trapsignal() */
	register struct proc *p = curproc;
	u_quad_t sticks;	/* system-time snapshot, set for user traps only */
	vm_prot_t ftype;
	extern unsigned onfault_table[];

#ifdef DEBUG
	/* Record this trap in the circular trap-history buffer. */
	trp->status = statusReg;
	trp->cause = causeReg;
	trp->vadr = vadr;
	trp->pc = pc;
	trp->ra = !USERMODE(statusReg) ? ((int *)&args)[19] :
		p->p_md.md_regs[RA];
	trp->code = 0;
	if (++trp == &trapdebug[TRAPSIZE])
		trp = trapdebug;
#endif

	cnt.v_trap++;
	/* Extract the exception code from the cause register. */
	type = (causeReg & MACH_CR_EXC_CODE) >> MACH_CR_EXC_CODE_SHIFT;
	if (USERMODE(statusReg)) {
		type |= T_USER;
		sticks = p->p_sticks;
	}

	/*
	 * Enable hardware interrupts if they were on before.
	 * We only respond to software interrupts when returning to user mode.
	 */
	if (statusReg & MACH_SR_INT_ENA_PREV)
		splx((statusReg & MACH_HARD_INT_MASK) | MACH_SR_INT_ENA_CUR);

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (kernel VAs have the sign bit set) */
		if ((int)vadr < 0) {
			register pt_entry_t *pte;
			register unsigned entry;
			register vm_offset_t pa;

			pte = kvtopte(vadr);
			entry = pte->pt_entry;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (entry & PG_RO) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* Mark the page modified and refresh the TLB entry. */
			entry |= PG_M;
			pte->pt_entry = entry;
			vadr &= ~PGOFSET;
			MachTLBUpdate(vadr, entry);
			pa = entry & PG_FRAME;
#ifdef ATTR
			pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
			if (!IS_VM_PHYSADDR(pa))
				panic("trap: ktlbmod: unmanaged page");
			PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
			return (pc);
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		register pt_entry_t *pte;
		register unsigned entry;
		register vm_offset_t pa;
		pmap_t pmap = &p->p_vmspace->vm_pmap;

		if (!(pte = pmap_segmap(pmap, vadr)))
			panic("trap: utlbmod: invalid segmap");
		pte += (vadr >> PGSHIFT) & (NPTEPG - 1);
		entry = pte->pt_entry;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (entry & PG_RO) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto dofault;
		}
		/*
		 * Mark the page modified; the TLB entry must carry the
		 * pmap's address-space id (tlbpid) in the VPN field.
		 */
		entry |= PG_M;
		pte->pt_entry = entry;
		vadr = (vadr & ~PGOFSET) |
			(pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
		MachTLBUpdate(vadr, entry);
		pa = entry & PG_FRAME;
#ifdef ATTR
		pmap_attributes[atop(pa)] |= PMAP_ATTR_MOD;
#else
		if (!IS_VM_PHYSADDR(pa))
			panic("trap: utlbmod: unmanaged page");
		PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
#endif
		/* Reached via fallthrough from kernel mode as well. */
		if (!USERMODE(statusReg))
			return (pc);
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if ((int)vadr < 0) {
			register vm_offset_t va;
			int rv;

		kernel_fault:
			va = trunc_page((vm_offset_t)vadr);
			rv = vm_fault(kernel_map, va, ftype, FALSE);
			if (rv == KERN_SUCCESS)
				return (pc);
			/* copyin/copyout-style recovery via onfault table */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if ((i = ((struct pcb *)UADDR)->pcb_onfault) == 0)
			goto err;
		/* check for fuswintr() or suswintr() getting a page fault */
		/* (onfault index 4 means: fail immediately, do not fault in) */
		if (i == 4)
			return (onfault_table[i]);
		goto dofault;

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto dofault;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	dofault:
	    {
		register vm_offset_t va;
		register struct vmspace *vm;
		register vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vm_offset_t)vadr);
		rv = vm_fault(map, va, ftype, FALSE);
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (!USERMODE(statusReg))
				return (pc);
			goto out;
		}
		if (!USERMODE(statusReg)) {
			/* kernel fault during copyin/copyout: recover */
			if (i = ((struct pcb *)UADDR)->pcb_onfault) {
				((struct pcb *)UADDR)->pcb_onfault = 0;
				return (onfault_table[i]);
			}
			goto err;
		}
		ucode = vadr;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		i = SIGSEGV;
		break;

	case T_SYSCALL+T_USER:
	    {
		register int *locr0 = p->p_md.md_regs;
		register struct sysent *callp;
		unsigned int code;
		int numsys;
		struct args {
			int i[8];
		} args;
		int rval[2];
		struct sysent *systab;
		extern int nsysent;
#ifdef ULTRIXCOMPAT
		extern struct sysent ultrixsysent[];
		extern int ultrixnsysent;
#endif

		cnt.v_syscall++;
		/* compute next PC after syscall instruction */
		/* (cause sign bit = BD: syscall was in a branch delay slot) */
		if ((int)causeReg < 0)
			locr0[PC] = MachEmulateBranch(locr0, pc, 0, 0);
		else
			locr0[PC] += 4;
		systab = sysent;
		numsys = nsysent;
#ifdef ULTRIXCOMPAT
		if (p->p_md.md_flags & MDP_ULTRIX) {
			systab = ultrixsysent;
			numsys = ultrixnsysent;
		}
#endif
		code = locr0[V0];
		switch (code) {
		case SYS_syscall:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0[A0];
			if (code >= numsys)
				callp = &systab[SYS_syscall]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_argsize;
			args.i[0] = locr0[A1];
			args.i[1] = locr0[A2];
			args.i[2] = locr0[A3];
			/* remaining args are on the user stack past the
			 * 4-register argument save area */
			if (i > 3 * sizeof(register_t)) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(register_t)),
					(caddr_t)&args.i[3],
					(u_int)(i - 3 * sizeof(register_t)));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_argsize,
							args.i);
#endif
					goto done;
				}
			}
			break;

		case SYS___syscall:
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0[A0 + _QUAD_LOWWORD];
			if (code >= numsys)
				callp = &systab[SYS_syscall]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_argsize;
			args.i[0] = locr0[A2];
			args.i[1] = locr0[A3];
			if (i > 2 * sizeof(register_t)) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(register_t)),
					(caddr_t)&args.i[2],
					(u_int)(i - 2 * sizeof(register_t)));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_argsize,
							args.i);
#endif
					goto done;
				}
			}
			break;

		default:
			if (code >= numsys)
				callp = &systab[SYS_syscall]; /* (illegal) */
			else
				callp = &systab[code];
			i = callp->sy_argsize;
			args.i[0] = locr0[A0];
			args.i[1] = locr0[A1];
			args.i[2] = locr0[A2];
			args.i[3] = locr0[A3];
			if (i > 4 * sizeof(register_t)) {
				i = copyin((caddr_t)(locr0[SP] +
						4 * sizeof(register_t)),
					(caddr_t)&args.i[4],
					(u_int)(i - 4 * sizeof(register_t)));
				if (i) {
					locr0[V0] = i;
					locr0[A3] = 1;
#ifdef KTRACE
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p->p_tracep, code,
							callp->sy_argsize,
							args.i);
#endif
					goto done;
				}
			}
		}
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_argsize, args.i);
#endif
		rval[0] = 0;
		rval[1] = locr0[V1];
#ifdef DEBUG
		if (trp == trapdebug)
			trapdebug[TRAPSIZE - 1].code = code;
		else
			trp[-1].code = code;
#endif
		/* Dispatch to the actual system-call implementation. */
		i = (*callp->sy_call)(p, &args, rval);
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		locr0 = p->p_md.md_regs;
#ifdef DEBUG
		{ int s;
		s = splhigh();
		trp->status = statusReg;
		trp->cause = causeReg;
		trp->vadr = locr0[SP];
		trp->pc = locr0[PC];
		trp->ra = locr0[RA];
		trp->code = -code;
		if (++trp == &trapdebug[TRAPSIZE])
			trp = trapdebug;
		splx(s);
		}
#endif
		switch (i) {
		case 0:
			/* success: V0/V1 hold return values, A3 clear */
			locr0[V0] = rval[0];
			locr0[V1] = rval[1];
			locr0[A3] = 0;
			break;

		case ERESTART:
			/* back the PC up so the syscall is re-executed */
			locr0[PC] = pc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			/* error: errno in V0, A3 set */
			locr0[V0] = i;
			locr0[A3] = 1;
		}
	done:
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET))
			ktrsysret(p->p_tracep, code, i, rval[0]);
#endif
		goto out;
	    }

	case T_BREAK+T_USER:
	    {
		register unsigned va, instr;

		/* compute address of break instruction */
		va = pc;
		if ((int)causeReg < 0)	/* BD bit: break was in a delay slot */
			va += 4;

		/* read break instruction */
		instr = fuiword((caddr_t)va);
#if 0
		printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
			p->p_comm, p->p_pid, instr, pc,
			p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
#endif
#ifdef KADB
		if (instr == MACH_BREAK_BRKPT || instr == MACH_BREAK_SSTEP)
			goto err;
#endif
		/* not our single-step breakpoint: deliver SIGTRAP */
		if (p->p_md.md_ss_addr != va || instr != MACH_BREAK_SSTEP) {
			i = SIGTRAP;
			break;
		}

		/* restore original instruction and clear BP */
		i = suiword((caddr_t)va, p->p_md.md_ss_instr);
		if (i < 0) {
			/* text page is write-protected: open it up briefly */
			vm_offset_t sa, ea;
			int rv;

			sa = trunc_page((vm_offset_t)va);
			ea = round_page((vm_offset_t)va+sizeof(int)-1);
			rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
				VM_PROT_DEFAULT, FALSE);
			if (rv == KERN_SUCCESS) {
				i = suiword((caddr_t)va, p->p_md.md_ss_instr);
				(void) vm_map_protect(&p->p_vmspace->vm_map,
					sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
					FALSE);
			}
		}
		if (i < 0)
			printf("Warning: can't restore instruction at %x: %x\n",
				p->p_md.md_ss_addr, p->p_md.md_ss_instr);
		p->p_md.md_ss_addr = 0;
		i = SIGTRAP;
		break;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		break;

	case T_COP_UNUSABLE+T_USER:
		/* CE field must name coprocessor 1 (the FPU) */
		if ((causeReg & MACH_CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			break;
		}
		/* lazily switch FP state to this process and enable CP1 */
		MachSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
		machFPCurProcPtr = p;
		p->p_md.md_regs[PS] |= MACH_SR_COP_1_BIT;
		p->p_md.md_flags |= MDP_FPUSED;
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if (i = ((struct pcb *)UADDR)->pcb_onfault) {
			((struct pcb *)UADDR)->pcb_onfault = 0;
			return (onfault_table[i]);
		}
		/* FALLTHROUGH */

	default:
	err:
#ifdef KADB
	    {
		extern struct pcb kdbpcb;

		/* build a register snapshot for the kernel debugger */
		if (USERMODE(statusReg))
			kdbpcb = p->p_addr->u_pcb;
		else {
			kdbpcb.pcb_regs[ZERO] = 0;
			kdbpcb.pcb_regs[AST] = ((int *)&args)[2];
			kdbpcb.pcb_regs[V0] = ((int *)&args)[3];
			kdbpcb.pcb_regs[V1] = ((int *)&args)[4];
			kdbpcb.pcb_regs[A0] = ((int *)&args)[5];
			kdbpcb.pcb_regs[A1] = ((int *)&args)[6];
			kdbpcb.pcb_regs[A2] = ((int *)&args)[7];
			kdbpcb.pcb_regs[A3] = ((int *)&args)[8];
			kdbpcb.pcb_regs[T0] = ((int *)&args)[9];
			kdbpcb.pcb_regs[T1] = ((int *)&args)[10];
			kdbpcb.pcb_regs[T2] = ((int *)&args)[11];
			kdbpcb.pcb_regs[T3] = ((int *)&args)[12];
			kdbpcb.pcb_regs[T4] = ((int *)&args)[13];
			kdbpcb.pcb_regs[T5] = ((int *)&args)[14];
			kdbpcb.pcb_regs[T6] = ((int *)&args)[15];
			kdbpcb.pcb_regs[T7] = ((int *)&args)[16];
			kdbpcb.pcb_regs[T8] = ((int *)&args)[17];
			kdbpcb.pcb_regs[T9] = ((int *)&args)[18];
			kdbpcb.pcb_regs[RA] = ((int *)&args)[19];
			kdbpcb.pcb_regs[MULLO] = ((int *)&args)[21];
			kdbpcb.pcb_regs[MULHI] = ((int *)&args)[22];
			kdbpcb.pcb_regs[PC] = pc;
			kdbpcb.pcb_regs[SR] = statusReg;
			bzero((caddr_t)&kdbpcb.pcb_regs[F0], 33 * sizeof(int));
		}
		if (kdb(causeReg, vadr, p, !USERMODE(statusReg)))
			return (kdbpcb.pcb_regs[PC]);
	    }
#else
#ifdef DEBUG
		trapDump("trap");
#endif
#endif
		panic("trap");
	}
	/* post the signal chosen above to the current process */
	trapsignal(p, i, ucode);
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	/* take pending signals */
	while ((i = CURSIG(p)) != 0)
		postsig(i);
	p->p_priority = p->p_usrpri;
	astpending = 0;
	if (want_resched) {
		int s;

		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we put ourselves on the run queue
		 * but before we switched, we might not be on the queue
		 * indicated by our priority.
		 */
		s = splstatclock();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		splx(s);
		while ((i = CURSIG(p)) != 0)
			postsig(i);
	}

	/*
	 * If profiling, charge system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - sticks) * psratio);
	}

	curpriority = p->p_priority;
	return (pc);
}
713
714 /*
715 * Handle an interrupt.
716 * Called from MachKernIntr() or MachUserIntr()
717 * Note: curproc might be NULL.
718 */
interrupt(statusReg,causeReg,pc)719 interrupt(statusReg, causeReg, pc)
720 unsigned statusReg; /* status register at time of the exception */
721 unsigned causeReg; /* cause register at time of exception */
722 unsigned pc; /* program counter where to continue */
723 {
724 register unsigned mask;
725 struct clockframe cf;
726
727 #ifdef DEBUG
728 trp->status = statusReg;
729 trp->cause = causeReg;
730 trp->vadr = 0;
731 trp->pc = pc;
732 trp->ra = 0;
733 trp->code = 0;
734 if (++trp == &trapdebug[TRAPSIZE])
735 trp = trapdebug;
736
737 intr_level++;
738 #endif
739
740 cnt.v_intr++;
741 mask = causeReg & statusReg; /* pending interrupts & enable mask */
742 if (pmax_hardware_intr)
743 splx((*pmax_hardware_intr)(mask, pc, statusReg, causeReg));
744 if (mask & MACH_INT_MASK_5) {
745 intrcnt[7]++;
746 if (!USERMODE(statusReg)) {
747 #ifdef DEBUG
748 trapDump("fpintr");
749 #else
750 printf("FPU interrupt: PC %x CR %x SR %x\n",
751 pc, causeReg, statusReg);
752 #endif
753 } else
754 MachFPInterrupt(statusReg, causeReg, pc);
755 }
756 if (mask & MACH_SOFT_INT_MASK_0) {
757 clearsoftclock();
758 cnt.v_soft++;
759 intrcnt[0]++;
760 softclock();
761 }
762 /* process network interrupt if we trapped or will very soon */
763 if ((mask & MACH_SOFT_INT_MASK_1) ||
764 netisr && (statusReg & MACH_SOFT_INT_MASK_1)) {
765 clearsoftnet();
766 cnt.v_soft++;
767 intrcnt[1]++;
768 #ifdef INET
769 if (netisr & (1 << NETISR_ARP)) {
770 netisr &= ~(1 << NETISR_ARP);
771 arpintr();
772 }
773 if (netisr & (1 << NETISR_IP)) {
774 netisr &= ~(1 << NETISR_IP);
775 ipintr();
776 }
777 #endif
778 #ifdef NS
779 if (netisr & (1 << NETISR_NS)) {
780 netisr &= ~(1 << NETISR_NS);
781 nsintr();
782 }
783 #endif
784 #ifdef ISO
785 if (netisr & (1 << NETISR_ISO)) {
786 netisr &= ~(1 << NETISR_ISO);
787 clnlintr();
788 }
789 #endif
790 }
791 #ifdef DEBUG
792 intr_level--;
793 #endif
794 }
795
/*
 * Handle pmax (DECstation 2100/3100) interrupts.
 * Called via pmax_hardware_intr; `mask' is the set of pending, enabled
 * interrupts.  Returns the spl value to restore.
 */
pmax_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register volatile struct chiptime *c = Mach_clock_addr;
	struct clockframe cf;
	int temp;

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_3) {
		intrcnt[6]++;
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		/* keep clock interrupts enabled */
		causeReg &= ~MACH_INT_MASK_3;
	}
	/* Re-enable clock interrupts */
	splx(MACH_INT_MASK_3 | MACH_SR_INT_ENA_CUR);
#if NSII > 0
	if (mask & MACH_INT_MASK_0) {		/* SII SCSI controller */
		intrcnt[2]++;
		siiintr(0);
	}
#endif
#if NLE > 0
	if (mask & MACH_INT_MASK_1) {		/* LANCE ethernet */
		intrcnt[3]++;
		leintr(0);
	}
#endif
#if NDC > 0
	if (mask & MACH_INT_MASK_2) {		/* DC serial lines */
		intrcnt[4]++;
		dcintr(0);
	}
#endif
	if (mask & MACH_INT_MASK_4) {		/* memory error */
		intrcnt[5]++;
		pmax_errintr();
	}
	/* new spl: everything still pending stays masked, except as cleared */
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
846
/*
 * Handle hardware interrupts for the KN02. (DECstation 5000/200)
 * Returns spl value.
 */
kn02_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register unsigned i, m;
	register volatile struct chiptime *c = Mach_clock_addr;
	register unsigned csr;
	int temp;
	struct clockframe cf;
	static int warned = 0;		/* rate-limits the PSU warning */

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		/* power-supply overheat warning is latched in the system CSR */
		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		if ((csr & KN02_CSR_PSWARN) && !warned) {
			warned = 1;
			printf("WARNING: power supply is overheating!\n");
		} else if (warned && !(csr & KN02_CSR_PSWARN)) {
			warned = 0;
			printf("WARNING: power supply is OK again\n");
		}
		intrcnt[6]++;

		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);

		/* keep clock interrupts enabled */
		causeReg &= ~MACH_INT_MASK_1;
	}
	/* Re-enable clock interrupts */
	splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	if (mask & MACH_INT_MASK_0) {
		/* maps CSR I/O interrupt bit number to intrcnt[] slot */
		static int map[8] = { 8, 8, 8, 8, 8, 4, 3, 2 };

		csr = *(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR);
		/* pending AND enabled I/O slot interrupts */
		m = csr & (csr >> KN02_CSR_IOINTEN_SHIFT) & KN02_CSR_IOINT;
#if 0
		/* NOTE(review): MACHPHYS_TO_UNCACHED below is missing an
		 * underscore (MACH_PHYS_TO_UNCACHED); harmless while #if 0,
		 * but would not compile if enabled. */
		*(unsigned *)MACHPHYS_TO_UNCACHED(KN02_SYS_CSR) =
			(csr & ~(KN02_CSR_WRESERVED | 0xFF)) |
			(m << KN02_CSR_IOINTEN_SHIFT);
#endif
		/* dispatch each pending slot to its registered handler */
		for (i = 0; m; i++, m >>= 1) {
			if (!(m & 1))
				continue;
			intrcnt[map[i]]++;
			if (tc_slot_info[i].intr)
				(*tc_slot_info[i].intr)(tc_slot_info[i].unit);
			else
				printf("spurious interrupt %d\n", i);
		}
#if 0
		*(unsigned *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CSR) =
			csr & ~(KN02_CSR_WRESERVED | 0xFF);
#endif
	}
	if (mask & MACH_INT_MASK_3) {		/* memory error */
		intrcnt[5]++;
		kn02_errintr();
	}
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
917
/*
 * 3min hardware interrupts. (DECstation 5000/1xx)
 * Most devices interrupt through the IOASIC; MACH_INT_MASK_3 collects
 * them and the IOASIC interrupt register (intrp) says which.
 * Returns the spl value to restore.
 */
kmin_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_IMSK);
	volatile u_int *intrp =
		(volatile u_int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_INTR);
	unsigned int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* rate-limits the PSU warning */

	/* restrict the IOASIC interrupt mask to what we service here */
	old_mask = *imaskp & kmin_tc3_imask;
	*imaskp = old_mask;

	if (mask & MACH_INT_MASK_4)	/* halt button: back to the PROM */
		(*callv->halt)((int *)0, 0);
	if (mask & MACH_INT_MASK_3) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & KMIN_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KMIN_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge DMA error conditions by clearing their bits */
		if (intr & (KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E))
			*intrp &= ~(KMIN_INTR_SCSI_OVRUN | KMIN_INTR_SCSI_READ_E);

		if (intr & KMIN_INTR_LANCE_READ_E)
			*intrp &= ~KMIN_INTR_LANCE_READ_E;

		if (intr & KMIN_INTR_TIMEOUT)
			kn02ba_errintr();

		if (intr & KMIN_INTR_CLOCK) {
			temp = c->regc;	/* XXX clear interrupt bits */
			cf.pc = pc;
			cf.sr = statusReg;
			hardclock(&cf);
		}

		if ((intr & KMIN_INTR_SCC_0) &&
			tc_slot_info[KMIN_SCC0_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC0_SLOT].intr))
			(tc_slot_info[KMIN_SCC0_SLOT].unit);

		if ((intr & KMIN_INTR_SCC_1) &&
			tc_slot_info[KMIN_SCC1_SLOT].intr)
			(*(tc_slot_info[KMIN_SCC1_SLOT].intr))
			(tc_slot_info[KMIN_SCC1_SLOT].unit);

		if ((intr & KMIN_INTR_SCSI) &&
			tc_slot_info[KMIN_SCSI_SLOT].intr)
			(*(tc_slot_info[KMIN_SCSI_SLOT].intr))
			(tc_slot_info[KMIN_SCSI_SLOT].unit);

		if ((intr & KMIN_INTR_LANCE) &&
			tc_slot_info[KMIN_LANCE_SLOT].intr)
			(*(tc_slot_info[KMIN_LANCE_SLOT].intr))
			(tc_slot_info[KMIN_LANCE_SLOT].unit);

		if (user_warned && ((intr & KMIN_INTR_PSWARN) == 0)) {
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KMIN_INTR_PSWARN) && (user_warned < 3)) {
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	/* TURBOchannel option slots interrupt directly on INT 0-2 */
	if ((mask & MACH_INT_MASK_0) && tc_slot_info[0].intr)
		(*tc_slot_info[0].intr)(tc_slot_info[0].unit);
	if ((mask & MACH_INT_MASK_1) && tc_slot_info[1].intr)
		(*tc_slot_info[1].intr)(tc_slot_info[1].unit);
	if ((mask & MACH_INT_MASK_2) && tc_slot_info[2].intr)
		(*tc_slot_info[2].intr)(tc_slot_info[2].unit);
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1009
/*
 * Maxine hardware interrupts. (Personal DECstation 5000/xx)
 * Devices interrupt through the IOASIC on MACH_INT_MASK_3; the IOASIC
 * interrupt register (intrp) identifies the source.
 * Returns the spl value to restore.
 */
xine_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(XINE_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;

	/* restrict the IOASIC interrupt mask to what we service here */
	old_mask = *imaskp & xine_tc3_imask;
	*imaskp = old_mask;

	if (mask & MACH_INT_MASK_4)	/* halt button: back to the PROM */
		(*callv->halt)((int *)0, 0);

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_3) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & XINE_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~XINE_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge DMA error conditions by clearing their bits */
		if (intr & (XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E))
			*intrp &= ~(XINE_INTR_SCSI_OVRUN | XINE_INTR_SCSI_READ_E);

		if (intr & XINE_INTR_LANCE_READ_E)
			*intrp &= ~XINE_INTR_LANCE_READ_E;

		if ((intr & XINE_INTR_SCC_0) &&
			tc_slot_info[XINE_SCC0_SLOT].intr)
			(*(tc_slot_info[XINE_SCC0_SLOT].intr))
			(tc_slot_info[XINE_SCC0_SLOT].unit);

		if ((intr & XINE_INTR_DTOP_RX) &&
			tc_slot_info[XINE_DTOP_SLOT].intr)
			(*(tc_slot_info[XINE_DTOP_SLOT].intr))
			(tc_slot_info[XINE_DTOP_SLOT].unit);

		if ((intr & XINE_INTR_FLOPPY) &&
			tc_slot_info[XINE_FLOPPY_SLOT].intr)
			(*(tc_slot_info[XINE_FLOPPY_SLOT].intr))
			(tc_slot_info[XINE_FLOPPY_SLOT].unit);

		if ((intr & XINE_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & XINE_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & XINE_INTR_ISDN) &&
			tc_slot_info[XINE_ISDN_SLOT].intr)
			(*(tc_slot_info[XINE_ISDN_SLOT].intr))
			(tc_slot_info[XINE_ISDN_SLOT].unit);

		if ((intr & XINE_INTR_SCSI) &&
			tc_slot_info[XINE_SCSI_SLOT].intr)
			(*(tc_slot_info[XINE_SCSI_SLOT].intr))
			(tc_slot_info[XINE_SCSI_SLOT].unit);

		if ((intr & XINE_INTR_LANCE) &&
			tc_slot_info[XINE_LANCE_SLOT].intr)
			(*(tc_slot_info[XINE_LANCE_SLOT].intr))
			(tc_slot_info[XINE_LANCE_SLOT].unit);

	}
	if (mask & MACH_INT_MASK_2)	/* memory error */
		kn02ba_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
1109
#ifdef DS5000_240
/*
 * 3Max+ hardware interrupts. (DECstation 5000/240) UNTESTED!!
 * Like the 3min, devices interrupt through the IOASIC, but here on
 * MACH_INT_MASK_0; the IOASIC interrupt register identifies the source.
 * Returns the spl value to restore.
 */
kn03_intr(mask, pc, statusReg, causeReg)
	unsigned mask;
	unsigned pc;
	unsigned statusReg;
	unsigned causeReg;
{
	register u_int intr;
	register volatile struct chiptime *c = Mach_clock_addr;
	volatile u_int *imaskp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_IMSK);
	volatile u_int *intrp = (volatile u_int *)
		MACH_PHYS_TO_UNCACHED(KN03_REG_INTR);
	u_int old_mask;
	struct clockframe cf;
	int temp;
	static int user_warned = 0;	/* rate-limits the PSU warning */

	/* restrict the IOASIC interrupt mask to what we service here */
	old_mask = *imaskp & kn03_tc3_imask;
	*imaskp = old_mask;

	if (mask & MACH_INT_MASK_4)	/* halt button: back to the PROM */
		(*callv->halt)((int *)0, 0);

	/* handle clock interrupts ASAP */
	if (mask & MACH_INT_MASK_1) {
		temp = c->regc;	/* XXX clear interrupt bits */
		cf.pc = pc;
		cf.sr = statusReg;
		hardclock(&cf);
		causeReg &= ~MACH_INT_MASK_1;
		/* reenable clock interrupts */
		splx(MACH_INT_MASK_1 | MACH_SR_INT_ENA_CUR);
	}
	if (mask & MACH_INT_MASK_0) {
		intr = *intrp;
		/* masked interrupts are still observable */
		intr &= old_mask;

		if (intr & KN03_INTR_SCSI_PTR_LOAD) {
			*intrp &= ~KN03_INTR_SCSI_PTR_LOAD;
#ifdef notdef
			asc_dma_intr();
#endif
		}

		/* acknowledge DMA error conditions by clearing their bits */
		if (intr & (KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E))
			*intrp &= ~(KN03_INTR_SCSI_OVRUN | KN03_INTR_SCSI_READ_E);

		if (intr & KN03_INTR_LANCE_READ_E)
			*intrp &= ~KN03_INTR_LANCE_READ_E;

		if ((intr & KN03_INTR_SCC_0) &&
			tc_slot_info[KN03_SCC0_SLOT].intr)
			(*(tc_slot_info[KN03_SCC0_SLOT].intr))
			(tc_slot_info[KN03_SCC0_SLOT].unit);

		if ((intr & KN03_INTR_SCC_1) &&
			tc_slot_info[KN03_SCC1_SLOT].intr)
			(*(tc_slot_info[KN03_SCC1_SLOT].intr))
			(tc_slot_info[KN03_SCC1_SLOT].unit);

		if ((intr & KN03_INTR_TC_0) &&
			tc_slot_info[0].intr)
			(*(tc_slot_info[0].intr))
			(tc_slot_info[0].unit);

		if ((intr & KN03_INTR_TC_1) &&
			tc_slot_info[1].intr)
			(*(tc_slot_info[1].intr))
			(tc_slot_info[1].unit);

		if ((intr & KN03_INTR_TC_2) &&
			tc_slot_info[2].intr)
			(*(tc_slot_info[2].intr))
			(tc_slot_info[2].unit);

		if ((intr & KN03_INTR_SCSI) &&
			tc_slot_info[KN03_SCSI_SLOT].intr)
			(*(tc_slot_info[KN03_SCSI_SLOT].intr))
			(tc_slot_info[KN03_SCSI_SLOT].unit);

		if ((intr & KN03_INTR_LANCE) &&
			tc_slot_info[KN03_LANCE_SLOT].intr)
			(*(tc_slot_info[KN03_LANCE_SLOT].intr))
			(tc_slot_info[KN03_LANCE_SLOT].unit);

		if (user_warned && ((intr & KN03_INTR_PSWARN) == 0)) {
			printf("%s\n", "Power supply ok now.");
			user_warned = 0;
		}
		if ((intr & KN03_INTR_PSWARN) && (user_warned < 3)) {
			user_warned++;
			printf("%s\n", "Power supply overheating");
		}
	}
	if (mask & MACH_INT_MASK_3)	/* memory error */
		kn03_errintr();
	return ((statusReg & ~causeReg & MACH_HARD_INT_MASK) |
		MACH_SR_INT_ENA_CUR);
}
#endif /* DS5000_240 */
1215
1216 /*
1217 * This is called from MachUserIntr() if astpending is set.
1218 * This is very similar to the tail of trap().
1219 */
softintr(statusReg,pc)1220 softintr(statusReg, pc)
1221 unsigned statusReg; /* status register at time of the exception */
1222 unsigned pc; /* program counter where to continue */
1223 {
1224 register struct proc *p = curproc;
1225 int sig;
1226
1227 cnt.v_soft++;
1228 /* take pending signals */
1229 while ((sig = CURSIG(p)) != 0)
1230 postsig(sig);
1231 p->p_priority = p->p_usrpri;
1232 astpending = 0;
1233 if (p->p_flag & P_OWEUPC) {
1234 p->p_flag &= ~P_OWEUPC;
1235 ADDUPROF(p);
1236 }
1237 if (want_resched) {
1238 int s;
1239
1240 /*
1241 * Since we are curproc, clock will normally just change
1242 * our priority without moving us from one queue to another
1243 * (since the running process is not on a queue.)
1244 * If that happened after we put ourselves on the run queue
1245 * but before we switched, we might not be on the queue
1246 * indicated by our priority.
1247 */
1248 s = splstatclock();
1249 setrunqueue(p);
1250 p->p_stats->p_ru.ru_nivcsw++;
1251 mi_switch();
1252 splx(s);
1253 while ((sig = CURSIG(p)) != 0)
1254 postsig(sig);
1255 }
1256 curpriority = p->p_priority;
1257 }
1258
#ifdef DEBUG
/*
 * Print the in-memory trap history ring buffer, most recent entry
 * first, then clear it.  'msg' identifies the caller in the output.
 * Runs at splhigh() so new traps cannot modify the buffer while it
 * is being dumped.
 */
trapDump(msg)
	char *msg;
{
	register int i;
	int s;

	s = splhigh();
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* step backwards through the ring, wrapping at the start */
		if (trp == trapdebug)
			trp = &trapdebug[TRAPSIZE - 1];
		else
			trp--;
		/* a zero cause marks a never-used slot: end of history */
		if (trp->cause == 0)
			break;
		printf("%s: ADR %x PC %x CR %x SR %x\n",
			trap_type[(trp->cause & MACH_CR_EXC_CODE) >>
				MACH_CR_EXC_CODE_SHIFT],
			trp->vadr, trp->pc, trp->cause, trp->status);
		printf(" RA %x code %d\n", trp-> ra, trp->code);
	}
	/* reset the ring so the next dump shows only new traps */
	bzero(trapdebug, sizeof(trapdebug));
	trp = trapdebug;
	splx(s);
}
#endif
1286
1287 /*
1288 *----------------------------------------------------------------------
1289 *
1290 * MemErrorInterrupts --
1291 * pmax_errintr - for the DS2100/DS3100
1292 * kn02_errintr - for the DS5000/200
1293 * kn02ba_errintr - for the DS5000/1xx and DS5000/xx
1294 *
1295 * Handler an interrupt for the control register.
1296 *
1297 * Results:
1298 * None.
1299 *
1300 * Side effects:
1301 * None.
1302 *
1303 *----------------------------------------------------------------------
1304 */
static void
pmax_errintr()
{
	/* system control/status register, accessed uncached */
	volatile u_short *sysCSRPtr =
		(u_short *)MACH_PHYS_TO_UNCACHED(KN01_SYS_CSR);
	u_short csr;

	csr = *sysCSRPtr;

	/* a latched memory (parity) error is fatal */
	if (csr & KN01_CSR_MERR) {
		printf("Memory error at 0x%x\n",
			*(unsigned *)MACH_PHYS_TO_UNCACHED(KN01_SYS_ERRADR));
		panic("Mem error interrupt");
	}
	/*
	 * Write the CSR back with the must-be-zero bits cleared; the 0xff
	 * presumably clears the per-byte error indications -- TODO confirm
	 * against the KN01 hardware documentation.
	 */
	*sysCSRPtr = (csr & ~KN01_CSR_MBZ) | 0xff;
}
1321
/*
 * Memory error interrupt handler for the DS5000/200 (kn02).
 * Reads and clears the error-address and check/syndrome registers,
 * reports the fault, and panics unless the error was a corrected
 * single-bit ECC read error.
 */
static void
kn02_errintr()
{
	u_int erradr, chksyn, physadr;
	int i;	/* XXX unused */

	erradr = *(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR);
	chksyn = *(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CHKSYN);
	/* writing ERRADR clears the latched error */
	*(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_ERRADR) = 0;
	MachEmptyWriteBuffer();

	if (!(erradr & KN02_ERR_VALID))
		return;
	/* extract the physical word address and compensate for pipelining */
	physadr = erradr & KN02_ERR_ADDRESS;
	if (!(erradr & KN02_ERR_WRITE))
		/* presumably the latched read address runs 5 words ahead -- TODO confirm */
		physadr = (physadr & ~0xfff) | ((physadr & 0xfff) - 5);
	physadr <<= 2;	/* word address -> byte address */
	printf("%s memory %s %s error at 0x%x\n",
		(erradr & KN02_ERR_CPU) ? "CPU" : "DMA",
		(erradr & KN02_ERR_WRITE) ? "write" : "read",
		(erradr & KN02_ERR_ECCERR) ? "ECC" : "timeout",
		physadr);
	if (erradr & KN02_ERR_ECCERR) {
		/* clear the syndrome latch as well */
		*(u_int *)MACH_PHYS_TO_UNCACHED(KN02_SYS_CHKSYN) = 0;
		MachEmptyWriteBuffer();
		printf("ECC 0x%x\n", chksyn);

		/* check for a corrected, single bit, read error */
		if (!(erradr & KN02_ERR_WRITE)) {
			if (physadr & 0x4) {
				/* check high word */
				if (chksyn & KN02_ECC_SNGHI)
					return;
			} else {
				/* check low word */
				if (chksyn & KN02_ECC_SNGLO)
					return;
			}
		}
	}
	panic("Mem error interrupt");
}
1365
#ifdef DS5000_240
/*
 * Memory error interrupt handler for the DS5000/240 (kn03).
 * Reports the failing address; non-fatal.
 */
static void
kn03_errintr()
{

	printf("erradr %x\n", *(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR));
	/* presumably writing ERRADR clears the latch, as on the kn02 -- TODO confirm */
	*(unsigned *)MACH_PHYS_TO_UNCACHED(KN03_SYS_ERRADR) = 0;
	MachEmptyWriteBuffer();
}
#endif /* DS5000_240 */
1376
/*
 * Memory error interrupt handler for the DS5000/1xx and DS5000/xx
 * (kn02ba).  Reports the failing chip address; non-fatal.
 */
static void
kn02ba_errintr()
{
	register int mer, adr, siz;
	static int errintr_cnt = 0;	/* running count of errors reported */

	siz = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MSR);
	mer = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_MER);
	adr = *(volatile int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_AER);

	/* clear interrupt bit */
	*(unsigned int *)MACH_PHYS_TO_UNCACHED(KMIN_REG_TIMEOUT) = 0;

	errintr_cnt++;
	printf("(%d)%s%x [%x %x %x]\n", errintr_cnt,
		"Bad memory chip at phys ",
		kn02ba_recover_erradr(adr, mer),
		mer, siz, adr);
}
1396
1397 static unsigned
kn02ba_recover_erradr(phys,mer)1398 kn02ba_recover_erradr(phys, mer)
1399 register unsigned phys, mer;
1400 {
1401 /* phys holds bits 28:2, mer knows which byte */
1402 switch (mer & KMIN_MER_LASTBYTE) {
1403 case KMIN_LASTB31:
1404 mer = 3; break;
1405 case KMIN_LASTB23:
1406 mer = 2; break;
1407 case KMIN_LASTB15:
1408 mer = 1; break;
1409 case KMIN_LASTB07:
1410 mer = 0; break;
1411 }
1412 return ((phys & KMIN_AER_ADDR_MASK) | mer);
1413 }
1414
1415 /*
1416 * Return the resulting PC as if the branch was executed.
1417 */
1418 unsigned
MachEmulateBranch(regsPtr,instPC,fpcCSR,allowNonBranch)1419 MachEmulateBranch(regsPtr, instPC, fpcCSR, allowNonBranch)
1420 unsigned *regsPtr;
1421 unsigned instPC;
1422 unsigned fpcCSR;
1423 int allowNonBranch;
1424 {
1425 InstFmt inst;
1426 unsigned retAddr;
1427 int condition;
1428 extern unsigned GetBranchDest();
1429
1430
1431 inst = *(InstFmt *)instPC;
1432 #if 0
1433 printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
1434 inst.word, fpcCSR); /* XXX */
1435 #endif
1436 switch ((int)inst.JType.op) {
1437 case OP_SPECIAL:
1438 switch ((int)inst.RType.func) {
1439 case OP_JR:
1440 case OP_JALR:
1441 retAddr = regsPtr[inst.RType.rs];
1442 break;
1443
1444 default:
1445 if (!allowNonBranch)
1446 panic("MachEmulateBranch: Non-branch");
1447 retAddr = instPC + 4;
1448 break;
1449 }
1450 break;
1451
1452 case OP_BCOND:
1453 switch ((int)inst.IType.rt) {
1454 case OP_BLTZ:
1455 case OP_BLTZAL:
1456 if ((int)(regsPtr[inst.RType.rs]) < 0)
1457 retAddr = GetBranchDest((InstFmt *)instPC);
1458 else
1459 retAddr = instPC + 8;
1460 break;
1461
1462 case OP_BGEZAL:
1463 case OP_BGEZ:
1464 if ((int)(regsPtr[inst.RType.rs]) >= 0)
1465 retAddr = GetBranchDest((InstFmt *)instPC);
1466 else
1467 retAddr = instPC + 8;
1468 break;
1469
1470 default:
1471 panic("MachEmulateBranch: Bad branch cond");
1472 }
1473 break;
1474
1475 case OP_J:
1476 case OP_JAL:
1477 retAddr = (inst.JType.target << 2) |
1478 ((unsigned)instPC & 0xF0000000);
1479 break;
1480
1481 case OP_BEQ:
1482 if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1483 retAddr = GetBranchDest((InstFmt *)instPC);
1484 else
1485 retAddr = instPC + 8;
1486 break;
1487
1488 case OP_BNE:
1489 if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1490 retAddr = GetBranchDest((InstFmt *)instPC);
1491 else
1492 retAddr = instPC + 8;
1493 break;
1494
1495 case OP_BLEZ:
1496 if ((int)(regsPtr[inst.RType.rs]) <= 0)
1497 retAddr = GetBranchDest((InstFmt *)instPC);
1498 else
1499 retAddr = instPC + 8;
1500 break;
1501
1502 case OP_BGTZ:
1503 if ((int)(regsPtr[inst.RType.rs]) > 0)
1504 retAddr = GetBranchDest((InstFmt *)instPC);
1505 else
1506 retAddr = instPC + 8;
1507 break;
1508
1509 case OP_COP1:
1510 switch (inst.RType.rs) {
1511 case OP_BCx:
1512 case OP_BCy:
1513 if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1514 condition = fpcCSR & MACH_FPC_COND_BIT;
1515 else
1516 condition = !(fpcCSR & MACH_FPC_COND_BIT);
1517 if (condition)
1518 retAddr = GetBranchDest((InstFmt *)instPC);
1519 else
1520 retAddr = instPC + 8;
1521 break;
1522
1523 default:
1524 if (!allowNonBranch)
1525 panic("MachEmulateBranch: Bad coproc branch instruction");
1526 retAddr = instPC + 4;
1527 }
1528 break;
1529
1530 default:
1531 if (!allowNonBranch)
1532 panic("MachEmulateBranch: Non-branch instruction");
1533 retAddr = instPC + 4;
1534 }
1535 #if 0
1536 printf("Target addr=%x\n", retAddr); /* XXX */
1537 #endif
1538 return (retAddr);
1539 }
1540
1541 unsigned
GetBranchDest(InstPtr)1542 GetBranchDest(InstPtr)
1543 InstFmt *InstPtr;
1544 {
1545 return ((unsigned)InstPtr + 4 + ((short)InstPtr->IType.imm << 2));
1546 }
1547
1548 /*
1549 * This routine is called by procxmt() to single step one instruction.
1550 * We do this by storing a break instruction after the current instruction,
1551 * resuming execution, and then restoring the old instruction.
1552 */
cpu_singlestep(p)1553 cpu_singlestep(p)
1554 register struct proc *p;
1555 {
1556 register unsigned va;
1557 register int *locr0 = p->p_md.md_regs;
1558 int i;
1559
1560 /* compute next address after current location */
1561 va = MachEmulateBranch(locr0, locr0[PC], locr0[FSR], 1);
1562 if (p->p_md.md_ss_addr || p->p_md.md_ss_addr == va ||
1563 !useracc((caddr_t)va, 4, B_READ)) {
1564 printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
1565 p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
1566 return (EFAULT);
1567 }
1568 p->p_md.md_ss_addr = va;
1569 p->p_md.md_ss_instr = fuiword((caddr_t)va);
1570 i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1571 if (i < 0) {
1572 vm_offset_t sa, ea;
1573 int rv;
1574
1575 sa = trunc_page((vm_offset_t)va);
1576 ea = round_page((vm_offset_t)va+sizeof(int)-1);
1577 rv = vm_map_protect(&p->p_vmspace->vm_map, sa, ea,
1578 VM_PROT_DEFAULT, FALSE);
1579 if (rv == KERN_SUCCESS) {
1580 i = suiword((caddr_t)va, MACH_BREAK_SSTEP);
1581 (void) vm_map_protect(&p->p_vmspace->vm_map,
1582 sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
1583 }
1584 }
1585 if (i < 0)
1586 return (EFAULT);
1587 #if 0
1588 printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
1589 p->p_comm, p->p_pid, p->p_md.md_ss_addr,
1590 p->p_md.md_ss_instr, locr0[PC], fuword((caddr_t)va)); /* XXX */
1591 #endif
1592 return (0);
1593 }
1594
1595 #ifdef DEBUG
kdbpeek(addr)1596 kdbpeek(addr)
1597 {
1598 if (addr & 3) {
1599 printf("kdbpeek: unaligned address %x\n", addr);
1600 return (-1);
1601 }
1602 return (*(int *)addr);
1603 }
1604
#define MIPS_JR_RA	0x03e00008 /* instruction code for jr ra */

/*
 * Print a stack backtrace.
 *
 * For each frame it scans backwards from the PC for the previous
 * function's "jr ra" to locate the start of the current subroutine,
 * then scans the prologue forwards to recover the frame size and any
 * argument/ra/fp registers saved on the stack.  Argument values are
 * only accurate for frames that actually saved them.
 */
void
stacktrace(a0, a1, a2, a3)
	int a0, a1, a2, a3;
{
	unsigned pc, sp, fp, ra, va, subr;
	unsigned instr, mask;
	InstFmt i;
	int more, stksize;
	int regs[3];
	extern setsoftclock();
	extern char start[], edata[];

	cpu_getregs(regs);

	/* get initial values from the exception frame */
	sp = regs[0];
	pc = regs[1];
	ra = 0;
	fp = regs[2];

loop:
	/* check for current PC in the kernel interrupt handler code */
	if (pc >= (unsigned)MachKernIntr && pc < (unsigned)MachUserIntr) {
		/* NOTE: the offsets depend on the code in locore.s */
		printf("interrupt\n");
		a0 = kdbpeek(sp + 36);
		a1 = kdbpeek(sp + 40);
		a2 = kdbpeek(sp + 44);
		a3 = kdbpeek(sp + 48);
		pc = kdbpeek(sp + 20);
		ra = kdbpeek(sp + 92);
		sp = kdbpeek(sp + 100);
		/* NOTE(review): fp is fetched via the already-updated sp;
		   verify the 104 offset is meant to be frame-relative */
		fp = kdbpeek(sp + 104);
	}

	/* check for current PC in the exception handler code */
	if (pc >= 0x80000000 && pc < (unsigned)setsoftclock) {
		ra = 0;
		subr = 0;
		goto done;
	}

	/* check for bad PC */
	if (pc & 3 || pc < 0x80000000 || pc >= (unsigned)edata) {
		printf("PC 0x%x: not in kernel\n", pc);
		ra = 0;
		subr = 0;
		goto done;
	}

	/*
	 * Find the beginning of the current subroutine by scanning backwards
	 * from the current PC for the end of the previous subroutine.
	 */
	va = pc - sizeof(int);
	while ((instr = kdbpeek(va)) != MIPS_JR_RA)
		va -= sizeof(int);
	va += 2 * sizeof(int);	/* skip back over branch & delay slot */
	/* skip over nulls which might separate .o files */
	while ((instr = kdbpeek(va)) == 0)
		va += sizeof(int);
	subr = va;

	/* scan forwards to find stack size and any saved registers */
	stksize = 0;
	more = 3;
	mask = 0;	/* registers already restored; take only the first save */
	/* 'more' counts down after a branch so its delay slot is still scanned */
	for (; more; va += sizeof(int), more = (more == 3) ? 3 : more - 1) {
		/* stop if hit our current position */
		if (va >= pc)
			break;
		instr = kdbpeek(va);
		i.word = instr;
		switch (i.JType.op) {
		case OP_SPECIAL:
			switch (i.RType.func) {
			case OP_JR:
			case OP_JALR:
				more = 2; /* stop after next instruction */
				break;

			case OP_SYSCALL:
			case OP_BREAK:
				more = 1; /* stop now */
			};
			break;

		case OP_BCOND:
		case OP_J:
		case OP_JAL:
		case OP_BEQ:
		case OP_BNE:
		case OP_BLEZ:
		case OP_BGTZ:
			more = 2; /* stop after next instruction */
			break;

		case OP_COP0:
		case OP_COP1:
		case OP_COP2:
		case OP_COP3:
			switch (i.RType.rs) {
			case OP_BCx:
			case OP_BCy:
				more = 2; /* stop after next instruction */
			};
			break;

		case OP_SW:
			/* look for saved registers on the stack */
			if (i.IType.rs != 29)	/* only stores via sp count */
				break;
			/* only restore the first one */
			if (mask & (1 << i.IType.rt))
				break;
			mask |= 1 << i.IType.rt;
			switch (i.IType.rt) {
			case 4: /* a0 */
				a0 = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 5: /* a1 */
				a1 = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 6: /* a2 */
				a2 = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 7: /* a3 */
				a3 = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 30: /* fp */
				fp = kdbpeek(sp + (short)i.IType.imm);
				break;

			case 31: /* ra */
				ra = kdbpeek(sp + (short)i.IType.imm);
			}
			break;

		case OP_ADDI:
		case OP_ADDIU:
			/* look for stack pointer adjustment */
			if (i.IType.rs != 29 || i.IType.rt != 29)
				break;
			stksize = (short)i.IType.imm;	/* negative: frame size */
		}
	}

done:
	printf("%x+%x (%x,%x,%x,%x) ra %x sz %d\n",
		subr, pc - subr, a0, a1, a2, a3, ra, stksize);

	if (ra) {
		/* a frame that returns to itself with no stack use is a loop */
		if (pc == ra && stksize == 0)
			printf("stacktrace: loop!\n");
		else {
			/* walk up to the caller's frame */
			pc = ra;
			sp -= stksize;
			goto loop;
		}
	}
}
#endif /* DEBUG */
1776