1 /*
2 * Copyright (c) 1988 University of Utah.
3 * Copyright (c) 1982, 1986, 1990, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * the Systems Programming Group of the University of Utah Computer
8 * Science Department.
9 *
10 * %sccs.include.redist.c%
11 *
12 * from: Utah $Hdr: machdep.c 1.74 92/12/20$
13 *
14 * @(#)machdep.c 8.15 (Berkeley) 05/26/95
15 */
16
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/signalvar.h>
20 #include <sys/kernel.h>
21 #include <sys/map.h>
22 #include <sys/proc.h>
23 #include <sys/buf.h>
24 #include <sys/reboot.h>
25 #include <sys/conf.h>
26 #include <sys/file.h>
27 #include <sys/clist.h>
28 #include <sys/callout.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/msgbuf.h>
32 #include <sys/ioctl.h>
33 #include <sys/tty.h>
34 #include <sys/mount.h>
35 #include <sys/user.h>
36 #include <sys/exec.h>
37 #include <sys/sysctl.h>
38 #ifdef SYSVSHM
39 #include <sys/shm.h>
40 #endif
41 #ifdef HPUXCOMPAT
42 #include <hp/hpux/hpux.h>
43 #endif
44
45 #include <machine/cpu.h>
46 #include <machine/reg.h>
47 #include <machine/psl.h>
48 #include <hp/dev/cons.h>
49 #include <hp300/hp300/isr.h>
50 #include <hp300/hp300/pte.h>
51 #include <net/netisr.h>
52
53 #define MAXMEM 64*1024*CLSIZE /* XXX - from cmap.h */
54 #include <vm/vm_extern.h>
55 #include <vm/vm_kern.h>
56
/* the following is used externally (sysctl_hw) */
char	machine[] = "hp300";		/* cpu "architecture" */

vm_map_t buffer_map;			/* submap for filesystem buffers, set in cpu_startup */
extern vm_offset_t avail_end;		/* end of available physical memory (set by pmap_bootstrap) */

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;		/* number of swap buffer headers; 0 => sized in cpu_startup */
#ifdef	NBUF
int	nbuf = NBUF;		/* number of filesystem buffers, from config */
#else
int	nbuf = 0;		/* 0 => sized in cpu_startup */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;	/* pages of buffer-cache memory, from config */
#else
int	bufpages = 0;		/* 0 => sized in cpu_startup */
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
int	maxmem;			/* max memory per process */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */
/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

extern	u_int lowram;
extern	short exframesize[];	/* per-format hardware exception frame sizes (see sendsig/sigreturn) */
88
89 /*
90 * Console initialization: called early on from main,
91 * before vm init or startup. Do enough configuration
92 * to choose and initialize a console.
93 */
consinit()94 consinit()
95 {
96
97 /*
98 * Set cpuspeed immediately since cninit() called routines
99 * might use delay. Note that we only set it if a custom value
100 * has not already been specified.
101 */
102 if (cpuspeed == 0) {
103 switch (machineid) {
104 case HP_320:
105 case HP_330:
106 case HP_340:
107 cpuspeed = MHZ_16;
108 break;
109 case HP_350:
110 case HP_360:
111 case HP_380:
112 cpuspeed = MHZ_25;
113 break;
114 case HP_370:
115 case HP_433:
116 cpuspeed = MHZ_33;
117 break;
118 case HP_375:
119 cpuspeed = MHZ_50;
120 break;
121 default: /* assume the fastest */
122 cpuspeed = MHZ_50;
123 break;
124 }
125 if (mmutype == MMU_68040)
126 cpuspeed *= 2; /* XXX */
127 }
128 /*
129 * Find what hardware is attached to this machine.
130 */
131 find_devs();
132
133 /*
134 * Initialize the console before we print anything out.
135 */
136 cninit();
137 }
138
139 /*
140 * cpu_startup: allocate memory for variable-sized tables,
141 * initialize cpu, and do autoconfiguration.
142 */
cpu_startup()143 cpu_startup()
144 {
145 register unsigned i;
146 register caddr_t v, firstaddr;
147 int base, residual;
148 vm_offset_t minaddr, maxaddr;
149 vm_size_t size;
150 #ifdef BUFFERS_UNMANAGED
151 vm_offset_t bufmemp;
152 caddr_t buffermem;
153 int ix;
154 #endif
155 #ifdef DEBUG
156 extern int pmapdebug;
157 int opmapdebug = pmapdebug;
158
159 pmapdebug = 0;
160 #endif
161
162 /*
163 * Initialize error message buffer (at end of core).
164 * avail_end was pre-decremented in pmap_bootstrap to compensate.
165 */
166 for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
167 pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
168 avail_end + i * NBPG, VM_PROT_ALL, TRUE);
169 msgbufmapped = 1;
170
171 /*
172 * Good {morning,afternoon,evening,night}.
173 */
174 printf(version);
175 identifycpu();
176 printf("real mem = %d\n", ctob(physmem));
177
178 /*
179 * Allocate space for system data structures.
180 * The first available real memory address is in "firstaddr".
181 * The first available kernel virtual address is in "v".
182 * As pages of kernel virtual memory are allocated, "v" is incremented.
183 * As pages of memory are allocated and cleared,
184 * "firstaddr" is incremented.
185 * An index into the kernel page table corresponding to the
186 * virtual memory address maintained in "v" is kept in "mapaddr".
187 */
188 /*
189 * Make two passes. The first pass calculates how much memory is
190 * needed and allocates it. The second pass assigns virtual
191 * addresses to the various data structures.
192 */
193 firstaddr = 0;
194 again:
195 v = (caddr_t)firstaddr;
196
197 #define valloc(name, type, num) \
198 (name) = (type *)v; v = (caddr_t)((name)+(num))
199 #define valloclim(name, type, num, lim) \
200 (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
201 valloc(cfree, struct cblock, nclist);
202 valloc(callout, struct callout, ncallout);
203 valloc(swapmap, struct map, nswapmap = maxproc * 2);
204 #ifdef SYSVSHM
205 valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
206 #endif
207
208 /*
209 * Determine how many buffers to allocate.
210 * Since HPs tend to be long on memory and short on disk speed,
211 * we allocate more buffer space than the BSD standard of
212 * use 10% of memory for the first 2 Meg, 5% of remaining.
213 * We just allocate a flat 10%. Insure a minimum of 16 buffers.
214 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
215 */
216 if (bufpages == 0)
217 bufpages = physmem / 10 / CLSIZE;
218 if (nbuf == 0) {
219 nbuf = bufpages;
220 if (nbuf < 16)
221 nbuf = 16;
222 }
223 if (nswbuf == 0) {
224 nswbuf = (nbuf / 2) &~ 1; /* force even */
225 if (nswbuf > 256)
226 nswbuf = 256; /* sanity */
227 }
228 valloc(swbuf, struct buf, nswbuf);
229 valloc(buf, struct buf, nbuf);
230 /*
231 * End of first pass, size has been calculated so allocate memory
232 */
233 if (firstaddr == 0) {
234 size = (vm_size_t)(v - firstaddr);
235 firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
236 if (firstaddr == 0)
237 panic("startup: no room for tables");
238 #ifdef BUFFERS_UNMANAGED
239 buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*CLBYTES);
240 if (buffermem == 0)
241 panic("startup: no room for buffers");
242 #endif
243 goto again;
244 }
245 /*
246 * End of second pass, addresses have been assigned
247 */
248 if ((vm_size_t)(v - firstaddr) != size)
249 panic("startup: table size inconsistency");
250 /*
251 * Now allocate buffers proper. They are different than the above
252 * in that they usually occupy more virtual memory than physical.
253 */
254 size = MAXBSIZE * nbuf;
255 buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
256 &maxaddr, size, TRUE);
257 minaddr = (vm_offset_t)buffers;
258 if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
259 &minaddr, size, FALSE) != KERN_SUCCESS)
260 panic("startup: cannot allocate buffers");
261 base = bufpages / nbuf;
262 residual = bufpages % nbuf;
263 #ifdef BUFFERS_UNMANAGED
264 bufmemp = (vm_offset_t) buffermem;
265 #endif
266 for (i = 0; i < nbuf; i++) {
267 vm_size_t curbufsize;
268 vm_offset_t curbuf;
269
270 /*
271 * First <residual> buffers get (base+1) physical pages
272 * allocated for them. The rest get (base) physical pages.
273 *
274 * The rest of each buffer occupies virtual space,
275 * but has no physical memory allocated for it.
276 */
277 curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
278 curbufsize = CLBYTES * (i < residual ? base+1 : base);
279 #ifdef BUFFERS_UNMANAGED
280 /*
281 * Move the physical pages over from buffermem.
282 */
283 for (ix = 0; ix < curbufsize/CLBYTES; ix++) {
284 vm_offset_t pa;
285
286 pa = pmap_extract(kernel_pmap, bufmemp);
287 if (pa == 0)
288 panic("startup: unmapped buffer");
289 pmap_remove(kernel_pmap, bufmemp, bufmemp+CLBYTES);
290 pmap_enter(kernel_pmap,
291 (vm_offset_t)(curbuf + ix * CLBYTES),
292 pa, VM_PROT_READ|VM_PROT_WRITE, TRUE);
293 bufmemp += CLBYTES;
294 }
295 #else
296 vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
297 vm_map_simplify(buffer_map, curbuf);
298 #endif
299 }
300 #ifdef BUFFERS_UNMANAGED
301 #if 0
302 /*
303 * We would like to free the (now empty) original address range
304 * but too many bad things will happen if we try.
305 */
306 kmem_free(kernel_map, (vm_offset_t)buffermem, bufpages*CLBYTES);
307 #endif
308 #endif
309 /*
310 * Allocate a submap for exec arguments. This map effectively
311 * limits the number of processes exec'ing at any time.
312 */
313 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
314 16*NCARGS, TRUE);
315 /*
316 * Allocate a submap for physio
317 */
318 phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
319 VM_PHYS_SIZE, TRUE);
320
321 /*
322 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
323 * we use the more space efficient malloc in place of kmem_alloc.
324 */
325 mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
326 M_MBUF, M_NOWAIT);
327 bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
328 mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
329 VM_MBUF_SIZE, FALSE);
330 /*
331 * Initialize callouts
332 */
333 callfree = callout;
334 for (i = 1; i < ncallout; i++)
335 callout[i-1].c_next = &callout[i];
336 callout[i-1].c_next = NULL;
337
338 #ifdef DEBUG
339 pmapdebug = opmapdebug;
340 #endif
341 printf("avail mem = %d\n", ptoa(cnt.v_free_count));
342 printf("using %d buffers containing %d bytes of memory\n",
343 nbuf, bufpages * CLBYTES);
344 /*
345 * Set up CPU-specific registers, cache, etc.
346 */
347 initcpu();
348
349 /*
350 * Set up buffers, so they can be used to read disk labels.
351 */
352 bufinit();
353
354 /*
355 * Configure the system.
356 */
357 configure();
358 }
359
/*
 * Set registers on exec.
 * XXX Should clear registers except sp, pc,
 * but would break init; should be fixed soon.
 */
setregs(p, entry, retval)
	register struct proc *p;
	u_long entry;
	int retval[2];
{
	struct frame *frame = (struct frame *)p->p_md.md_regs;

	/* start at the entry point, with the low (odd-address) bit cleared */
	frame->f_pc = entry & ~1;
#ifdef FPCOPROC
	/* restore a null state frame */
	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
	m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
#endif
#ifdef HPUXCOMPAT
	if (p->p_md.md_flags & MDP_HPUX) {

		frame->f_regs[A0] = 0;	/* not 68010 (bit 31), no FPA (30) */
		retval[0] = 0;		/* no float card */
#ifdef FPCOPROC
		retval[1] = 1;		/* yes 68881 */
#else
		retval[1] = 0;		/* no 68881 */
#endif
	}
	/*
	 * XXX This doesn't have much to do with setting registers but
	 * I didn't want to muck up kern_exec.c with this code, so I
	 * stuck it here.
	 *
	 * Ensure we perform the right action on traps type 1 and 2:
	 * If our parent is an HPUX process and we are being traced, turn
	 * on HPUX style interpretation.  Else if we were using the HPUX
	 * style interpretation, revert to the BSD interpretation.
	 *
	 * Note that we do this by changing the trap instruction in the
	 * global "sigcode" array which then gets copied out to the user's
	 * sigcode in the stack.  Since we are changing it in the global
	 * array we must always reset it, even for non-HPUX processes.
	 *
	 * Note also that implementing it in this way creates a potential
	 * race where we could have tweaked it for process A which then
	 * blocks in the copyout to the stack and process B comes along
	 * and untweaks it causing A to wind up with the wrong setting
	 * when the copyout continues.  However, since we have already
	 * copied something out to this user stack page (thereby faulting
	 * it in), this scenario is extremely unlikely.
	 */
	{
		extern short sigcodetrap[];

		/* 0x4E42 is "trap #2" (HP-UX style), 0x4E41 is "trap #1" (BSD) */
		if ((p->p_pptr->p_md.md_flags & MDP_HPUX) &&
		    (p->p_flag & P_TRACED)) {
			p->p_md.md_flags |= MDP_HPUXTRACE;
			*sigcodetrap = 0x4E42;
		} else {
			p->p_md.md_flags &= ~MDP_HPUXTRACE;
			*sigcodetrap = 0x4E41;
		}
	}
#endif
}
426
/*
 * Info for CTL_HW
 */
char	cpu_model[120];		/* filled in by identifycpu(), exported via sysctl */
extern	char version[];

/*
 * Determine the model, MMU, FPU and external-cache configuration of
 * this machine, build the cpu_model string, print it, and panic if
 * the machine or MMU type is unknown or not configured into the kernel.
 */
identifycpu()
{
	char *t, *mc;
	int len;

	/* model name and clock-rate prefix (closing paren added at the end) */
	switch (machineid) {
	case HP_320:
		t = "320 (16.67MHz";
		break;
	case HP_330:
		t = "318/319/330 (16.67MHz";
		break;
	case HP_340:
		t = "340 (16.67MHz";
		break;
	case HP_350:
		t = "350 (25MHz";
		break;
	case HP_360:
		t = "360 (25MHz";
		break;
	case HP_370:
		t = "370 (33.33MHz";
		break;
	case HP_375:
		t = "345/375 (50MHz";
		break;
	case HP_380:
		t = "380/425 (25MHz";
		break;
	case HP_433:
		t = "433 (33MHz";
		break;
	default:
		printf("\nunknown machine type %d\n", machineid);
		panic("startup");
	}
	/* CPU generation implied by the MMU type: '040, '030, else '020 */
	mc = (mmutype == MMU_68040 ? "40" :
	       (mmutype == MMU_68030 ? "30" : "20"));
	sprintf(cpu_model, "HP9000/%s MC680%s CPU", t, mc);
	switch (mmutype) {
	case MMU_68040:
	case MMU_68030:
		strcat(cpu_model, "+MMU");	/* on-chip MMU */
		break;
	case MMU_68851:
		strcat(cpu_model, ", MC68851 MMU");
		break;
	case MMU_HP:
		strcat(cpu_model, ", HP MMU");
		break;
	default:
		printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
		panic("startup");
	}
	len = strlen(cpu_model);
	/* describe the FPU, inferred from the MMU/CPU generation */
	if (mmutype == MMU_68040)
		len += sprintf(cpu_model + len,
		    "+FPU, 4k on-chip physical I/D caches");
	else if (mmutype == MMU_68030)
		len += sprintf(cpu_model + len, ", %sMHz MC68882 FPU",
		       machineid == HP_340 ? "16.67" :
		       (machineid == HP_360 ? "25" :
			(machineid == HP_370 ? "33.33" : "50")));
	else
		len += sprintf(cpu_model + len, ", %sMHz MC68881 FPU",
		       machineid == HP_350 ? "20" : "16.67");
	/* describe the external cache, if any */
	switch (ectype) {
	case EC_VIRT:
		sprintf(cpu_model + len, ", %dK virtual-address cache",
			machineid == HP_320 ? 16 : 32);
		break;
	case EC_PHYS:
		sprintf(cpu_model + len, ", %dK physical-address cache",
			machineid == HP_370 ? 64 : 32);
		break;
	}
	strcat(cpu_model, ")");
	printf("%s\n", cpu_model);
	/*
	 * Now that we have told the user what they have,
	 * let them know if that machine type isn't configured.
	 */
	switch (machineid) {
	case -1:		/* keep compilers happy */
#if !defined(HP320) && !defined(HP350)
	case HP_320:
	case HP_350:
#endif
#ifndef HP330
	case HP_330:
#endif
#if !defined(HP360) && !defined(HP370)
	case HP_340:
	case HP_360:
	case HP_370:
#endif
#if !defined(HP380)
	case HP_380:
	case HP_433:
#endif
		panic("CPU type not configured");
	default:
		break;
	}
}
539
540 /*
541 * machine dependent system variables.
542 */
cpu_sysctl(name,namelen,oldp,oldlenp,newp,newlen,p)543 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
544 int *name;
545 u_int namelen;
546 void *oldp;
547 size_t *oldlenp;
548 void *newp;
549 size_t newlen;
550 struct proc *p;
551 {
552
553 /* all sysctl names at this level are terminal */
554 if (namelen != 1)
555 return (ENOTDIR); /* overloaded */
556
557 switch (name[0]) {
558 case CPU_CONSDEV:
559 return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
560 sizeof cn_tty->t_dev));
561 default:
562 return (EOPNOTSUPP);
563 }
564 /* NOTREACHED */
565 }
566
567 #ifdef USELEDS
568 #include <hp300/hp300/led.h>
569
int inledcontrol = 0;	/* 1 if we are in ledcontrol already, cheap mutex */
char *ledaddr;		/* KVA of the LED register; set up by ledinit() */
572
573 /*
574 * Map the LED page and setup the KVA to access it.
575 */
ledinit()576 ledinit()
577 {
578 extern caddr_t ledbase;
579
580 pmap_enter(kernel_pmap, (vm_offset_t)ledbase, (vm_offset_t)LED_ADDR,
581 VM_PROT_READ|VM_PROT_WRITE, TRUE);
582 ledaddr = (char *) ((int)ledbase | (LED_ADDR & PGOFSET));
583 }
584
585 /*
586 * Do lights:
587 * `ons' is a mask of LEDs to turn on,
588 * `offs' is a mask of LEDs to turn off,
589 * `togs' is a mask of LEDs to toggle.
590 * Note we don't use splclock/splx for mutual exclusion.
591 * They are expensive and we really don't need to be that precise.
592 * Besides we would like to be able to profile this routine.
593 */
ledcontrol(ons,offs,togs)594 ledcontrol(ons, offs, togs)
595 register int ons, offs, togs;
596 {
597 static char currentleds;
598 register char leds;
599
600 inledcontrol = 1;
601 leds = currentleds;
602 if (ons)
603 leds |= ons;
604 if (offs)
605 leds &= ~offs;
606 if (togs)
607 leds ^= togs;
608 currentleds = leds;
609 *ledaddr = ~leds;
610 inledcontrol = 0;
611 }
612 #endif
613
/* ss_flags bits: which parts of a struct sigstate are valid */
#define SS_RTEFRAME	1	/* ss_frame holds a long (RTE) exception frame */
#define SS_FPSTATE	2	/* ss_fpstate holds saved FP coprocessor state */
#define SS_USERREGS	4	/* ss_frame.f_regs holds the general registers */

/*
 * Hardware state saved by sendsig and restored by sigreturn.
 */
struct sigstate {
	int	ss_flags;		/* which of the following are valid */
	struct	frame ss_frame;		/* original exception frame */
	struct	fpframe ss_fpstate;	/* 68881/68882 state info */
};

/*
 * WARNING: code in locore.s assumes the layout shown for sf_signum
 * thru sf_handler so... don't screw with them!
 */
struct sigframe {
	int	sf_signum;		/* signo for handler */
	int	sf_code;		/* additional info for handler */
	struct	sigcontext *sf_scp;	/* context ptr for handler */
	sig_t	sf_handler;		/* handler addr for u_sigc */
	struct	sigstate sf_state;	/* state of the hardware */
	struct	sigcontext sf_sc;	/* actual context */
};

#ifdef HPUXCOMPAT
/*
 * HP-UX style signal context, built alongside the BSD one for
 * HP-UX processes (see sendsig) and recognized by sigreturn.
 */
struct hpuxsigcontext {
	int	hsc_syscall;
	char	hsc_action;
	char	hsc_pad1;
	char	hsc_pad2;
	char	hsc_onstack;		/* sigstack state to restore */
	int	hsc_mask;		/* signal mask to restore */
	int	hsc_sp;			/* sp to restore */
	short	hsc_ps;			/* psl to restore */
	int	hsc_pc;			/* pc to restore */
/* the rest aren't part of the context but are included for our convenience */
	short	hsc_pad;
	u_int	hsc_magic;		/* XXX sigreturn: cookie */
	struct	sigcontext *hsc_realsc;	/* XXX sigreturn: ptr to BSD context */
};

/*
 * For an HP-UX process, a partial hpuxsigframe follows the normal sigframe.
 * Tremendous waste of space, but some HP-UX applications (e.g. LCL) need it.
 */
struct hpuxsigframe {
	int	hsf_signum;
	int	hsf_code;
	struct	sigcontext *hsf_scp;
	struct	hpuxsigcontext hsf_sc;
	int	hsf_regs[15];
};
#endif

#ifdef DEBUG
int sigdebug = 0;		/* SDB_* mask enabling signal debug printfs */
int sigpid = 0;			/* pid to restrict SDB_KSTACK output to */
#define SDB_FOLLOW	0x01	/* trace sendsig/sigreturn flow */
#define SDB_KSTACK	0x02	/* report stack setup details */
#define SDB_FPSTATE	0x04	/* report FP state copies */
#endif
674
/*
 * Send an interrupt to process.
 *
 * Builds a sigframe (argument list, saved hardware state, and a
 * sigcontext for sigreturn) in a kernel buffer, copies it onto the
 * signal stack or the normal user stack, and points the user SP and
 * PC so that the trampoline at the base of the stack invokes
 * `catcher' with the frame.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	u_long code;
{
	register struct proc *p = curproc;
	register struct sigframe *fp, *kfp;
	register struct frame *frame;
	register struct sigacts *psp = p->p_sigacts;
	register short ft;
	int oonstack, fsize;
	extern char sigcode[], esigcode[];

	frame = (struct frame *)p->p_md.md_regs;
	ft = frame->f_format;
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
#ifdef HPUXCOMPAT
	/* HP-UX processes get an additional hpuxsigframe appended */
	if (p->p_md.md_flags & MDP_HPUX)
		fsize = sizeof(struct sigframe) + sizeof(struct hpuxsigframe);
	else
#endif
	fsize = sizeof(struct sigframe);
	/* use the alternate signal stack if configured, requested, and not active */
	if ((psp->ps_flags & SAS_ALTSTACK) &&
	    (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
					 psp->ps_sigstk.ss_size - fsize);
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else
		fp = (struct sigframe *)(frame->f_regs[SP] - fsize);
	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
		(void)grow(p, (vm_offset_t)fp);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d ssp %x usp %x scp %x ft %d\n",
		       p->p_pid, sig, &oonstack, fp, &fp->sf_sc, ft);
#endif
	if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig(%d): useracc failed on sig %d\n",
			       p->p_pid, sig);
#endif
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/* build the frame in kernel memory, then copy it out in one go */
	kfp = (struct sigframe *)malloc((u_long)fsize, M_TEMP, M_WAITOK);
	/*
	 * Build the argument list for the signal handler.
	 */
	kfp->sf_signum = sig;
	kfp->sf_code = code;
	kfp->sf_scp = &fp->sf_sc;
	kfp->sf_handler = catcher;
	/*
	 * Save necessary hardware state.  Currently this includes:
	 *	- general registers
	 *	- original exception frame (if not a "normal" frame)
	 *	- FP coprocessor state
	 */
	kfp->sf_state.ss_flags = SS_USERREGS;
	bcopy((caddr_t)frame->f_regs,
	      (caddr_t)kfp->sf_state.ss_frame.f_regs, sizeof frame->f_regs);
	if (ft >= FMT7) {
#ifdef DEBUG
		if (ft > 15 || exframesize[ft] < 0)
			panic("sendsig: bogus frame type");
#endif
		kfp->sf_state.ss_flags |= SS_RTEFRAME;
		kfp->sf_state.ss_frame.f_format = frame->f_format;
		kfp->sf_state.ss_frame.f_vector = frame->f_vector;
		bcopy((caddr_t)&frame->F_u,
		      (caddr_t)&kfp->sf_state.ss_frame.F_u, exframesize[ft]);
		/*
		 * Leave an indicator that we need to clean up the kernel
		 * stack.  We do this by setting the "pad word" above the
		 * hardware stack frame to the amount the stack must be
		 * adjusted by.
		 *
		 * N.B. we increment rather than just set f_stackadj in
		 * case we are called from syscall when processing a
		 * sigreturn.  In that case, f_stackadj may be non-zero.
		 */
		frame->f_stackadj += exframesize[ft];
		frame->f_format = frame->f_vector = 0;
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sendsig(%d): copy out %d of frame %d\n",
			       p->p_pid, exframesize[ft], ft);
#endif
	}
#ifdef FPCOPROC
	kfp->sf_state.ss_flags |= SS_FPSTATE;
	m68881_save(&kfp->sf_state.ss_fpstate);
#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&kfp->sf_state.ss_fpstate)
		printf("sendsig(%d): copy out FP state (%x) to %x\n",
		       p->p_pid, *(u_int *)&kfp->sf_state.ss_fpstate,
		       &kfp->sf_state.ss_fpstate);
#endif
#endif
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	kfp->sf_sc.sc_onstack = oonstack;
	kfp->sf_sc.sc_mask = mask;
	kfp->sf_sc.sc_sp = frame->f_regs[SP];
	kfp->sf_sc.sc_fp = frame->f_regs[A6];
	kfp->sf_sc.sc_ap = (int)&fp->sf_state;
	kfp->sf_sc.sc_pc = frame->f_pc;
	kfp->sf_sc.sc_ps = frame->f_sr;
#ifdef HPUXCOMPAT
	/*
	 * Create an HP-UX style sigcontext structure and associated goo
	 */
	if (p->p_md.md_flags & MDP_HPUX) {
		register struct hpuxsigframe *hkfp;

		/* the hpuxsigframe immediately follows the BSD sigframe */
		hkfp = (struct hpuxsigframe *)&kfp[1];
		hkfp->hsf_signum = bsdtohpuxsig(kfp->sf_signum);
		hkfp->hsf_code = kfp->sf_code;
		hkfp->hsf_scp = (struct sigcontext *)
			&((struct hpuxsigframe *)(&fp[1]))->hsf_sc;
		hkfp->hsf_sc.hsc_syscall = 0;		/* XXX */
		hkfp->hsf_sc.hsc_action = 0;		/* XXX */
		hkfp->hsf_sc.hsc_pad1 = hkfp->hsf_sc.hsc_pad2 = 0;
		hkfp->hsf_sc.hsc_onstack = kfp->sf_sc.sc_onstack;
		hkfp->hsf_sc.hsc_mask = kfp->sf_sc.sc_mask;
		hkfp->hsf_sc.hsc_sp = kfp->sf_sc.sc_sp;
		hkfp->hsf_sc.hsc_ps = kfp->sf_sc.sc_ps;
		hkfp->hsf_sc.hsc_pc = kfp->sf_sc.sc_pc;
		hkfp->hsf_sc.hsc_pad = 0;
		/* cookie lets sigreturn recognize a context we built */
		hkfp->hsf_sc.hsc_magic = 0xdeadbeef;
		hkfp->hsf_sc.hsc_realsc = kfp->sf_scp;
		bcopy((caddr_t)frame->f_regs, (caddr_t)hkfp->hsf_regs,
		      sizeof (hkfp->hsf_regs));

		/* hand the handler the HP-UX signal number and context */
		kfp->sf_signum = hkfp->hsf_signum;
		kfp->sf_scp = hkfp->hsf_scp;
	}
#endif
	(void) copyout((caddr_t)kfp, (caddr_t)fp, fsize);
	frame->f_regs[SP] = (int)fp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig(%d): sig %d scp %x fp %x sc_sp %x sc_ap %x\n",
		       p->p_pid, sig, kfp->sf_scp, fp,
		       kfp->sf_sc.sc_sp, kfp->sf_sc.sc_ap);
#endif
	/*
	 * Signal trampoline code is at base of user stack.
	 */
	frame->f_pc = (int)PS_STRINGS - (esigcode - sigcode);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d returns\n",
		       p->p_pid, sig);
#endif
	free((caddr_t)kfp, M_TEMP);
}
855
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;	/* user address of context to restore */
};
/* ARGSUSED */
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct frame *frame;
	register int rf;
	struct sigcontext tsigc;
	struct sigstate tstate;
	int flags;

	scp = uap->sigcntxp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
#endif
	/* the context pointer must at least be word aligned */
	if ((int)scp & 1)
		return (EINVAL);
#ifdef HPUXCOMPAT
	/*
	 * Grab context as an HP-UX style context and determine if it
	 * was one that we constructed in sendsig.
	 */
	if (p->p_md.md_flags & MDP_HPUX) {
		struct hpuxsigcontext *hscp = (struct hpuxsigcontext *)scp;
		struct hpuxsigcontext htsigc;

		if (useracc((caddr_t)hscp, sizeof (*hscp), B_WRITE) == 0 ||
		    copyin((caddr_t)hscp, (caddr_t)&htsigc, sizeof htsigc))
			return (EINVAL);
		/*
		 * If not generated by sendsig or we cannot restore the
		 * BSD-style sigcontext, just restore what we can -- state
		 * will be lost, but them's the breaks.
		 */
		hscp = &htsigc;
		if (hscp->hsc_magic != 0xdeadbeef ||
		    (scp = hscp->hsc_realsc) == 0 ||
		    useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
		    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc)) {
			if (hscp->hsc_onstack & 01)
				p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
			else
				p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
			p->p_sigmask = hscp->hsc_mask &~ sigcantmask;
			frame = (struct frame *) p->p_md.md_regs;
			frame->f_regs[SP] = hscp->hsc_sp;
			frame->f_pc = hscp->hsc_pc;
			/* strip user-forbidden bits from the restored psl */
			frame->f_sr = hscp->hsc_ps &~ PSL_USERCLR;
			return (EJUSTRETURN);
		}
		/*
		 * Otherwise, overlay BSD context with possibly modified
		 * HP-UX values.
		 */
		tsigc.sc_onstack = hscp->hsc_onstack;
		tsigc.sc_mask = hscp->hsc_mask;
		tsigc.sc_sp = hscp->hsc_sp;
		tsigc.sc_ps = hscp->hsc_ps;
		tsigc.sc_pc = hscp->hsc_pc;
	} else
#endif
	/*
	 * Test and fetch the context structure.
	 * We grab it all at once for speed.
	 */
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
	    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
		return (EINVAL);
	scp = &tsigc;
	/* refuse a psl that sets supervisor, IPL or must-be-zero bits */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
		return (EINVAL);
	/*
	 * Restore the user supplied information
	 */
	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	p->p_sigmask = scp->sc_mask &~ sigcantmask;
	frame = (struct frame *) p->p_md.md_regs;
	frame->f_regs[SP] = scp->sc_sp;
	frame->f_regs[A6] = scp->sc_fp;
	frame->f_pc = scp->sc_pc;
	frame->f_sr = scp->sc_ps;
	/*
	 * Grab pointer to hardware state information.
	 * If zero, the user is probably doing a longjmp.
	 */
	if ((rf = scp->sc_ap) == 0)
		return (EJUSTRETURN);
	/*
	 * See if there is anything to do before we go to the
	 * expense of copying in close to 1/2K of data
	 */
	flags = fuword((caddr_t)rf);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): sc_ap %x flags %x\n",
		       p->p_pid, rf, flags);
#endif
	/*
	 * fuword failed (bogus sc_ap value).
	 */
	if (flags == -1)
		return (EINVAL);
	if (flags == 0 || copyin((caddr_t)rf, (caddr_t)&tstate, sizeof tstate))
		return (EJUSTRETURN);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sigreturn(%d): ssp %x usp %x scp %x ft %d\n",
		       p->p_pid, &flags, scp->sc_sp, uap->sigcntxp,
		       (flags&SS_RTEFRAME) ? tstate.ss_frame.f_format : -1);
#endif
	/*
	 * Restore most of the users registers except for A6 and SP
	 * which were handled above.
	 */
	if (flags & SS_USERREGS)
		bcopy((caddr_t)tstate.ss_frame.f_regs,
		      (caddr_t)frame->f_regs, sizeof(frame->f_regs)-2*NBPW);
	/*
	 * Restore long stack frames.  Note that we do not copy
	 * back the saved SR or PC, they were picked up above from
	 * the sigcontext structure.
	 */
	if (flags & SS_RTEFRAME) {
		register int sz;

		/* grab frame type and validate */
		sz = tstate.ss_frame.f_format;
		if (sz > 15 || (sz = exframesize[sz]) < 0)
			return (EINVAL);
		frame->f_stackadj -= sz;
		frame->f_format = tstate.ss_frame.f_format;
		frame->f_vector = tstate.ss_frame.f_vector;
		bcopy((caddr_t)&tstate.ss_frame.F_u, (caddr_t)&frame->F_u, sz);
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sigreturn(%d): copy in %d of frame type %d\n",
			       p->p_pid, sz, tstate.ss_frame.f_format);
#endif
	}
#ifdef FPCOPROC
	/*
	 * Finally we restore the original FP context
	 */
	if (flags & SS_FPSTATE)
		m68881_restore(&tstate.ss_fpstate);
#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
		printf("sigreturn(%d): copied in FP state (%x) at %x\n",
		       p->p_pid, *(u_int *)&tstate.ss_fpstate,
		       &tstate.ss_fpstate);
#endif
#endif
#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
1034
int	waittime = -1;		/* becomes >= 0 once disks have been synced */

/*
 * Machine-dependent halt/reboot.  `howto' is a mask of RB_* flags:
 * sync and unmount disks unless RB_NOSYNC, dump core if RB_DUMP,
 * then stop the processor (RB_HALT) or reboot via doboot().
 * Does not return unless halting.
 */
boot(howto)
	register int howto;
{
	struct proc *p = curproc;	/* XXX */

	/* take a snap shot before clobbering any registers */
	if (curproc && curproc->p_addr)
		savectx(curproc->p_addr, 0);

	boothowto = howto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) spl0();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);
#ifdef notdef
#include "vn.h"
#if NVN > 0
		vnshutdown();
#endif
#endif
		sync(p, (void *)NULL, (int *)NULL);
		/*
		 * Unmount filesystems
		 */
		if (panicstr == 0)
			vfs_unmountall();

		/*
		 * Wait (with increasing backoff, up to 20 passes) for
		 * busy buffers to drain.
		 */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splhigh();			/* extreme priority */
	if (howto&RB_HALT) {
		printf("halted\n\n");
		/* stop the CPU with all interrupts masked; stays here forever */
		asm(" stop #0x2700");
	} else {
		if (howto & RB_DUMP)
			dumpsys();
		doboot();
		/*NOTREACHED*/
	}
	/*NOTREACHED*/
}
1104
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* pages to dump; also for savecore */
long	dumplo = 0;		/* block offset of dump on dumpdev */
1108
dumpconf()1109 dumpconf()
1110 {
1111 int nblks;
1112
1113 /*
1114 * XXX include the final RAM page which is not included in physmem.
1115 */
1116 dumpsize = physmem + 1;
1117 if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
1118 nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1119 if (dumpsize > btoc(dbtob(nblks - dumplo)))
1120 dumpsize = btoc(dbtob(nblks - dumplo));
1121 else if (dumplo == 0)
1122 dumplo = nblks - btodb(ctob(dumpsize));
1123 }
1124 /*
1125 * Don't dump on the first CLBYTES (why CLBYTES?)
1126 * in case the dump device includes a disk label.
1127 */
1128 if (dumplo < btodb(CLBYTES))
1129 dumplo = btodb(CLBYTES);
1130 }
1131
1132 /*
1133 * Doadump comes here after turning off memory management and
1134 * getting on the dump stack, either when called above, or by
1135 * the auto-restart code.
1136 */
dumpsys()1137 dumpsys()
1138 {
1139
1140 msgbufmapped = 0;
1141 if (dumpdev == NODEV)
1142 return;
1143 /*
1144 * For dumps during autoconfiguration,
1145 * if dump device has already configured...
1146 */
1147 if (dumpsize == 0)
1148 dumpconf();
1149 if (dumplo < 0)
1150 return;
1151 printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
1152 printf("dump ");
1153 switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
1154
1155 case ENXIO:
1156 printf("device bad\n");
1157 break;
1158
1159 case EFAULT:
1160 printf("device not ready\n");
1161 break;
1162
1163 case EINVAL:
1164 printf("area improper\n");
1165 break;
1166
1167 case EIO:
1168 printf("i/o error\n");
1169 break;
1170
1171 default:
1172 printf("succeeded\n");
1173 break;
1174 }
1175 }
1176
/*
 * Machine-dependent CPU initialization: select a copyin/copyout
 * strategy (when MAPPEDCOPY is configured), enable memory parity
 * detection, and set up the front-panel LEDs if present.
 */
initcpu()
{
#ifdef MAPPEDCOPY
	extern u_int mappedcopysize;

	/*
	 * Set the lower size bound for doing copyin/copyout via page
	 * mapping, unless it was already patched.  On machines with a
	 * virtually addressed cache the bound is pushed to infinity,
	 * effectively disabling the technique (it loses big time there).
	 */
	if (mappedcopysize == 0)
		mappedcopysize = (ectype == EC_VIRT) ? (u_int) -1 : NBPG;
#endif
	parityenable();
#ifdef USELEDS
	ledinit();
#endif
}
1199
/*
 * Catch-all handler for traps/interrupts with no registered handler.
 * `evec' is the hardware format/vector word; its low 12 bits are the
 * vector offset.  Just log the event and return.
 */
straytrap(pc, evec)
	int pc;
	u_short evec;
{
	printf("unexpected trap (vector offset %x) from %x\n",
	       evec & 0xFFF, pc);
}
1207
/*
 * When non-null, points at a setjmp context armed around an address
 * probe (badaddr/badbaddr/parityenable below); presumably the bus
 * error trap handler longjmp()s through it instead of panicking —
 * the handler itself lives outside this file.  Cleared after each probe.
 */
int *nofault;
1209
badaddr(addr)1210 badaddr(addr)
1211 register caddr_t addr;
1212 {
1213 register int i;
1214 label_t faultbuf;
1215
1216 #ifdef lint
1217 i = *addr; if (i) return(0);
1218 #endif
1219 nofault = (int *) &faultbuf;
1220 if (setjmp((label_t *)nofault)) {
1221 nofault = (int *) 0;
1222 return(1);
1223 }
1224 i = *(volatile short *)addr;
1225 nofault = (int *) 0;
1226 return(0);
1227 }
1228
badbaddr(addr)1229 badbaddr(addr)
1230 register caddr_t addr;
1231 {
1232 register int i;
1233 label_t faultbuf;
1234
1235 #ifdef lint
1236 i = *addr; if (i) return(0);
1237 #endif
1238 nofault = (int *) &faultbuf;
1239 if (setjmp((label_t *)nofault)) {
1240 nofault = (int *) 0;
1241 return(1);
1242 }
1243 i = *(volatile char *)addr;
1244 nofault = (int *) 0;
1245 return(0);
1246 }
1247
/*
 * Software network interrupt: run the input routine of each configured
 * protocol whose bit is set in the global `netisr' mask.  Each bit is
 * cleared before its handler is called.
 */
netintr()
{
#ifdef INET
	if (netisr & (1 << NETISR_ARP)) {
		netisr &= ~(1 << NETISR_ARP);
		arpintr();
	}
	if (netisr & (1 << NETISR_IP)) {
		netisr &= ~(1 << NETISR_IP);
		ipintr();
	}
#endif
#ifdef NS
	if (netisr & (1 << NETISR_NS)) {
		netisr &= ~(1 << NETISR_NS);
		nsintr();
	}
#endif
#ifdef ISO
	if (netisr & (1 << NETISR_ISO)) {
		netisr &= ~(1 << NETISR_ISO);
		clnlintr();
	}
#endif
#ifdef CCITT
	if (netisr & (1 << NETISR_CCITT)) {
		netisr &= ~(1 << NETISR_CCITT);
		ccittintr();
	}
#endif
}
1279
/*
 * Dispatch an auto-vectored interrupt.  The priority level is taken
 * from the saved status register `sr'; handlers chained on that
 * level's isrqueue are polled in order until one claims the interrupt.
 * Repeated unclaimed ("stray") interrupts eventually panic the system.
 */
intrhand(sr)
	int sr;
{
	register struct isr *isr;
	register int found = 0;
	register int ipl;
	extern struct isr isrqueue[];
	static int straycount;	/* consecutive unclaimed interrupts */

	ipl = (sr >> 8) & 7;	/* interrupt priority level from the SR */
	switch (ipl) {

	case 3:
	case 4:
	case 5:
		ipl = ISRIPL(ipl);
		isr = isrqueue[ipl].isr_forw;
		for (; isr != &isrqueue[ipl]; isr = isr->isr_forw) {
			/* non-zero return means the handler claimed it */
			if ((isr->isr_intr)(isr->isr_arg)) {
				found++;
				break;
			}
		}
		if (found)
			straycount = 0;
		else if (++straycount > 50)
			panic("intrhand: stray interrupt");
		else
			printf("stray interrupt, sr 0x%x\n", sr);
		break;

	case 0:
	case 1:
	case 2:
	case 6:
	case 7:
		/* levels with no isrqueue; note straycount is shared
		   with the case above */
		if (++straycount > 50)
			panic("intrhand: unexpected sr");
		else
			printf("intrhand: unexpected sr 0x%x\n", sr);
		break;
	}
}
1323
1324 #if defined(DEBUG) && !defined(PANICBUTTON)
1325 #define PANICBUTTON
1326 #endif
1327
1328 #ifdef PANICBUTTON
1329 int panicbutton = 1; /* non-zero if panic buttons are enabled */
1330 int crashandburn = 0;
1331 int candbdelay = 50; /* give em half a second */
1332
/*
 * Callout armed by nmihand(): if a second keyboard NMI does not
 * arrive within `candbdelay' ticks, cancel the pending
 * crash-and-burn request.
 */
void
candbtimer(arg)
	void *arg;
{

	crashandburn = 0;
}
1340 #endif
1341
1342 /*
1343 * Level 7 interrupts can be caused by the keyboard or parity errors.
1344 */
nmihand(frame)
	struct frame frame;
{
	/* keyboard-initiated NMI ("reset" button)? */
	if (kbdnmi()) {
#ifdef PANICBUTTON
		static int innmihand = 0;

		/*
		 * Attempt to reduce the window of vulnerability for recursive
		 * NMIs (e.g. someone holding down the keyboard reset button).
		 */
		if (innmihand == 0) {
			innmihand = 1;
			printf("Got a keyboard NMI\n");
			innmihand = 0;
		}
		/*
		 * Two keyboard NMIs within `candbdelay' ticks force a
		 * panic; candbtimer() clears the pending request if the
		 * second one never comes.
		 */
		if (panicbutton) {
			if (crashandburn) {
				crashandburn = 0;
				panic(panicstr ?
				      "forced crash, nosync" : "forced crash");
			}
			crashandburn++;
			timeout(candbtimer, (void *)0, candbdelay);
		}
#endif
		return;
	}
	/* otherwise see if it was a memory parity error */
	if (parityerror(&frame))
		return;
	/* panic?? */
	printf("unexpected level 7 interrupt ignored\n");
}
1378
1379 /*
1380 * Parity error section. Contains magic.
1381 */
1382 #define PARREG ((volatile short *)IIOV(0x5B0000))
1383 static int gotparmem = 0;
1384 #ifdef DEBUG
1385 int ignorekperr = 0; /* ignore kernel parity errors */
1386 #endif
1387
1388 /*
1389 * Enable parity detection
1390 */
parityenable()
{
	label_t faultbuf;

	/*
	 * Probe the parity control register under `nofault' protection:
	 * a bus error here means the machine has no parity memory.
	 */
	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		nofault = (int *) 0;
#ifdef DEBUG
		printf("No parity memory\n");
#endif
		return;
	}
	*PARREG = 1;		/* arm parity detection (magic register) */
	nofault = (int *) 0;
	gotparmem = 1;		/* remembered by parityerror() */
#ifdef DEBUG
	printf("Parity detection enabled\n");
#endif
}
1410
1411 /*
1412 * Determine if level 7 interrupt was caused by a parity error
1413 * and deal with it if it was. Returns 1 if it was a parity error.
1414 */
parityerror(fp)
	struct frame *fp;
{
	if (!gotparmem)
		return(0);
	/* toggle PARREG, presumably to acknowledge/re-arm the parity
	   latch (magic — see the section comment above) */
	*PARREG = 0;
	DELAY(10);
	*PARREG = 1;
	if (panicstr) {
		printf("parity error after panic ignored\n");
		return(1);
	}
	/*
	 * findparerror() rescans all of memory trying to reproduce the
	 * error; if it can't, the error is treated as transient.
	 */
	if (!findparerror())
		printf("WARNING: transient parity error ignored\n");
	else if (USERMODE(fp->f_sr)) {
		/* fault was in user mode: kill only that process */
		printf("pid %d: parity error\n", curproc->p_pid);
		uprintf("sorry, pid %d killed due to memory parity error\n",
			curproc->p_pid);
		psignal(curproc, SIGKILL);
#ifdef DEBUG
	} else if (ignorekperr) {
		printf("WARNING: kernel parity error ignored\n");
#endif
	} else {
		/* kernel-mode parity error is fatal */
		regdump(fp, 128);
		panic("kernel parity error");
	}
	return(1);
}
1444
1445 /*
1446 * Yuk! There has got to be a better way to do this!
1447 * Searching all of memory with interrupts blocked can lead to disaster.
1448 */
findparerror()
{
	static label_t parcatch;
	static int looking = 0;
	volatile int pg, o, s;
	register volatile int *ip;
	register int i;
	int found;

#ifdef lint
	i = o = pg = 0; if (i) return(0);
#endif
	/*
	 * If looking is true we are searching for a known parity error
	 * and it has just occured.  All we do is return to the higher
	 * level invocation.
	 */
	if (looking)
		longjmp(&parcatch);
	s = splhigh();
	/*
	 * If setjmp returns true, the parity error we were searching
	 * for has just occured (longjmp above) at the current pg+o
	 */
	if (setjmp(&parcatch)) {
		printf("Parity error at 0x%x\n", ctob(pg)|o);
		found = 1;
		goto done;
	}
	/*
	 * If we get here, a parity error has occured for the first time
	 * and we need to find it. We turn off any external caches and
	 * loop thru memory, testing every longword til a fault occurs and
	 * we regain control at setjmp above. Note that because of the
	 * setjmp, pg and o need to be volatile or their values will be lost.
	 */
	looking = 1;
	ecacheoff();
	for (pg = btoc(lowram); pg < btoc(lowram)+physmem; pg++) {
		/* map each physical page read-only at vmmap and touch
		   every longword in it */
		pmap_enter(kernel_pmap, (vm_offset_t)vmmap, ctob(pg),
		    VM_PROT_READ, TRUE);
		ip = (int *)vmmap;
		for (o = 0; o < NBPG; o += sizeof(int))
			i = *ip++;
	}
	/*
	 * Getting here implies no fault was found.  Should never happen.
	 */
	printf("Couldn't locate parity error\n");
	found = 0;
done:
	/* either path: stop catching, tear down the mapping, restore state */
	looking = 0;
	pmap_remove(kernel_pmap, (vm_offset_t)vmmap, (vm_offset_t)&vmmap[NBPG]);
	ecacheon();
	splx(s);
	return(found);
}
1506
/*
 * Print CPU state (pc, sr, sfc/dfc, all data and address registers)
 * from the trap frame `fp', and if sbytes > 0 also dump that many
 * bytes of the kernel or user stack.  A static flag suppresses
 * recursive invocation (e.g. a fault while dumping).
 */
regdump(fp, sbytes)
	struct frame *fp; /* must not be register */
	int sbytes;
{
	static int doingdump = 0;
	register int i;
	int s;
	extern char *hexstr();

	if (doingdump)
		return;
	s = splhigh();
	doingdump = 1;
	printf("pid = %d, pc = %s, ",
	       curproc ? curproc->p_pid : -1, hexstr(fp->f_pc, 8));
	printf("ps = %s, ", hexstr(fp->f_sr, 4));
	printf("sfc = %s, ", hexstr(getsfc(), 4));
	printf("dfc = %s\n", hexstr(getdfc(), 4));
	printf("Registers:\n     ");
	for (i = 0; i < 8; i++)
		printf("        %d", i);
	printf("\ndreg:");
	for (i = 0; i < 8; i++)
		printf(" %s", hexstr(fp->f_regs[i], 8));
	printf("\nareg:");
	for (i = 0; i < 8; i++)
		printf(" %s", hexstr(fp->f_regs[i+8], 8));
	if (sbytes > 0) {
		if (fp->f_sr & PSL_S) {
			/* supervisor mode: dump from just below the frame
			   pointer argument on the kernel stack */
			printf("\n\nKernel stack (%s):",
			       hexstr((int)(((int *)&fp)-1), 8));
			dumpmem(((int *)&fp)-1, sbytes, 0);
		} else {
			/* user mode: dump from the saved user SP */
			printf("\n\nUser stack (%s):", hexstr(fp->f_regs[SP], 8));
			dumpmem((int *)fp->f_regs[SP], sbytes, 1);
		}
	}
	doingdump = 0;
	splx(s);
}
1547
1548 extern char kstack[];
1549 #define KSADDR ((int *)&(kstack[(UPAGES-1)*NBPG]))
1550
dumpmem(ptr,sz,ustack)1551 dumpmem(ptr, sz, ustack)
1552 register int *ptr;
1553 int sz, ustack;
1554 {
1555 register int i, val;
1556 extern char *hexstr();
1557
1558 for (i = 0; i < sz; i++) {
1559 if ((i & 7) == 0)
1560 printf("\n%s: ", hexstr((int)ptr, 6));
1561 else
1562 printf(" ");
1563 if (ustack == 1) {
1564 if ((val = fuword(ptr++)) == -1)
1565 break;
1566 } else {
1567 if (ustack == 0 &&
1568 (ptr < KSADDR || ptr > KSADDR+(NBPG/4-1)))
1569 break;
1570 val = *ptr++;
1571 }
1572 printf("%s", hexstr(val, 8));
1573 }
1574 printf("\n");
1575 }
1576
/*
 * Convert `val' to a zero-padded, uppercase hexadecimal string of
 * exactly `len' digits (0 <= len <= 8).  Returns the empty string for
 * an out-of-range length.  The result lives in a static buffer and is
 * only valid until the next call.
 */
char *
hexstr(val, len)
	register int val;
	int len;
{
	static char nbuf[9];
	register int x, i;

	/*
	 * Reject a negative length too: the old code only checked
	 * len > 8, so nbuf[len] = '\0' below could scribble out of
	 * bounds for len < 0.
	 */
	if (len < 0 || len > 8)
		return("");
	nbuf[len] = '\0';
	for (i = len-1; i >= 0; --i) {
		x = val & 0xF;
		if (x > 9)
			nbuf[i] = x - 10 + 'A';
		else
			nbuf[i] = x + '0';
		val >>= 4;
	}
	return(nbuf);
}
1598
1599 #ifdef DEBUG
1600 char oflowmsg[] = "k-stack overflow";
1601 char uflowmsg[] = "k-stack underflow";
1602
/*
 * Report a kernel stack overflow (oflow != 0) or underflow detected
 * by the low-level stack checks, dump the registers from the trap
 * frame `fr', and panic.
 */
badkstack(oflow, fr)
	int oflow;
	struct frame fr;
{
	extern char kstackatbase[];

	/* expected sp: stack base minus the hardware exception frame
	   (format-dependent) plus the 8-byte pc/sr pair */
	printf("%s: sp should be %x\n",
	       oflow ? oflowmsg : uflowmsg,
	       kstackatbase - (exframesize[fr.f_format] + 8));
	regdump(&fr, 0);
	panic(oflow ? oflowmsg : uflowmsg);
}
1615
1616 /*
1617 * print a primitive backtrace for the requested process.
1618 */
1619 backtrace(p)
1620 struct proc *p;
1621 {
1622 long fix, arg, pc, *lfp;
1623 caddr_t fp;
1624 char *fmt;
1625 int i;
1626
1627 if (p != curproc) {
1628 pc = *((long *)(p->p_addr->u_pcb.pcb_regs[11] + fix));
1629 fp = (caddr_t)p->p_addr->u_pcb.pcb_regs[10];
1630 fix = ((caddr_t)p->p_addr - kstack);
1631 } else {
1632 /*
1633 * Have to grab current frame pointer; start with function
1634 * that called backtrace.
1635 */
1636 asm("movl a6, %0" : "=r" (fp));
1637 lfp = (long *)fp;
1638 pc = lfp[1];
1639 fp = (caddr_t)lfp[0];
1640 fix = 0;
1641 }
1642
1643 printf("Process %s\n", p->p_comm);
1644 while (fp > kstack) {
1645 fp += fix;
1646 if (kernacc(fp, 6 * sizeof(*lfp), B_READ) == 0)
1647 return;
1648 lfp = (long *)fp;
1649 printf("Function: 0x%x(0x%x, 0x%x, 0x%x, 0x%x)\n",
1650 pc, lfp[2], lfp[3], lfp[4], lfp[5]);
1651 pc = lfp[1];
1652 fp = (caddr_t)lfp[0];
1653 }
1654 }
1655 #endif /* DEBUG */
1656