1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
13  *
14  *	@(#)machdep.c	8.3 (Berkeley) 11/14/93
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/signalvar.h>
20 #include <sys/kernel.h>
21 #include <sys/map.h>
22 #include <sys/proc.h>
23 #include <sys/buf.h>
24 #include <sys/reboot.h>
25 #include <sys/conf.h>
26 #include <sys/file.h>
27 #include <sys/clist.h>
28 #include <sys/callout.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/msgbuf.h>
32 #include <sys/ioctl.h>
33 #include <sys/tty.h>
34 #include <sys/mount.h>
35 #include <sys/user.h>
36 #include <sys/exec.h>
37 #include <sys/sysctl.h>
38 #ifdef SYSVSHM
39 #include <sys/shm.h>
40 #endif
41 #ifdef HPUXCOMPAT
42 #include <hp/hpux/hpux.h>
43 #endif
44 
45 #include <machine/cpu.h>
46 #include <machine/reg.h>
47 #include <machine/psl.h>
48 #include <hp/dev/cons.h>
49 #include <hp300/hp300/isr.h>
50 #include <hp300/hp300/pte.h>
51 #include <net/netisr.h>
52 
53 #define	MAXMEM	64*1024*CLSIZE	/* XXX - from cmap.h */
54 #include <vm/vm_kern.h>
55 
56 /* the following is used externally (sysctl_hw) */
57 char machine[] = "hp300";		/* cpu "architecture" */
58 
59 vm_map_t buffer_map;
60 extern vm_offset_t avail_end;
61 
62 /*
63  * Declare these as initialized data so we can patch them.
64  */
65 int	nswbuf = 0;
66 #ifdef	NBUF
67 int	nbuf = NBUF;
68 #else
69 int	nbuf = 0;
70 #endif
71 #ifdef	BUFPAGES
72 int	bufpages = BUFPAGES;
73 #else
74 int	bufpages = 0;
75 #endif
76 int	msgbufmapped;		/* set when safe to use msgbuf */
77 int	maxmem;			/* max memory per process */
78 int	physmem = MAXMEM;	/* max supported memory, changes to actual */
79 /*
80  * safepri is a safe priority for sleep to set for a spin-wait
81  * during autoconfiguration or after a panic.
82  */
83 int	safepri = PSL_LOWIPL;
84 
85 extern	u_int lowram;
86 extern	short exframesize[];
87 
88 /*
89  * Console initialization: called early on from main,
90  * before vm init or startup.  Do enough configuration
91  * to choose and initialize a console.
92  */
93 consinit()
94 {
95 
96 	/*
97 	 * Set cpuspeed immediately since cninit() called routines
98 	 * might use delay.  Note that we only set it if a custom value
99 	 * has not already been specified.
100 	 */
101 	if (cpuspeed == 0) {
102 		switch (machineid) {
103 		case HP_320:
104 		case HP_330:
105 		case HP_340:
106 			cpuspeed = MHZ_16;
107 			break;
108 		case HP_350:
109 		case HP_360:
110 		case HP_380:
111 			cpuspeed = MHZ_25;
112 			break;
113 		case HP_370:
114 		case HP_433:
115 			cpuspeed = MHZ_33;
116 			break;
117 		case HP_375:
118 			cpuspeed = MHZ_50;
119 			break;
120 		default:	/* assume the fastest */
121 			cpuspeed = MHZ_50;
122 			break;
123 		}
124 		if (mmutype == MMU_68040)
125 			cpuspeed *= 2;	/* XXX */
126 	}
127 	/*
128          * Find what hardware is attached to this machine.
129          */
130 	find_devs();
131 
132 	/*
133 	 * Initialize the console before we print anything out.
134 	 */
135 	cninit();
136 }
137 
138 /*
139  * cpu_startup: allocate memory for variable-sized tables,
140  * initialize cpu, and do autoconfiguration.
141  */
142 cpu_startup()
143 {
144 	register unsigned i;
145 	register caddr_t v, firstaddr;
146 	int base, residual;
147 	vm_offset_t minaddr, maxaddr;
148 	vm_size_t size;
149 #ifdef DEBUG
150 	extern int pmapdebug;
151 	int opmapdebug = pmapdebug;
152 
153 	pmapdebug = 0;
154 #endif
155 
156 	/*
157 	 * Initialize error message buffer (at end of core).
158 	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
159 	 */
160 	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
161 		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
162 		    avail_end + i * NBPG, VM_PROT_ALL, TRUE);
163 	msgbufmapped = 1;
164 
165 	/*
166 	 * Good {morning,afternoon,evening,night}.
167 	 */
168 	printf(version);
169 	identifycpu();
170 	printf("real mem = %d\n", ctob(physmem));
171 
172 	/*
173 	 * Allocate space for system data structures.
174 	 * The first available real memory address is in "firstaddr".
175 	 * The first available kernel virtual address is in "v".
176 	 * As pages of kernel virtual memory are allocated, "v" is incremented.
177 	 * As pages of memory are allocated and cleared,
178 	 * "firstaddr" is incremented.
179 	 * An index into the kernel page table corresponding to the
180 	 * virtual memory address maintained in "v" is kept in "mapaddr".
181 	 */
182 	/*
183 	 * Make two passes.  The first pass calculates how much memory is
184 	 * needed and allocates it.  The second pass assigns virtual
185 	 * addresses to the various data structures.
186 	 */
187 	firstaddr = 0;
188 again:
189 	v = (caddr_t)firstaddr;
190 
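	/*
	 * valloc/valloclim carve space for a table out of the region at "v":
	 * on the first (sizing) pass this just advances v, on the second
	 * pass it hands out the final kernel virtual addresses.  valloclim
	 * also records the address just past the table in "lim".
	 */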
191 #define	valloc(name, type, num) \
192 	    (name) = (type *)v; v = (caddr_t)((name)+(num))
193 #define	valloclim(name, type, num, lim) \
194 	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
195 	valloc(cfree, struct cblock, nclist);
196 	valloc(callout, struct callout, ncallout);
197 	valloc(swapmap, struct map, nswapmap = maxproc * 2);
198 #ifdef SYSVSHM
199 	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
200 #endif
201 
202 	/*
203 	 * Determine how many buffers to allocate.
204 	 * Since HPs tend to be long on memory and short on disk speed,
205 	 * we allocate more buffer space than the BSD standard
206 	 * (10% of memory for the first 2 Meg, 5% of the remainder);
207 	 * we just allocate a flat 10%.  Ensure a minimum of 16 buffers.
208 	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
209 	 */
210 	if (bufpages == 0)
211 		bufpages = physmem / 10 / CLSIZE;
212 	if (nbuf == 0) {
213 		nbuf = bufpages;
214 		if (nbuf < 16)
215 			nbuf = 16;
216 	}
217 	if (nswbuf == 0) {
218 		nswbuf = (nbuf / 2) &~ 1;	/* force even */
219 		if (nswbuf > 256)
220 			nswbuf = 256;		/* sanity */
221 	}
222 	valloc(swbuf, struct buf, nswbuf);
223 	valloc(buf, struct buf, nbuf);
224 	/*
225 	 * End of first pass, size has been calculated so allocate memory
226 	 */
227 	if (firstaddr == 0) {
228 		size = (vm_size_t)(v - firstaddr);
229 		firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
230 		if (firstaddr == 0)
231 			panic("startup: no room for tables");
232 		goto again;
233 	}
234 	/*
235 	 * End of second pass, addresses have been assigned
236 	 */
237 	if ((vm_size_t)(v - firstaddr) != size)
238 		panic("startup: table size inconsistency");
239 	/*
240 	 * Now allocate buffers proper.  They are different from the above
241 	 * in that they usually occupy more virtual memory than physical.
242 	 */
243 	size = MAXBSIZE * nbuf;
244 	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
245 				   &maxaddr, size, FALSE);
246 	minaddr = (vm_offset_t)buffers;
247 	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
248 			&minaddr, size, FALSE) != KERN_SUCCESS)
249 		panic("startup: cannot allocate buffers");
250 	base = bufpages / nbuf;
251 	residual = bufpages % nbuf;
252 	for (i = 0; i < nbuf; i++) {
253 		vm_size_t curbufsize;
254 		vm_offset_t curbuf;
255 
256 		/*
257 		 * First <residual> buffers get (base+1) physical pages
258 		 * allocated for them.  The rest get (base) physical pages.
259 		 *
260 		 * The rest of each buffer occupies virtual space,
261 		 * but has no physical memory allocated for it.
262 		 */
263 		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
264 		curbufsize = CLBYTES * (i < residual ? base+1 : base);
265 		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
266 		vm_map_simplify(buffer_map, curbuf);
267 	}
268 	/*
269 	 * Allocate a submap for exec arguments.  This map effectively
270 	 * limits the number of processes exec'ing at any time.
271 	 */
272 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
273 				 16*NCARGS, TRUE);
274 	/*
275 	 * Allocate a submap for physio
276 	 */
277 	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
278 				 VM_PHYS_SIZE, TRUE);
279 
280 	/*
281 	 * Finally, allocate mbuf pool.  Since mclrefcnt is an odd size,
282 	 * we use the more space-efficient malloc in place of kmem_alloc.
283 	 */
284 	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
285 				   M_MBUF, M_NOWAIT);
286 	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
287 	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
288 			       VM_MBUF_SIZE, FALSE);
289 	/*
290 	 * Initialize callouts
291 	 */
292 	callfree = callout;
293 	for (i = 1; i < ncallout; i++)
294 		callout[i-1].c_next = &callout[i];
295 	callout[i-1].c_next = NULL;
296 
297 #ifdef DEBUG
298 	pmapdebug = opmapdebug;
299 #endif
300 	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
301 	printf("using %d buffers containing %d bytes of memory\n",
302 		nbuf, bufpages * CLBYTES);
303 	/*
304 	 * Set up CPU-specific registers, cache, etc.
305 	 */
306 	initcpu();
307 
308 	/*
309 	 * Set up buffers, so they can be used to read disk labels.
310 	 */
311 	bufinit();
312 
313 	/*
314 	 * Configure the system.
315 	 */
316 	configure();
317 }
318 
319 /*
320  * Set registers on exec.
321  * XXX Should clear registers except sp, pc,
322  * but would break init; should be fixed soon.
323  */
324 setregs(p, entry, retval)
325 	register struct proc *p;
326 	u_long entry;
327 	int retval[2];
328 {
329 	struct frame *frame = (struct frame *)p->p_md.md_regs;
330 
331 	frame->f_pc = entry & ~1;
332 #ifdef FPCOPROC
333 	/* restore a null state frame */
334 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
335 	m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
336 #endif
337 #ifdef HPUXCOMPAT
338 	if (p->p_md.md_flags & MDP_HPUX) {
339 
340 		frame->f_regs[A0] = 0; /* not 68010 (bit 31), no FPA (30) */
341 		retval[0] = 0;		/* no float card */
342 #ifdef FPCOPROC
343 		retval[1] = 1;		/* yes 68881 */
344 #else
345 		retval[1] = 0;		/* no 68881 */
346 #endif
347 	}
348 	/*
349 	 * XXX This doesn't have much to do with setting registers but
350 	 * I didn't want to muck up kern_exec.c with this code, so I
351 	 * stuck it here.
352 	 *
353 	 * Ensure we perform the right action on traps type 1 and 2:
354 	 * If our parent is an HPUX process and we are being traced, turn
355 	 * on HPUX style interpretation.  Else if we were using the HPUX
356 	 * style interpretation, revert to the BSD interpretation.
357 	 *
358 	 * Note that we do this by changing the trap instruction in the
359 	 * global "sigcode" array which then gets copied out to the user's
360 	 * sigcode in the stack.  Since we are changing it in the global
361 	 * array we must always reset it, even for non-HPUX processes.
362 	 *
363 	 * Note also that implementing it in this way creates a potential
364 	 * race where we could have tweaked it for process A which then
365 	 * blocks in the copyout to the stack and process B comes along
366 	 * and untweaks it causing A to wind up with the wrong setting
367 	 * when the copyout continues.  However, since we have already
368 	 * copied something out to this user stack page (thereby faulting
369 	 * it in), this scenario is extremely unlikely.
370 	 */
371 	{
372 		extern short sigcodetrap[];
373 
374 		if ((p->p_pptr->p_md.md_flags & MDP_HPUX) &&
375 		    (p->p_flag & P_TRACED)) {
376 			p->p_md.md_flags |= MDP_HPUXTRACE;
377 			*sigcodetrap = 0x4E42;
378 		} else {
379 			p->p_md.md_flags &= ~MDP_HPUXTRACE;
380 			*sigcodetrap = 0x4E41;
381 		}
382 	}
383 #endif
384 }
385 
386 /*
387  * Info for CTL_HW
388  */
389 extern	char machine[];
390 char	cpu_model[120];
391 extern	char ostype[], osrelease[], version[];
392 
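/*
 * Construct and print the cpu_model string (machine model, CPU, MMU,
 * FPU and external cache) from machineid, mmutype and ectype, and
 * panic if support for this CPU type is not configured into the kernel.
 */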
393 identifycpu()
394 {
395 	char *t, *mc;
396 	int len;
397 
398 	switch (machineid) {
399 	case HP_320:
400 		t = "320 (16.67MHz";
401 		break;
402 	case HP_330:
403 		t = "318/319/330 (16.67MHz";
404 		break;
405 	case HP_340:
406 		t = "340 (16.67MHz";
407 		break;
408 	case HP_350:
409 		t = "350 (25MHz";
410 		break;
411 	case HP_360:
412 		t = "360 (25MHz";
413 		break;
414 	case HP_370:
415 		t = "370 (33.33MHz";
416 		break;
417 	case HP_375:
418 		t = "345/375 (50MHz";
419 		break;
420 	case HP_380:
421 		t = "380/425 (25MHz";
422 		break;
423 	case HP_433:
424 		t = "433 (33MHz";
425 		break;
426 	default:
427 		printf("\nunknown machine type %d\n", machineid);
428 		panic("startup");
429 	}
430 	mc = (mmutype == MMU_68040 ? "40" :
431 	       (mmutype == MMU_68030 ? "30" : "20"));
432 	sprintf(cpu_model, "HP9000/%s MC680%s CPU", t, mc);
433 	switch (mmutype) {
434 	case MMU_68040:
435 	case MMU_68030:
436 		strcat(cpu_model, "+MMU");
437 		break;
438 	case MMU_68851:
439 		strcat(cpu_model, ", MC68851 MMU");
440 		break;
441 	case MMU_HP:
442 		strcat(cpu_model, ", HP MMU");
443 		break;
444 	default:
445 		printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
446 		panic("startup");
447 	}
448 	len = strlen(cpu_model);
449 	if (mmutype == MMU_68040)
450 		len += sprintf(cpu_model + len,
451 		    "+FPU, 4k on-chip physical I/D caches");
452 	else if (mmutype == MMU_68030)
453 		len += sprintf(cpu_model + len, ", %sMHz MC68882 FPU",
454 		       machineid == HP_340 ? "16.67" :
455 		       (machineid == HP_360 ? "25" :
456 			(machineid == HP_370 ? "33.33" : "50")));
457 	else
458 		len += sprintf(cpu_model + len, ", %sMHz MC68881 FPU",
459 		       machineid == HP_350 ? "20" : "16.67");
460 	switch (ectype) {
461 	case EC_VIRT:
462 		sprintf(cpu_model + len, ", %dK virtual-address cache",
463 		       machineid == HP_320 ? 16 : 32);
464 		break;
465 	case EC_PHYS:
466 		sprintf(cpu_model + len, ", %dK physical-address cache",
467 		       machineid == HP_370 ? 64 : 32);
468 		break;
469 	}
470 	strcat(cpu_model, ")");
471 	printf("%s\n", cpu_model);
472 	/*
473 	 * Now that we have told the user what they have,
474 	 * let them know if that machine type isn't configured.
475 	 */
476 	switch (machineid) {
477 	case -1:		/* keep compilers happy */
478 #if !defined(HP320) && !defined(HP350)
479 	case HP_320:
480 	case HP_350:
481 #endif
482 #ifndef HP330
483 	case HP_330:
484 #endif
485 #if !defined(HP360) && !defined(HP370)
486 	case HP_340:
487 	case HP_360:
488 	case HP_370:
489 #endif
490 #if !defined(HP380)
491 	case HP_380:
492 	case HP_433:
493 #endif
494 		panic("CPU type not configured");
495 	default:
496 		break;
497 	}
498 }
499 
500 /*
501  * machine dependent system variables.
502  */
503 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
504 	int *name;
505 	u_int namelen;
506 	void *oldp;
507 	size_t *oldlenp;
508 	void *newp;
509 	size_t newlen;
510 	struct proc *p;
511 {
512 
513 	/* all sysctl names at this level are terminal */
514 	if (namelen != 1)
515 		return (ENOTDIR);		/* overloaded */
516 
517 	switch (name[0]) {
518 	case CPU_CONSDEV:
519 		return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
520 		    sizeof cn_tty->t_dev));
521 	default:
522 		return (EOPNOTSUPP);
523 	}
524 	/* NOTREACHED */
525 }
526 
527 #ifdef USELEDS
528 #include <hp300/hp300/led.h>
529 
530 int inledcontrol = 0;	/* 1 if we are in ledcontrol already, cheap mutex */
531 char *ledaddr;
532 
533 /*
534  * Map the LED page and set up the KVA to access it.
535  */
536 ledinit()
537 {
538 	extern caddr_t ledbase;
539 
540 	pmap_enter(kernel_pmap, (vm_offset_t)ledbase, (vm_offset_t)LED_ADDR,
541 		   VM_PROT_READ|VM_PROT_WRITE, TRUE);
542 	ledaddr = (char *) ((int)ledbase | (LED_ADDR & PGOFSET));
543 }
544 
545 /*
546  * Do lights:
547  *	`ons' is a mask of LEDs to turn on,
548  *	`offs' is a mask of LEDs to turn off,
549  *	`togs' is a mask of LEDs to toggle.
550  * Note we don't use splclock/splx for mutual exclusion.
551  * They are expensive and we really don't need to be that precise.
552  * Besides we would like to be able to profile this routine.
553  */
554 ledcontrol(ons, offs, togs)
555 	register int ons, offs, togs;
556 {
557 	static char currentleds;
558 	register char leds;
559 
560 	inledcontrol = 1;
561 	leds = currentleds;
562 	if (ons)
563 		leds |= ons;
564 	if (offs)
565 		leds &= ~offs;
566 	if (togs)
567 		leds ^= togs;
568 	currentleds = leds;
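	/* the LED register is apparently active-low, so write the complement */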
569 	*ledaddr = ~leds;
570 	inledcontrol = 0;
571 }
572 #endif
573 
574 #define SS_RTEFRAME	1
575 #define SS_FPSTATE	2
576 #define SS_USERREGS	4
577 
578 struct sigstate {
579 	int	ss_flags;		/* which of the following are valid */
580 	struct	frame ss_frame;		/* original exception frame */
581 	struct	fpframe ss_fpstate;	/* 68881/68882 state info */
582 };
583 
584 /*
585  * WARNING: code in locore.s assumes the layout shown for sf_signum
586  * thru sf_handler so... don't screw with them!
587  */
588 struct sigframe {
589 	int	sf_signum;		/* signo for handler */
590 	int	sf_code;		/* additional info for handler */
591 	struct	sigcontext *sf_scp;	/* context ptr for handler */
592 	sig_t	sf_handler;		/* handler addr for u_sigc */
593 	struct	sigstate sf_state;	/* state of the hardware */
594 	struct	sigcontext sf_sc;	/* actual context */
595 };
596 
597 #ifdef HPUXCOMPAT
598 struct	hpuxsigcontext {
599 	int	hsc_syscall;
600 	char	hsc_action;
601 	char	hsc_pad1;
602 	char	hsc_pad2;
603 	char	hsc_onstack;
604 	int	hsc_mask;
605 	int	hsc_sp;
606 	short	hsc_ps;
607 	int	hsc_pc;
608 /* the rest aren't part of the context but are included for our convenience */
609 	short	hsc_pad;
610 	u_int	hsc_magic;		/* XXX sigreturn: cookie */
611 	struct	sigcontext *hsc_realsc;	/* XXX sigreturn: ptr to BSD context */
612 };
613 
614 /*
615  * For an HP-UX process, a partial hpuxsigframe follows the normal sigframe.
616  * Tremendous waste of space, but some HP-UX applications (e.g. LCL) need it.
617  */
618 struct hpuxsigframe {
619 	int	hsf_signum;
620 	int	hsf_code;
621 	struct	sigcontext *hsf_scp;
622 	struct	hpuxsigcontext hsf_sc;
623 	int	hsf_regs[15];
624 };
625 #endif
626 
627 #ifdef DEBUG
628 int sigdebug = 0;
629 int sigpid = 0;
630 #define SDB_FOLLOW	0x01
631 #define SDB_KSTACK	0x02
632 #define SDB_FPSTATE	0x04
633 #endif
634 
635 /*
636  * Send an interrupt to process.
637  */
638 void
639 sendsig(catcher, sig, mask, code)
640 	sig_t catcher;
641 	int sig, mask;
642 	unsigned code;
643 {
644 	register struct proc *p = curproc;
645 	register struct sigframe *fp, *kfp;
646 	register struct frame *frame;
647 	register struct sigacts *psp = p->p_sigacts;
648 	register short ft;
649 	int oonstack, fsize;
650 	extern char sigcode[], esigcode[];
651 
652 	frame = (struct frame *)p->p_md.md_regs;
653 	ft = frame->f_format;
654 	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
655 	/*
656 	 * Allocate and validate space for the signal handler
657 	 * context. Note that if the stack is in P0 space, the
658 	 * call to grow() is a nop, and the useracc() check
659 	 * will fail if the process has not already allocated
660 	 * the space with a `brk'.
661 	 */
662 #ifdef HPUXCOMPAT
663 	if (p->p_md.md_flags & MDP_HPUX)
664 		fsize = sizeof(struct sigframe) + sizeof(struct hpuxsigframe);
665 	else
666 #endif
667 	fsize = sizeof(struct sigframe);
668 	if ((psp->ps_flags & SAS_ALTSTACK) &&
669 	    (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
670 	    (psp->ps_sigonstack & sigmask(sig))) {
671 		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
672 					 psp->ps_sigstk.ss_size - fsize);
673 		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
674 	} else
675 		fp = (struct sigframe *)(frame->f_regs[SP] - fsize);
676 	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
677 		(void)grow(p, (unsigned)fp);
678 #ifdef DEBUG
679 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
680 		printf("sendsig(%d): sig %d ssp %x usp %x scp %x ft %d\n",
681 		       p->p_pid, sig, &oonstack, fp, &fp->sf_sc, ft);
682 #endif
683 	if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
684 #ifdef DEBUG
685 		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
686 			printf("sendsig(%d): useracc failed on sig %d\n",
687 			       p->p_pid, sig);
688 #endif
689 		/*
690 		 * Process has trashed its stack; give it an illegal
691 		 * instruction to halt it in its tracks.
692 		 */
693 		SIGACTION(p, SIGILL) = SIG_DFL;
694 		sig = sigmask(SIGILL);
695 		p->p_sigignore &= ~sig;
696 		p->p_sigcatch &= ~sig;
697 		p->p_sigmask &= ~sig;
698 		psignal(p, SIGILL);
699 		return;
700 	}
701 	kfp = (struct sigframe *)malloc((u_long)fsize, M_TEMP, M_WAITOK);
702 	/*
703 	 * Build the argument list for the signal handler.
704 	 */
705 	kfp->sf_signum = sig;
706 	kfp->sf_code = code;
707 	kfp->sf_scp = &fp->sf_sc;
708 	kfp->sf_handler = catcher;
709 	/*
710 	 * Save necessary hardware state.  Currently this includes:
711 	 *	- general registers
712 	 *	- original exception frame (if not a "normal" frame)
713 	 *	- FP coprocessor state
714 	 */
715 	kfp->sf_state.ss_flags = SS_USERREGS;
716 	bcopy((caddr_t)frame->f_regs,
717 	      (caddr_t)kfp->sf_state.ss_frame.f_regs, sizeof frame->f_regs);
718 	if (ft >= FMT7) {
719 #ifdef DEBUG
720 		if (ft > 15 || exframesize[ft] < 0)
721 			panic("sendsig: bogus frame type");
722 #endif
723 		kfp->sf_state.ss_flags |= SS_RTEFRAME;
724 		kfp->sf_state.ss_frame.f_format = frame->f_format;
725 		kfp->sf_state.ss_frame.f_vector = frame->f_vector;
726 		bcopy((caddr_t)&frame->F_u,
727 		      (caddr_t)&kfp->sf_state.ss_frame.F_u, exframesize[ft]);
728 		/*
729 		 * Leave an indicator that we need to clean up the kernel
730 		 * stack.  We do this by setting the "pad word" above the
731 		 * hardware stack frame to the amount the stack must be
732 		 * adjusted by.
733 		 *
734 		 * N.B. we increment rather than just set f_stackadj in
735 		 * case we are called from syscall when processing a
736 		 * sigreturn.  In that case, f_stackadj may be non-zero.
737 		 */
738 		frame->f_stackadj += exframesize[ft];
739 		frame->f_format = frame->f_vector = 0;
740 #ifdef DEBUG
741 		if (sigdebug & SDB_FOLLOW)
742 			printf("sendsig(%d): copy out %d of frame %d\n",
743 			       p->p_pid, exframesize[ft], ft);
744 #endif
745 	}
746 #ifdef FPCOPROC
747 	kfp->sf_state.ss_flags |= SS_FPSTATE;
748 	m68881_save(&kfp->sf_state.ss_fpstate);
749 #ifdef DEBUG
750 	if ((sigdebug & SDB_FPSTATE) && *(char *)&kfp->sf_state.ss_fpstate)
751 		printf("sendsig(%d): copy out FP state (%x) to %x\n",
752 		       p->p_pid, *(u_int *)&kfp->sf_state.ss_fpstate,
753 		       &kfp->sf_state.ss_fpstate);
754 #endif
755 #endif
756 	/*
757 	 * Build the signal context to be used by sigreturn.
758 	 */
759 	kfp->sf_sc.sc_onstack = oonstack;
760 	kfp->sf_sc.sc_mask = mask;
761 	kfp->sf_sc.sc_sp = frame->f_regs[SP];
762 	kfp->sf_sc.sc_fp = frame->f_regs[A6];
763 	kfp->sf_sc.sc_ap = (int)&fp->sf_state;
764 	kfp->sf_sc.sc_pc = frame->f_pc;
765 	kfp->sf_sc.sc_ps = frame->f_sr;
766 #ifdef HPUXCOMPAT
767 	/*
768 	 * Create an HP-UX style sigcontext structure and associated goo
769 	 */
770 	if (p->p_md.md_flags & MDP_HPUX) {
771 		register struct hpuxsigframe *hkfp;
772 
773 		hkfp = (struct hpuxsigframe *)&kfp[1];
774 		hkfp->hsf_signum = bsdtohpuxsig(kfp->sf_signum);
775 		hkfp->hsf_code = kfp->sf_code;
776 		hkfp->hsf_scp = (struct sigcontext *)
777 			&((struct hpuxsigframe *)(&fp[1]))->hsf_sc;
778 		hkfp->hsf_sc.hsc_syscall = 0;		/* XXX */
779 		hkfp->hsf_sc.hsc_action = 0;		/* XXX */
780 		hkfp->hsf_sc.hsc_pad1 = hkfp->hsf_sc.hsc_pad2 = 0;
781 		hkfp->hsf_sc.hsc_onstack = kfp->sf_sc.sc_onstack;
782 		hkfp->hsf_sc.hsc_mask = kfp->sf_sc.sc_mask;
783 		hkfp->hsf_sc.hsc_sp = kfp->sf_sc.sc_sp;
784 		hkfp->hsf_sc.hsc_ps = kfp->sf_sc.sc_ps;
785 		hkfp->hsf_sc.hsc_pc = kfp->sf_sc.sc_pc;
786 		hkfp->hsf_sc.hsc_pad = 0;
787 		hkfp->hsf_sc.hsc_magic = 0xdeadbeef;
788 		hkfp->hsf_sc.hsc_realsc = kfp->sf_scp;
789 		bcopy((caddr_t)frame->f_regs, (caddr_t)hkfp->hsf_regs,
790 		      sizeof (hkfp->hsf_regs));
791 
792 		kfp->sf_signum = hkfp->hsf_signum;
793 		kfp->sf_scp = hkfp->hsf_scp;
794 	}
795 #endif
796 	(void) copyout((caddr_t)kfp, (caddr_t)fp, fsize);
797 	frame->f_regs[SP] = (int)fp;
798 #ifdef DEBUG
799 	if (sigdebug & SDB_FOLLOW)
800 		printf("sendsig(%d): sig %d scp %x fp %x sc_sp %x sc_ap %x\n",
801 		       p->p_pid, sig, kfp->sf_scp, fp,
802 		       kfp->sf_sc.sc_sp, kfp->sf_sc.sc_ap);
803 #endif
804 	/*
805 	 * Signal trampoline code is at base of user stack.
806 	 */
807 	frame->f_pc = (int)PS_STRINGS - (esigcode - sigcode);
808 #ifdef DEBUG
809 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
810 		printf("sendsig(%d): sig %d returns\n",
811 		       p->p_pid, sig);
812 #endif
813 	free((caddr_t)kfp, M_TEMP);
814 }
815 
816 /*
817  * System call to cleanup state after a signal
818  * has been taken.  Reset signal mask and
819  * stack state from context left by sendsig (above).
820  * Return to previous pc and psl as specified by
821  * context left by sendsig. Check carefully to
822  * make sure that the user has not modified the
823  * psl to gain improper privileges or to cause
824  * a machine fault.
825  */
826 struct sigreturn_args {
827 	struct sigcontext *sigcntxp;
828 };
829 /* ARGSUSED */
830 sigreturn(p, uap, retval)
831 	struct proc *p;
832 	struct sigreturn_args *uap;
833 	int *retval;
834 {
835 	register struct sigcontext *scp;
836 	register struct frame *frame;
837 	register int rf;
838 	struct sigcontext tsigc;
839 	struct sigstate tstate;
840 	int flags;
841 
842 	scp = uap->sigcntxp;
843 #ifdef DEBUG
844 	if (sigdebug & SDB_FOLLOW)
845 		printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
846 #endif
847 	if ((int)scp & 1)
848 		return (EINVAL);
849 #ifdef HPUXCOMPAT
850 	/*
851 	 * Grab context as an HP-UX style context and determine if it
852 	 * was one that we constructed in sendsig.
853 	 */
854 	if (p->p_md.md_flags & MDP_HPUX) {
855 		struct hpuxsigcontext *hscp = (struct hpuxsigcontext *)scp;
856 		struct hpuxsigcontext htsigc;
857 
858 		if (useracc((caddr_t)hscp, sizeof (*hscp), B_WRITE) == 0 ||
859 		    copyin((caddr_t)hscp, (caddr_t)&htsigc, sizeof htsigc))
860 			return (EINVAL);
861 		/*
862 		 * If not generated by sendsig or we cannot restore the
863 		 * BSD-style sigcontext, just restore what we can -- state
864 		 * will be lost, but them's the breaks.
865 		 */
866 		hscp = &htsigc;
867 		if (hscp->hsc_magic != 0xdeadbeef ||
868 		    (scp = hscp->hsc_realsc) == 0 ||
869 		    useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
870 		    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc)) {
871 			if (hscp->hsc_onstack & 01)
872 				p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
873 			else
874 				p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
875 			p->p_sigmask = hscp->hsc_mask &~ sigcantmask;
876 			frame = (struct frame *) p->p_md.md_regs;
877 			frame->f_regs[SP] = hscp->hsc_sp;
878 			frame->f_pc = hscp->hsc_pc;
879 			frame->f_sr = hscp->hsc_ps &~ PSL_USERCLR;
880 			return (EJUSTRETURN);
881 		}
882 		/*
883 		 * Otherwise, overlay BSD context with possibly modified
884 		 * HP-UX values.
885 		 */
886 		tsigc.sc_onstack = hscp->hsc_onstack;
887 		tsigc.sc_mask = hscp->hsc_mask;
888 		tsigc.sc_sp = hscp->hsc_sp;
889 		tsigc.sc_ps = hscp->hsc_ps;
890 		tsigc.sc_pc = hscp->hsc_pc;
891 	} else
892 #endif
893 	/*
894 	 * Test and fetch the context structure.
895 	 * We grab it all at once for speed.
896 	 */
897 	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
898 	    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
899 		return (EINVAL);
900 	scp = &tsigc;
901 	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
902 		return (EINVAL);
903 	/*
904 	 * Restore the user supplied information
905 	 */
906 	if (scp->sc_onstack & 01)
907 		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
908 	else
909 		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
910 	p->p_sigmask = scp->sc_mask &~ sigcantmask;
911 	frame = (struct frame *) p->p_md.md_regs;
912 	frame->f_regs[SP] = scp->sc_sp;
913 	frame->f_regs[A6] = scp->sc_fp;
914 	frame->f_pc = scp->sc_pc;
915 	frame->f_sr = scp->sc_ps;
916 	/*
917 	 * Grab pointer to hardware state information.
918 	 * If zero, the user is probably doing a longjmp.
919 	 */
920 	if ((rf = scp->sc_ap) == 0)
921 		return (EJUSTRETURN);
922 	/*
923 	 * See if there is anything to do before we go to the
924 	 * expense of copying in close to 1/2K of data
925 	 */
926 	flags = fuword((caddr_t)rf);
927 #ifdef DEBUG
928 	if (sigdebug & SDB_FOLLOW)
929 		printf("sigreturn(%d): sc_ap %x flags %x\n",
930 		       p->p_pid, rf, flags);
931 #endif
932 	/*
933 	 * fuword failed (bogus sc_ap value).
934 	 */
935 	if (flags == -1)
936 		return (EINVAL);
937 	if (flags == 0 || copyin((caddr_t)rf, (caddr_t)&tstate, sizeof tstate))
938 		return (EJUSTRETURN);
939 #ifdef DEBUG
940 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
941 		printf("sigreturn(%d): ssp %x usp %x scp %x ft %d\n",
942 		       p->p_pid, &flags, scp->sc_sp, uap->sigcntxp,
943 		       (flags&SS_RTEFRAME) ? tstate.ss_frame.f_format : -1);
944 #endif
945 	/*
946 	 * Restore most of the users registers except for A6 and SP
947 	 * which were handled above.
948 	 */
949 	if (flags & SS_USERREGS)
950 		bcopy((caddr_t)tstate.ss_frame.f_regs,
951 		      (caddr_t)frame->f_regs, sizeof(frame->f_regs)-2*NBPW);
952 	/*
953 	 * Restore long stack frames.  Note that we do not copy
954 	 * back the saved SR or PC, they were picked up above from
955 	 * the sigcontext structure.
956 	 */
957 	if (flags & SS_RTEFRAME) {
958 		register int sz;
959 
960 		/* grab frame type and validate */
961 		sz = tstate.ss_frame.f_format;
962 		if (sz > 15 || (sz = exframesize[sz]) < 0)
963 			return (EINVAL);
964 		frame->f_stackadj -= sz;
965 		frame->f_format = tstate.ss_frame.f_format;
966 		frame->f_vector = tstate.ss_frame.f_vector;
967 		bcopy((caddr_t)&tstate.ss_frame.F_u, (caddr_t)&frame->F_u, sz);
968 #ifdef DEBUG
969 		if (sigdebug & SDB_FOLLOW)
970 			printf("sigreturn(%d): copy in %d of frame type %d\n",
971 			       p->p_pid, sz, tstate.ss_frame.f_format);
972 #endif
973 	}
974 #ifdef FPCOPROC
975 	/*
976 	 * Finally we restore the original FP context
977 	 */
978 	if (flags & SS_FPSTATE)
979 		m68881_restore(&tstate.ss_fpstate);
980 #ifdef DEBUG
981 	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
982 		printf("sigreturn(%d): copied in FP state (%x) at %x\n",
983 		       p->p_pid, *(u_int *)&tstate.ss_fpstate,
984 		       &tstate.ss_fpstate);
985 #endif
986 #endif
987 #ifdef DEBUG
988 	if ((sigdebug & SDB_FOLLOW) ||
989 	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
990 		printf("sigreturn(%d): returns\n", p->p_pid);
991 #endif
992 	return (EJUSTRETURN);
993 }
994 
995 int	waittime = -1;
996 
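/*
 * Machine-dependent reboot.  Unless RB_NOSYNC is set (or a previous
 * attempt already ran), sync the disks, wait for outstanding I/O to
 * settle and reset the time-of-day register; then either halt the
 * processor or, after an optional crash dump, reboot via doboot().
 */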
997 boot(howto)
998 	register int howto;
999 {
1000 	/* take a snap shot before clobbering any registers */
1001 	if (curproc)
1002 		savectx(curproc->p_addr, 0);
1003 
1004 	boothowto = howto;
1005 	if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
1006 		register struct buf *bp;
1007 		int iter, nbusy;
1008 
1009 		waittime = 0;
1010 		(void) spl0();
1011 		printf("syncing disks... ");
1012 		/*
1013 		 * Release vnodes held by texts before sync.
1014 		 */
1015 		if (panicstr == 0)
1016 			vnode_pager_umount(NULL);
1017 #ifdef notdef
1018 #include "fd.h"
1019 #if NFD > 0
1020 		fdshutdown();
1021 #endif
1022 #endif
1023 		sync(&proc0, (void *)NULL, (int *)NULL);
1024 
1025 		for (iter = 0; iter < 20; iter++) {
1026 			nbusy = 0;
1027 			for (bp = &buf[nbuf]; --bp >= buf; )
1028 				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
1029 					nbusy++;
1030 			if (nbusy == 0)
1031 				break;
1032 			printf("%d ", nbusy);
1033 			DELAY(40000 * iter);
1034 		}
1035 		if (nbusy)
1036 			printf("giving up\n");
1037 		else
1038 			printf("done\n");
1039 		/*
1040 		 * If we've been adjusting the clock, the todr
1041 		 * will be out of synch; adjust it now.
1042 		 */
1043 		resettodr();
1044 	}
1045 	splhigh();			/* extreme priority */
1046 	if (howto&RB_HALT) {
1047 		printf("halted\n\n");
1048 		asm("	stop	#0x2700");
1049 	} else {
1050 		if (howto & RB_DUMP)
1051 			dumpsys();
1052 		doboot();
1053 		/*NOTREACHED*/
1054 	}
1055 	/*NOTREACHED*/
1056 }
1057 
1058 int	dumpmag = 0x8fca0101;	/* magic number for savecore */
1059 int	dumpsize = 0;		/* also for savecore */
1060 long	dumplo = 0;
1061 
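/*
 * Size the crash dump: dump all of physical memory if the dump device
 * is large enough (placing the dump at the end of the device when
 * dumplo is not already set), and keep dumplo clear of the first
 * CLBYTES so a disk label is not overwritten.
 */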
1062 dumpconf()
1063 {
1064 	int nblks;
1065 
1066 	dumpsize = physmem;
1067 	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
1068 		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1069 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
1070 			dumpsize = btoc(dbtob(nblks - dumplo));
1071 		else if (dumplo == 0)
1072 			dumplo = nblks - btodb(ctob(physmem));
1073 	}
1074 	/*
1075 	 * Don't dump on the first CLBYTES (why CLBYTES?)
1076 	 * in case the dump device includes a disk label.
1077 	 */
1078 	if (dumplo < btodb(CLBYTES))
1079 		dumplo = btodb(CLBYTES);
1080 }
1081 
1082 /*
1083  * Doadump comes here after turning off memory management and
1084  * getting on the dump stack, either when called above, or by
1085  * the auto-restart code.
1086  */
1087 dumpsys()
1088 {
1089 
1090 	msgbufmapped = 0;
1091 	if (dumpdev == NODEV)
1092 		return;
1093 	/*
1094 	 * For dumps during autoconfiguration,
1095 	 * if the dump device has already been configured...
1096 	 */
1097 	if (dumpsize == 0)
1098 		dumpconf();
1099 	if (dumplo < 0)
1100 		return;
1101 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
1102 	printf("dump ");
1103 	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
1104 
1105 	case ENXIO:
1106 		printf("device bad\n");
1107 		break;
1108 
1109 	case EFAULT:
1110 		printf("device not ready\n");
1111 		break;
1112 
1113 	case EINVAL:
1114 		printf("area improper\n");
1115 		break;
1116 
1117 	case EIO:
1118 		printf("i/o error\n");
1119 		break;
1120 
1121 	default:
1122 		printf("succeeded\n");
1123 		break;
1124 	}
1125 }
1126 
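/*
 * Final CPU setup: choose the threshold for mapped copyin/copyout
 * (when MAPPEDCOPY is configured), enable parity error detection,
 * and initialize the LEDs (when USELEDS is configured).
 */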
1127 initcpu()
1128 {
1129 #ifdef MAPPEDCOPY
1130 	extern u_int mappedcopysize;
1131 
1132 	/*
1133 	 * Initialize lower bound for doing copyin/copyout using
1134 	 * page mapping (if not already set).  We don't do this on
1135 	 * VAC machines as it loses big time.
1136 	 */
1137 	if (mappedcopysize == 0) {
1138 		if (ectype == EC_VIRT)
1139 			mappedcopysize = (u_int) -1;
1140 		else
1141 			mappedcopysize = NBPG;
1142 	}
1143 #endif
1144 	parityenable();
1145 #ifdef USELEDS
1146 	ledinit();
1147 #endif
1148 }
1149 
1150 straytrap(pc, evec)
1151 	int pc;
1152 	u_short evec;
1153 {
1154 	printf("unexpected trap (vector offset %x) from %x\n",
1155 	       evec & 0xFFF, pc);
1156 }
1157 
1158 int	*nofault;
1159 
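/*
 * Probe for memory or device registers at an address: badaddr uses a
 * word (short) read, badbaddr a byte read.  A bus error during the
 * probe is caught via the nofault/setjmp hook and makes the routine
 * return non-zero.
 */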
1160 badaddr(addr)
1161 	register caddr_t addr;
1162 {
1163 	register int i;
1164 	label_t	faultbuf;
1165 
1166 #ifdef lint
1167 	i = *addr; if (i) return(0);
1168 #endif
1169 	nofault = (int *) &faultbuf;
1170 	if (setjmp((label_t *)nofault)) {
1171 		nofault = (int *) 0;
1172 		return(1);
1173 	}
1174 	i = *(volatile short *)addr;
1175 	nofault = (int *) 0;
1176 	return(0);
1177 }
1178 
1179 badbaddr(addr)
1180 	register caddr_t addr;
1181 {
1182 	register int i;
1183 	label_t	faultbuf;
1184 
1185 #ifdef lint
1186 	i = *addr; if (i) return(0);
1187 #endif
1188 	nofault = (int *) &faultbuf;
1189 	if (setjmp((label_t *)nofault)) {
1190 		nofault = (int *) 0;
1191 		return(1);
1192 	}
1193 	i = *(volatile char *)addr;
1194 	nofault = (int *) 0;
1195 	return(0);
1196 }
1197 
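/*
 * Software network interrupt: for each protocol whose bit is set in
 * netisr, clear the bit and call that protocol's software-interrupt
 * routine (arpintr, ipintr, etc.).
 */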
1198 netintr()
1199 {
1200 #ifdef INET
1201 	if (netisr & (1 << NETISR_ARP)) {
1202 		netisr &= ~(1 << NETISR_ARP);
1203 		arpintr();
1204 	}
1205 	if (netisr & (1 << NETISR_IP)) {
1206 		netisr &= ~(1 << NETISR_IP);
1207 		ipintr();
1208 	}
1209 #endif
1210 #ifdef NS
1211 	if (netisr & (1 << NETISR_NS)) {
1212 		netisr &= ~(1 << NETISR_NS);
1213 		nsintr();
1214 	}
1215 #endif
1216 #ifdef ISO
1217 	if (netisr & (1 << NETISR_ISO)) {
1218 		netisr &= ~(1 << NETISR_ISO);
1219 		clnlintr();
1220 	}
1221 #endif
1222 #ifdef CCITT
1223 	if (netisr & (1 << NETISR_CCITT)) {
1224 		netisr &= ~(1 << NETISR_CCITT);
1225 		ccittintr();
1226 	}
1227 #endif
1228 }
1229 
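/*
 * Dispatch an auto-vectored interrupt: for levels 3-5, walk the ISR
 * chain registered for that level until a handler claims the
 * interrupt; unclaimed or unexpected-level interrupts are logged.
 */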
1230 intrhand(sr)
1231 	int sr;
1232 {
1233 	register struct isr *isr;
1234 	register int found = 0;
1235 	register int ipl;
1236 	extern struct isr isrqueue[];
1237 
1238 	ipl = (sr >> 8) & 7;
1239 	switch (ipl) {
1240 
1241 	case 3:
1242 	case 4:
1243 	case 5:
1244 		ipl = ISRIPL(ipl);
1245 		isr = isrqueue[ipl].isr_forw;
1246 		for (; isr != &isrqueue[ipl]; isr = isr->isr_forw) {
1247 			if ((isr->isr_intr)(isr->isr_arg)) {
1248 				found++;
1249 				break;
1250 			}
1251 		}
1252 		if (found == 0)
1253 			printf("stray interrupt, sr 0x%x\n", sr);
1254 		break;
1255 
1256 	case 0:
1257 	case 1:
1258 	case 2:
1259 	case 6:
1260 	case 7:
1261 		printf("intrhand: unexpected sr 0x%x\n", sr);
1262 		break;
1263 	}
1264 }
1265 
1266 #if defined(DEBUG) && !defined(PANICBUTTON)
1267 #define PANICBUTTON
1268 #endif
1269 
1270 #ifdef PANICBUTTON
1271 int panicbutton = 1;	/* non-zero if panic buttons are enabled */
1272 int crashandburn = 0;
1273 int candbdelay = 50;	/* give em half a second */
1274 
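/*
 * Timeout routine armed by nmihand: if a second keyboard NMI has not
 * arrived within candbdelay ticks, cancel the pending crashandburn
 * ("forced crash") request.
 */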
1275 void
1276 candbtimer(arg)
1277 	void *arg;
1278 {
1279 
1280 	crashandburn = 0;
1281 }
1282 #endif
1283 
1284 /*
1285  * Level 7 interrupts can be caused by the keyboard or parity errors.
1286  */
1287 nmihand(frame)
1288 	struct frame frame;
1289 {
1290 	if (kbdnmi()) {
1291 #ifdef PANICBUTTON
1292 		static int innmihand = 0;
1293 
1294 		/*
1295 		 * Attempt to reduce the window of vulnerability for recursive
1296 		 * NMIs (e.g. someone holding down the keyboard reset button).
1297 		 */
1298 		if (innmihand == 0) {
1299 			innmihand = 1;
1300 			printf("Got a keyboard NMI\n");
1301 			innmihand = 0;
1302 		}
1303 		if (panicbutton) {
1304 			if (crashandburn) {
1305 				crashandburn = 0;
1306 				panic(panicstr ?
1307 				      "forced crash, nosync" : "forced crash");
1308 			}
1309 			crashandburn++;
1310 			timeout(candbtimer, (void *)0, candbdelay);
1311 		}
1312 #endif
1313 		return;
1314 	}
1315 	if (parityerror(&frame))
1316 		return;
1317 	/* panic?? */
1318 	printf("unexpected level 7 interrupt ignored\n");
1319 }
1320 
1321 /*
1322  * Parity error section.  Contains magic.
1323  */
1324 #define PARREG		((volatile short *)IIOV(0x5B0000))
1325 static int gotparmem = 0;
1326 #ifdef DEBUG
1327 int ignorekperr = 0;	/* ignore kernel parity errors */
1328 #endif
1329 
1330 /*
1331  * Enable parity detection
1332  */
1333 parityenable()
1334 {
1335 	label_t	faultbuf;
1336 
1337 	nofault = (int *) &faultbuf;
1338 	if (setjmp((label_t *)nofault)) {
1339 		nofault = (int *) 0;
1340 #ifdef DEBUG
1341 		printf("No parity memory\n");
1342 #endif
1343 		return;
1344 	}
1345 	*PARREG = 1;
1346 	nofault = (int *) 0;
1347 	gotparmem = 1;
1348 #ifdef DEBUG
1349 	printf("Parity detection enabled\n");
1350 #endif
1351 }
1352 
1353 /*
1354  * Determine if level 7 interrupt was caused by a parity error
1355  * and deal with it if it was.  Returns 1 if it was a parity error.
1356  */
1357 parityerror(fp)
1358 	struct frame *fp;
1359 {
1360 	if (!gotparmem)
1361 		return(0);
1362 	*PARREG = 0;
1363 	DELAY(10);
1364 	*PARREG = 1;
1365 	if (panicstr) {
1366 		printf("parity error after panic ignored\n");
1367 		return(1);
1368 	}
1369 	if (!findparerror())
1370 		printf("WARNING: transient parity error ignored\n");
1371 	else if (USERMODE(fp->f_sr)) {
1372 		printf("pid %d: parity error\n", curproc->p_pid);
1373 		uprintf("sorry, pid %d killed due to memory parity error\n",
1374 			curproc->p_pid);
1375 		psignal(curproc, SIGKILL);
1376 #ifdef DEBUG
1377 	} else if (ignorekperr) {
1378 		printf("WARNING: kernel parity error ignored\n");
1379 #endif
1380 	} else {
1381 		regdump(fp, 128);
1382 		panic("kernel parity error");
1383 	}
1384 	return(1);
1385 }
1386 
1387 /*
1388  * Yuk!  There has got to be a better way to do this!
1389  * Searching all of memory with interrupts blocked can lead to disaster.
1390  */
1391 findparerror()
1392 {
1393 	static label_t parcatch;
1394 	static int looking = 0;
1395 	volatile struct pte opte;
1396 	volatile int pg, o, s;
1397 	register volatile int *ip;
1398 	register int i;
1399 	int found;
1400 
1401 #ifdef lint
1402 	ip = &found;
1403 	i = o = pg = 0; if (i) return(0);
1404 #endif
1405 	/*
1406 	 * If looking is true we are searching for a known parity error
1407 	 * and it has just occurred.  All we do is return to the higher
1408 	 * level invocation.
1409 	 */
1410 	if (looking)
1411 		longjmp(&parcatch);
1412 	s = splhigh();
1413 	/*
1414 	 * If setjmp returns true, the parity error we were searching
1415 	 * for has just occurred (longjmp above) at the current pg+o
1416 	 */
1417 	if (setjmp(&parcatch)) {
1418 		printf("Parity error at 0x%x\n", ctob(pg)|o);
1419 		found = 1;
1420 		goto done;
1421 	}
1422 	/*
1423 	 * If we get here, a parity error has occurred for the first time
1424 	 * and we need to find it.  We turn off any external caches and
1425 	 * loop through memory, testing every longword until a fault occurs and
1426 	 * we regain control at setjmp above.  Note that because of the
1427 	 * setjmp, pg and o need to be volatile or their values will be lost.
1428 	 */
1429 	looking = 1;
1430 	ecacheoff();
1431 	for (pg = btoc(lowram); pg < btoc(lowram)+physmem; pg++) {
1432 		pmap_enter(kernel_pmap, (vm_offset_t)vmmap, ctob(pg),
1433 		    VM_PROT_READ, TRUE);
1434 		for (o = 0; o < NBPG; o += sizeof(int))
1435 			i = *(int *)(&vmmap[o]);
1436 	}
1437 	/*
1438 	 * Getting here implies no fault was found.  Should never happen.
1439 	 */
1440 	printf("Couldn't locate parity error\n");
1441 	found = 0;
1442 done:
1443 	looking = 0;
1444 	pmap_remove(kernel_pmap, (vm_offset_t)vmmap,
1445 	    (vm_offset_t)&vmmap[NBPG]);
1446 	ecacheon();
1447 	splx(s);
1448 	return(found);
1449 }
1450 
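/*
 * Print a register dump from the given exception frame: PC, SR, the
 * source/destination function codes, the data and address registers,
 * and, if sbytes is positive, a dump of the adjacent kernel or user
 * stack.
 */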
1451 regdump(fp, sbytes)
1452 	struct frame *fp; /* must not be register */
1453 	int sbytes;
1454 {
1455 	static int doingdump = 0;
1456 	register int i;
1457 	int s;
1458 	extern char *hexstr();
1459 
1460 	if (doingdump)
1461 		return;
1462 	s = splhigh();
1463 	doingdump = 1;
1464 	printf("pid = %d, pc = %s, ",
1465 	       curproc ? curproc->p_pid : -1, hexstr(fp->f_pc, 8));
1466 	printf("ps = %s, ", hexstr(fp->f_sr, 4));
1467 	printf("sfc = %s, ", hexstr(getsfc(), 4));
1468 	printf("dfc = %s\n", hexstr(getdfc(), 4));
1469 	printf("Registers:\n     ");
1470 	for (i = 0; i < 8; i++)
1471 		printf("        %d", i);
1472 	printf("\ndreg:");
1473 	for (i = 0; i < 8; i++)
1474 		printf(" %s", hexstr(fp->f_regs[i], 8));
1475 	printf("\nareg:");
1476 	for (i = 0; i < 8; i++)
1477 		printf(" %s", hexstr(fp->f_regs[i+8], 8));
1478 	if (sbytes > 0) {
1479 		if (fp->f_sr & PSL_S) {
1480 			printf("\n\nKernel stack (%s):",
1481 			       hexstr((int)(((int *)&fp)-1), 8));
1482 			dumpmem(((int *)&fp)-1, sbytes, 0);
1483 		} else {
1484 			printf("\n\nUser stack (%s):", hexstr(fp->f_regs[SP], 8));
1485 			dumpmem((int *)fp->f_regs[SP], sbytes, 1);
1486 		}
1487 	}
1488 	doingdump = 0;
1489 	splx(s);
1490 }
1491 
1492 extern char kstack[];
1493 #define KSADDR	((int *)&(kstack[(UPAGES-1)*NBPG]))
1494 
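/*
 * Dump "sz" longwords starting at "ptr", eight per line.  User stack
 * addresses are fetched with fuword(); kernel addresses are followed
 * only while they remain within the page at KSADDR.
 */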
1495 dumpmem(ptr, sz, ustack)
1496 	register int *ptr;
1497 	int sz;
1498 {
1499 	register int i, val;
1500 	extern char *hexstr();
1501 
1502 	for (i = 0; i < sz; i++) {
1503 		if ((i & 7) == 0)
1504 			printf("\n%s: ", hexstr((int)ptr, 6));
1505 		else
1506 			printf(" ");
1507 		if (ustack == 1) {
1508 			if ((val = fuword(ptr++)) == -1)
1509 				break;
1510 		} else {
1511 			if (ustack == 0 &&
1512 			    (ptr < KSADDR || ptr > KSADDR+(NBPG/4-1)))
1513 				break;
1514 			val = *ptr++;
1515 		}
1516 		printf("%s", hexstr(val, 8));
1517 	}
1518 	printf("\n");
1519 }
1520 
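/*
 * Format "val" as a len-digit (len <= 8) uppercase hexadecimal string.
 * The result is returned in a static buffer, so it must be used (or
 * copied) before the next call.
 */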
1521 char *
1522 hexstr(val, len)
1523 	register int val;
1524 {
1525 	static char nbuf[9];
1526 	register int x, i;
1527 
1528 	if (len > 8)
1529 		return("");
1530 	nbuf[len] = '\0';
1531 	for (i = len-1; i >= 0; --i) {
1532 		x = val & 0xF;
1533 		if (x > 9)
1534 			nbuf[i] = x - 10 + 'A';
1535 		else
1536 			nbuf[i] = x + '0';
1537 		val >>= 4;
1538 	}
1539 	return(nbuf);
1540 }
1541 
1542 #ifdef DEBUG
1543 char oflowmsg[] = "k-stack overflow";
1544 char uflowmsg[] = "k-stack underflow";
1545 
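/*
 * Report a kernel stack overflow or underflow: print where the stack
 * pointer should have been, dump the registers from the offending
 * frame, and panic.
 */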
1546 badkstack(oflow, fr)
1547 	int oflow;
1548 	struct frame fr;
1549 {
1550 	extern char kstackatbase[];
1551 
1552 	printf("%s: sp should be %x\n",
1553 	       oflow ? oflowmsg : uflowmsg,
1554 	       kstackatbase - (exframesize[fr.f_format] + 8));
1555 	regdump(&fr, 0);
1556 	panic(oflow ? oflowmsg : uflowmsg);
1557 }
1558 #endif
1559