xref: /original-bsd/sys/hp300/hp300/machdep.c (revision 333da485)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
13  *
14  *	@(#)machdep.c	8.6 (Berkeley) 01/12/94
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/signalvar.h>
20 #include <sys/kernel.h>
21 #include <sys/map.h>
22 #include <sys/proc.h>
23 #include <sys/buf.h>
24 #include <sys/reboot.h>
25 #include <sys/conf.h>
26 #include <sys/file.h>
27 #include <sys/clist.h>
28 #include <sys/callout.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/msgbuf.h>
32 #include <sys/ioctl.h>
33 #include <sys/tty.h>
34 #include <sys/mount.h>
35 #include <sys/user.h>
36 #include <sys/exec.h>
37 #include <sys/sysctl.h>
38 #ifdef SYSVSHM
39 #include <sys/shm.h>
40 #endif
41 #ifdef HPUXCOMPAT
42 #include <hp/hpux/hpux.h>
43 #endif
44 
45 #include <machine/cpu.h>
46 #include <machine/reg.h>
47 #include <machine/psl.h>
48 #include <hp/dev/cons.h>
49 #include <hp300/hp300/isr.h>
50 #include <hp300/hp300/pte.h>
51 #include <net/netisr.h>
52 
53 #define	MAXMEM	64*1024*CLSIZE	/* XXX - from cmap.h */
54 #include <vm/vm_kern.h>
55 
56 /* the following is used externally (sysctl_hw) */
57 char machine[] = "hp300";		/* cpu "architecture" */
58 
59 vm_map_t buffer_map;
60 extern vm_offset_t avail_end;
61 
62 /*
63  * Declare these as initialized data so we can patch them.
64  */
65 int	nswbuf = 0;
66 #ifdef	NBUF
67 int	nbuf = NBUF;
68 #else
69 int	nbuf = 0;
70 #endif
71 #ifdef	BUFPAGES
72 int	bufpages = BUFPAGES;
73 #else
74 int	bufpages = 0;
75 #endif
76 int	msgbufmapped;		/* set when safe to use msgbuf */
77 int	maxmem;			/* max memory per process */
78 int	physmem = MAXMEM;	/* max supported memory, changes to actual */
79 /*
80  * safepri is a safe priority for sleep to set for a spin-wait
81  * during autoconfiguration or after a panic.
82  */
83 int	safepri = PSL_LOWIPL;
84 
85 extern	u_int lowram;
86 extern	short exframesize[];
87 
88 /*
89  * Console initialization: called early on from main,
90  * before vm init or startup.  Do enough configuration
91  * to choose and initialize a console.
92  */
93 consinit()
94 {
95 
96 	/*
97 	 * Set cpuspeed immediately since routines called by cninit()
98 	 * might use delay().  Note that we only set it if a custom value
99 	 * has not already been specified.
100 	 */
101 	if (cpuspeed == 0) {
102 		switch (machineid) {
103 		case HP_320:
104 		case HP_330:
105 		case HP_340:
106 			cpuspeed = MHZ_16;
107 			break;
108 		case HP_350:
109 		case HP_360:
110 		case HP_380:
111 			cpuspeed = MHZ_25;
112 			break;
113 		case HP_370:
114 		case HP_433:
115 			cpuspeed = MHZ_33;
116 			break;
117 		case HP_375:
118 			cpuspeed = MHZ_50;
119 			break;
120 		default:	/* assume the fastest */
121 			cpuspeed = MHZ_50;
122 			break;
123 		}
124 		if (mmutype == MMU_68040)
125 			cpuspeed *= 2;	/* XXX */
126 	}
127 	/*
128 	 * Find what hardware is attached to this machine.
129 	 */
130 	find_devs();
131 
132 	/*
133 	 * Initialize the console before we print anything out.
134 	 */
135 	cninit();
136 }
137 
138 /*
139  * cpu_startup: allocate memory for variable-sized tables,
140  * initialize cpu, and do autoconfiguration.
141  */
142 cpu_startup()
143 {
144 	register unsigned i;
145 	register caddr_t v, firstaddr;
146 	int base, residual;
147 	vm_offset_t minaddr, maxaddr;
148 	vm_size_t size;
149 #ifdef BUFFERS_UNMANAGED
150 	vm_offset_t bufmemp;
151 	caddr_t buffermem;
152 	int ix;
153 #endif
154 #ifdef DEBUG
155 	extern int pmapdebug;
156 	int opmapdebug = pmapdebug;
157 
158 	pmapdebug = 0;
159 #endif
160 
161 	/*
162 	 * Initialize error message buffer (at end of core).
163 	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
164 	 */
165 	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
166 		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
167 		    avail_end + i * NBPG, VM_PROT_ALL, TRUE);
168 	msgbufmapped = 1;
169 
170 	/*
171 	 * Good {morning,afternoon,evening,night}.
172 	 */
173 	printf(version);
174 	identifycpu();
175 	printf("real mem = %d\n", ctob(physmem));
176 
177 	/*
178 	 * Allocate space for system data structures.
179 	 * The first available real memory address is in "firstaddr".
180 	 * The first available kernel virtual address is in "v".
181 	 * As pages of kernel virtual memory are allocated, "v" is incremented.
182 	 * As pages of memory are allocated and cleared,
183 	 * "firstaddr" is incremented.
184 	 * An index into the kernel page table corresponding to the
185 	 * virtual memory address maintained in "v" is kept in "mapaddr".
186 	 */
187 	/*
188 	 * Make two passes.  The first pass calculates how much memory is
189 	 * needed and allocates it.  The second pass assigns virtual
190 	 * addresses to the various data structures.
191 	 */
192 	firstaddr = 0;
193 again:
194 	v = (caddr_t)firstaddr;
195 
196 #define	valloc(name, type, num) \
197 	    (name) = (type *)v; v = (caddr_t)((name)+(num))
198 #define	valloclim(name, type, num, lim) \
199 	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
200 	valloc(cfree, struct cblock, nclist);
201 	valloc(callout, struct callout, ncallout);
202 	valloc(swapmap, struct map, nswapmap = maxproc * 2);
203 #ifdef SYSVSHM
204 	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
205 #endif
206 
207 	/*
208 	 * Determine how many buffers to allocate.
209 	 * Since HPs tend to be long on memory and short on disk speed,
210 	 * we allocate more buffer space than the BSD standard of
211 	 * use 10% of memory for the first 2 Meg, 5% of remaining.
212 	 * using 10% of memory for the first 2 Meg and 5% of the remainder.
213 	 * We just allocate a flat 10%.  Ensure a minimum of 16 buffers.
214 	 */
215 	if (bufpages == 0)
216 		bufpages = physmem / 10 / CLSIZE;
217 	if (nbuf == 0) {
218 		nbuf = bufpages;
219 		if (nbuf < 16)
220 			nbuf = 16;
221 	}
222 	if (nswbuf == 0) {
223 		nswbuf = (nbuf / 2) &~ 1;	/* force even */
224 		if (nswbuf > 256)
225 			nswbuf = 256;		/* sanity */
226 	}
227 	valloc(swbuf, struct buf, nswbuf);
228 	valloc(buf, struct buf, nbuf);
229 	/*
230 	 * End of first pass, size has been calculated so allocate memory
231 	 */
232 	if (firstaddr == 0) {
233 		size = (vm_size_t)(v - firstaddr);
234 		firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
235 		if (firstaddr == 0)
236 			panic("startup: no room for tables");
237 #ifdef BUFFERS_UNMANAGED
238 		buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*CLBYTES);
239 		if (buffermem == 0)
240 			panic("startup: no room for buffers");
241 #endif
242 		goto again;
243 	}
244 	/*
245 	 * End of second pass, addresses have been assigned
246 	 */
247 	if ((vm_size_t)(v - firstaddr) != size)
248 		panic("startup: table size inconsistency");
249 	/*
250 	 * Now allocate buffers proper.  They are different from the above
251 	 * in that they usually occupy more virtual memory than physical.
252 	 */
253 	size = MAXBSIZE * nbuf;
254 	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
255 				   &maxaddr, size, TRUE);
256 	minaddr = (vm_offset_t)buffers;
257 	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
258 			&minaddr, size, FALSE) != KERN_SUCCESS)
259 		panic("startup: cannot allocate buffers");
260 	base = bufpages / nbuf;
261 	residual = bufpages % nbuf;
262 #ifdef BUFFERS_UNMANAGED
263 	bufmemp = (vm_offset_t) buffermem;
264 #endif
265 	for (i = 0; i < nbuf; i++) {
266 		vm_size_t curbufsize;
267 		vm_offset_t curbuf;
268 
269 		/*
270 		 * First <residual> buffers get (base+1) physical pages
271 		 * allocated for them.  The rest get (base) physical pages.
272 		 *
273 		 * The rest of each buffer occupies virtual space,
274 		 * but has no physical memory allocated for it.
275 		 */
276 		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
277 		curbufsize = CLBYTES * (i < residual ? base+1 : base);
278 #ifdef BUFFERS_UNMANAGED
279 		/*
280 		 * Move the physical pages over from buffermem.
281 		 */
282 		for (ix = 0; ix < curbufsize/CLBYTES; ix++) {
283 			vm_offset_t pa;
284 
285 			pa = pmap_extract(kernel_pmap, bufmemp);
286 			if (pa == 0)
287 				panic("startup: unmapped buffer");
288 			pmap_remove(kernel_pmap, bufmemp, bufmemp+CLBYTES);
289 			pmap_enter(kernel_pmap,
290 				   (vm_offset_t)(curbuf + ix * CLBYTES),
291 				   pa, VM_PROT_READ|VM_PROT_WRITE, TRUE);
292 			bufmemp += CLBYTES;
293 		}
294 #else
295 		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
296 		vm_map_simplify(buffer_map, curbuf);
297 #endif
298 	}
299 #ifdef BUFFERS_UNMANAGED
300 #if 0
301 	/*
302 	 * We would like to free the (now empty) original address range
303 	 * but too many bad things will happen if we try.
304 	 */
305 	kmem_free(kernel_map, (vm_offset_t)buffermem, bufpages*CLBYTES);
306 #endif
307 #endif
308 	/*
309 	 * Allocate a submap for exec arguments.  This map effectively
310 	 * limits the number of processes exec'ing at any time.
311 	 */
312 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
313 				 16*NCARGS, TRUE);
314 	/*
315 	 * Allocate a submap for physio
316 	 */
317 	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
318 				 VM_PHYS_SIZE, TRUE);
319 
320 	/*
321 	 * Finally, allocate the mbuf pool.  Since mclrefcnt is an off-size
322 	 * allocation, we use the more space-efficient malloc in place of kmem_alloc.
323 	 */
324 	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
325 				   M_MBUF, M_NOWAIT);
326 	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
327 	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
328 			       VM_MBUF_SIZE, FALSE);
329 	/*
330 	 * Initialize callouts
331 	 */
332 	callfree = callout;
333 	for (i = 1; i < ncallout; i++)
334 		callout[i-1].c_next = &callout[i];
335 	callout[i-1].c_next = NULL;
336 
337 #ifdef DEBUG
338 	pmapdebug = opmapdebug;
339 #endif
340 	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
341 	printf("using %d buffers containing %d bytes of memory\n",
342 		nbuf, bufpages * CLBYTES);
343 	/*
344 	 * Set up CPU-specific registers, cache, etc.
345 	 */
346 	initcpu();
347 
348 	/*
349 	 * Set up buffers, so they can be used to read disk labels.
350 	 */
351 	bufinit();
352 
353 	/*
354 	 * Configure the system.
355 	 */
356 	configure();
357 }
358 
359 /*
360  * Set registers on exec.
361  * XXX Should clear registers except sp, pc,
362  * but would break init; should be fixed soon.
363  */
364 setregs(p, entry, retval)
365 	register struct proc *p;
366 	u_long entry;
367 	int retval[2];
368 {
369 	struct frame *frame = (struct frame *)p->p_md.md_regs;
370 
371 	frame->f_pc = entry & ~1;
372 #ifdef FPCOPROC
373 	/* restore a null state frame */
374 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
375 	m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
376 #endif
377 #ifdef HPUXCOMPAT
378 	if (p->p_md.md_flags & MDP_HPUX) {
379 
380 		frame->f_regs[A0] = 0; /* not 68010 (bit 31), no FPA (30) */
381 		retval[0] = 0;		/* no float card */
382 #ifdef FPCOPROC
383 		retval[1] = 1;		/* yes 68881 */
384 #else
385 		retval[1] = 0;		/* no 68881 */
386 #endif
387 	}
388 	/*
389 	 * XXX This doesn't have much to do with setting registers but
390 	 * I didn't want to muck up kern_exec.c with this code, so I
391 	 * stuck it here.
392 	 *
393 	 * Ensure we perform the right action on trap types 1 and 2:
394 	 * If our parent is an HPUX process and we are being traced, turn
395 	 * on HPUX style interpretation.  Else if we were using the HPUX
396 	 * style interpretation, revert to the BSD interpretation.
397 	 *
398 	 * Note that we do this by changing the trap instruction in the
399 	 * global "sigcode" array which then gets copied out to the user's
400 	 * sigcode in the stack.  Since we are changing it in the global
401 	 * array we must always reset it, even for non-HPUX processes.
402 	 *
403 	 * Note also that implementing it in this way creates a potential
404 	 * race where we could have tweaked it for process A which then
405 	 * blocks in the copyout to the stack and process B comes along
406 	 * and untweaks it causing A to wind up with the wrong setting
407 	 * when the copyout continues.  However, since we have already
408 	 * copied something out to this user stack page (thereby faulting
409 	 * it in), this scenario is extremely unlikely.
410 	 */
411 	{
412 		extern short sigcodetrap[];
413 
414 		if ((p->p_pptr->p_md.md_flags & MDP_HPUX) &&
415 		    (p->p_flag & P_TRACED)) {
416 			p->p_md.md_flags |= MDP_HPUXTRACE;
417 			*sigcodetrap = 0x4E42;
418 		} else {
419 			p->p_md.md_flags &= ~MDP_HPUXTRACE;
420 			*sigcodetrap = 0x4E41;
421 		}
422 	}
423 #endif
424 }
425 
426 /*
427  * Info for CTL_HW
428  */
429 extern	char machine[];
430 char	cpu_model[120];
431 extern	char ostype[], osrelease[], version[];
432 
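/*
 * Build the cpu_model string from machineid, mmutype and ectype,
 * print it, and panic if the machine or MMU type is unknown or
 * the CPU type is not configured into this kernel.
 */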
433 identifycpu()
434 {
435 	char *t, *mc;
436 	int len;
437 
438 	switch (machineid) {
439 	case HP_320:
440 		t = "320 (16.67MHz";
441 		break;
442 	case HP_330:
443 		t = "318/319/330 (16.67MHz";
444 		break;
445 	case HP_340:
446 		t = "340 (16.67MHz";
447 		break;
448 	case HP_350:
449 		t = "350 (25MHz";
450 		break;
451 	case HP_360:
452 		t = "360 (25MHz";
453 		break;
454 	case HP_370:
455 		t = "370 (33.33MHz";
456 		break;
457 	case HP_375:
458 		t = "345/375 (50MHz";
459 		break;
460 	case HP_380:
461 		t = "380/425 (25MHz";
462 		break;
463 	case HP_433:
464 		t = "433 (33MHz";
465 		break;
466 	default:
467 		printf("\nunknown machine type %d\n", machineid);
468 		panic("startup");
469 	}
470 	mc = (mmutype == MMU_68040 ? "40" :
471 	       (mmutype == MMU_68030 ? "30" : "20"));
472 	sprintf(cpu_model, "HP9000/%s MC680%s CPU", t, mc);
473 	switch (mmutype) {
474 	case MMU_68040:
475 	case MMU_68030:
476 		strcat(cpu_model, "+MMU");
477 		break;
478 	case MMU_68851:
479 		strcat(cpu_model, ", MC68851 MMU");
480 		break;
481 	case MMU_HP:
482 		strcat(cpu_model, ", HP MMU");
483 		break;
484 	default:
485 		printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
486 		panic("startup");
487 	}
488 	len = strlen(cpu_model);
489 	if (mmutype == MMU_68040)
490 		len += sprintf(cpu_model + len,
491 		    "+FPU, 4k on-chip physical I/D caches");
492 	else if (mmutype == MMU_68030)
493 		len += sprintf(cpu_model + len, ", %sMHz MC68882 FPU",
494 		       machineid == HP_340 ? "16.67" :
495 		       (machineid == HP_360 ? "25" :
496 			(machineid == HP_370 ? "33.33" : "50")));
497 	else
498 		len += sprintf(cpu_model + len, ", %sMHz MC68881 FPU",
499 		       machineid == HP_350 ? "20" : "16.67");
500 	switch (ectype) {
501 	case EC_VIRT:
502 		sprintf(cpu_model + len, ", %dK virtual-address cache",
503 		       machineid == HP_320 ? 16 : 32);
504 		break;
505 	case EC_PHYS:
506 		sprintf(cpu_model + len, ", %dK physical-address cache",
507 		       machineid == HP_370 ? 64 : 32);
508 		break;
509 	}
510 	strcat(cpu_model, ")");
511 	printf("%s\n", cpu_model);
512 	/*
513 	 * Now that we have told the user what they have,
514 	 * let them know if that machine type isn't configured.
515 	 */
516 	switch (machineid) {
517 	case -1:		/* keep compilers happy */
518 #if !defined(HP320) && !defined(HP350)
519 	case HP_320:
520 	case HP_350:
521 #endif
522 #ifndef HP330
523 	case HP_330:
524 #endif
525 #if !defined(HP360) && !defined(HP370)
526 	case HP_340:
527 	case HP_360:
528 	case HP_370:
529 #endif
530 #if !defined(HP380)
531 	case HP_380:
532 	case HP_433:
533 #endif
534 		panic("CPU type not configured");
535 	default:
536 		break;
537 	}
538 }
539 
540 /*
541  * machine dependent system variables.
542  */
543 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
544 	int *name;
545 	u_int namelen;
546 	void *oldp;
547 	size_t *oldlenp;
548 	void *newp;
549 	size_t newlen;
550 	struct proc *p;
551 {
552 
553 	/* all sysctl names at this level are terminal */
554 	if (namelen != 1)
555 		return (ENOTDIR);		/* overloaded */
556 
557 	switch (name[0]) {
558 	case CPU_CONSDEV:
559 		return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
560 		    sizeof cn_tty->t_dev));
561 	default:
562 		return (EOPNOTSUPP);
563 	}
564 	/* NOTREACHED */
565 }
566 
567 #ifdef USELEDS
568 #include <hp300/hp300/led.h>
569 
570 int inledcontrol = 0;	/* 1 if we are in ledcontrol already, cheap mutex */
571 char *ledaddr;
572 
573 /*
574  * Map the LED page and setup the KVA to access it.
575  */
576 ledinit()
577 {
578 	extern caddr_t ledbase;
579 
580 	pmap_enter(kernel_pmap, (vm_offset_t)ledbase, (vm_offset_t)LED_ADDR,
581 		   VM_PROT_READ|VM_PROT_WRITE, TRUE);
582 	ledaddr = (char *) ((int)ledbase | (LED_ADDR & PGOFSET));
583 }
584 
585 /*
586  * Do lights:
587  *	`ons' is a mask of LEDs to turn on,
588  *	`offs' is a mask of LEDs to turn off,
589  *	`togs' is a mask of LEDs to toggle.
590  * Note we don't use splclock/splx for mutual exclusion.
591  * They are expensive and we really don't need to be that precise.
592  * Besides we would like to be able to profile this routine.
593  */
594 ledcontrol(ons, offs, togs)
595 	register int ons, offs, togs;
596 {
597 	static char currentleds;
598 	register char leds;
599 
600 	inledcontrol = 1;
601 	leds = currentleds;
602 	if (ons)
603 		leds |= ons;
604 	if (offs)
605 		leds &= ~offs;
606 	if (togs)
607 		leds ^= togs;
608 	currentleds = leds;
609 	*ledaddr = ~leds;
610 	inledcontrol = 0;
611 }
612 #endif
613 
614 #define SS_RTEFRAME	1
615 #define SS_FPSTATE	2
616 #define SS_USERREGS	4
617 
618 struct sigstate {
619 	int	ss_flags;		/* which of the following are valid */
620 	struct	frame ss_frame;		/* original exception frame */
621 	struct	fpframe ss_fpstate;	/* 68881/68882 state info */
622 };
623 
624 /*
625  * WARNING: code in locore.s assumes the layout shown for sf_signum
626  * thru sf_handler so... don't screw with them!
627  */
628 struct sigframe {
629 	int	sf_signum;		/* signo for handler */
630 	int	sf_code;		/* additional info for handler */
631 	struct	sigcontext *sf_scp;	/* context ptr for handler */
632 	sig_t	sf_handler;		/* handler addr for u_sigc */
633 	struct	sigstate sf_state;	/* state of the hardware */
634 	struct	sigcontext sf_sc;	/* actual context */
635 };
636 
637 #ifdef HPUXCOMPAT
638 struct	hpuxsigcontext {
639 	int	hsc_syscall;
640 	char	hsc_action;
641 	char	hsc_pad1;
642 	char	hsc_pad2;
643 	char	hsc_onstack;
644 	int	hsc_mask;
645 	int	hsc_sp;
646 	short	hsc_ps;
647 	int	hsc_pc;
648 /* the rest aren't part of the context but are included for our convenience */
649 	short	hsc_pad;
650 	u_int	hsc_magic;		/* XXX sigreturn: cookie */
651 	struct	sigcontext *hsc_realsc;	/* XXX sigreturn: ptr to BSD context */
652 };
653 
654 /*
655  * For an HP-UX process, a partial hpuxsigframe follows the normal sigframe.
656  * Tremendous waste of space, but some HP-UX applications (e.g. LCL) need it.
657  */
658 struct hpuxsigframe {
659 	int	hsf_signum;
660 	int	hsf_code;
661 	struct	sigcontext *hsf_scp;
662 	struct	hpuxsigcontext hsf_sc;
663 	int	hsf_regs[15];
664 };
665 #endif
666 
667 #ifdef DEBUG
668 int sigdebug = 0;
669 int sigpid = 0;
670 #define SDB_FOLLOW	0x01
671 #define SDB_KSTACK	0x02
672 #define SDB_FPSTATE	0x04
673 #endif
674 
675 /*
676  * Send an interrupt to process.
677  */
678 void
679 sendsig(catcher, sig, mask, code)
680 	sig_t catcher;
681 	int sig, mask;
682 	unsigned code;
683 {
684 	register struct proc *p = curproc;
685 	register struct sigframe *fp, *kfp;
686 	register struct frame *frame;
687 	register struct sigacts *psp = p->p_sigacts;
688 	register short ft;
689 	int oonstack, fsize;
690 	extern char sigcode[], esigcode[];
691 
692 	frame = (struct frame *)p->p_md.md_regs;
693 	ft = frame->f_format;
694 	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
695 	/*
696 	 * Allocate and validate space for the signal handler
697 	 * context. Note that if the stack is in P0 space, the
698 	 * call to grow() is a nop, and the useracc() check
699 	 * will fail if the process has not already allocated
700 	 * the space with a `brk'.
701 	 */
702 #ifdef HPUXCOMPAT
703 	if (p->p_md.md_flags & MDP_HPUX)
704 		fsize = sizeof(struct sigframe) + sizeof(struct hpuxsigframe);
705 	else
706 #endif
707 	fsize = sizeof(struct sigframe);
708 	if ((psp->ps_flags & SAS_ALTSTACK) &&
709 	    (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
710 	    (psp->ps_sigonstack & sigmask(sig))) {
711 		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
712 					 psp->ps_sigstk.ss_size - fsize);
713 		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
714 	} else
715 		fp = (struct sigframe *)(frame->f_regs[SP] - fsize);
716 	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
717 		(void)grow(p, (unsigned)fp);
718 #ifdef DEBUG
719 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
720 		printf("sendsig(%d): sig %d ssp %x usp %x scp %x ft %d\n",
721 		       p->p_pid, sig, &oonstack, fp, &fp->sf_sc, ft);
722 #endif
723 	if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
724 #ifdef DEBUG
725 		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
726 			printf("sendsig(%d): useracc failed on sig %d\n",
727 			       p->p_pid, sig);
728 #endif
729 		/*
730 		 * Process has trashed its stack; give it an illegal
731 		 * instruction to halt it in its tracks.
732 		 */
733 		SIGACTION(p, SIGILL) = SIG_DFL;
734 		sig = sigmask(SIGILL);
735 		p->p_sigignore &= ~sig;
736 		p->p_sigcatch &= ~sig;
737 		p->p_sigmask &= ~sig;
738 		psignal(p, SIGILL);
739 		return;
740 	}
741 	kfp = (struct sigframe *)malloc((u_long)fsize, M_TEMP, M_WAITOK);
742 	/*
743 	 * Build the argument list for the signal handler.
744 	 */
745 	kfp->sf_signum = sig;
746 	kfp->sf_code = code;
747 	kfp->sf_scp = &fp->sf_sc;
748 	kfp->sf_handler = catcher;
749 	/*
750 	 * Save necessary hardware state.  Currently this includes:
751 	 *	- general registers
752 	 *	- original exception frame (if not a "normal" frame)
753 	 *	- FP coprocessor state
754 	 */
755 	kfp->sf_state.ss_flags = SS_USERREGS;
756 	bcopy((caddr_t)frame->f_regs,
757 	      (caddr_t)kfp->sf_state.ss_frame.f_regs, sizeof frame->f_regs);
758 	if (ft >= FMT7) {
759 #ifdef DEBUG
760 		if (ft > 15 || exframesize[ft] < 0)
761 			panic("sendsig: bogus frame type");
762 #endif
763 		kfp->sf_state.ss_flags |= SS_RTEFRAME;
764 		kfp->sf_state.ss_frame.f_format = frame->f_format;
765 		kfp->sf_state.ss_frame.f_vector = frame->f_vector;
766 		bcopy((caddr_t)&frame->F_u,
767 		      (caddr_t)&kfp->sf_state.ss_frame.F_u, exframesize[ft]);
768 		/*
769 		 * Leave an indicator that we need to clean up the kernel
770 		 * stack.  We do this by setting the "pad word" above the
771 		 * hardware stack frame to the amount the stack must be
772 		 * adjusted by.
773 		 *
774 		 * N.B. we increment rather than just set f_stackadj in
775 		 * case we are called from syscall when processing a
776 		 * sigreturn.  In that case, f_stackadj may be non-zero.
777 		 */
778 		frame->f_stackadj += exframesize[ft];
779 		frame->f_format = frame->f_vector = 0;
780 #ifdef DEBUG
781 		if (sigdebug & SDB_FOLLOW)
782 			printf("sendsig(%d): copy out %d of frame %d\n",
783 			       p->p_pid, exframesize[ft], ft);
784 #endif
785 	}
786 #ifdef FPCOPROC
787 	kfp->sf_state.ss_flags |= SS_FPSTATE;
788 	m68881_save(&kfp->sf_state.ss_fpstate);
789 #ifdef DEBUG
790 	if ((sigdebug & SDB_FPSTATE) && *(char *)&kfp->sf_state.ss_fpstate)
791 		printf("sendsig(%d): copy out FP state (%x) to %x\n",
792 		       p->p_pid, *(u_int *)&kfp->sf_state.ss_fpstate,
793 		       &kfp->sf_state.ss_fpstate);
794 #endif
795 #endif
796 	/*
797 	 * Build the signal context to be used by sigreturn.
798 	 */
799 	kfp->sf_sc.sc_onstack = oonstack;
800 	kfp->sf_sc.sc_mask = mask;
801 	kfp->sf_sc.sc_sp = frame->f_regs[SP];
802 	kfp->sf_sc.sc_fp = frame->f_regs[A6];
803 	kfp->sf_sc.sc_ap = (int)&fp->sf_state;
804 	kfp->sf_sc.sc_pc = frame->f_pc;
805 	kfp->sf_sc.sc_ps = frame->f_sr;
806 #ifdef HPUXCOMPAT
807 	/*
808 	 * Create an HP-UX style sigcontext structure and associated goo
809 	 */
810 	if (p->p_md.md_flags & MDP_HPUX) {
811 		register struct hpuxsigframe *hkfp;
812 
813 		hkfp = (struct hpuxsigframe *)&kfp[1];
814 		hkfp->hsf_signum = bsdtohpuxsig(kfp->sf_signum);
815 		hkfp->hsf_code = kfp->sf_code;
816 		hkfp->hsf_scp = (struct sigcontext *)
817 			&((struct hpuxsigframe *)(&fp[1]))->hsf_sc;
818 		hkfp->hsf_sc.hsc_syscall = 0;		/* XXX */
819 		hkfp->hsf_sc.hsc_action = 0;		/* XXX */
820 		hkfp->hsf_sc.hsc_pad1 = hkfp->hsf_sc.hsc_pad2 = 0;
821 		hkfp->hsf_sc.hsc_onstack = kfp->sf_sc.sc_onstack;
822 		hkfp->hsf_sc.hsc_mask = kfp->sf_sc.sc_mask;
823 		hkfp->hsf_sc.hsc_sp = kfp->sf_sc.sc_sp;
824 		hkfp->hsf_sc.hsc_ps = kfp->sf_sc.sc_ps;
825 		hkfp->hsf_sc.hsc_pc = kfp->sf_sc.sc_pc;
826 		hkfp->hsf_sc.hsc_pad = 0;
827 		hkfp->hsf_sc.hsc_magic = 0xdeadbeef;
828 		hkfp->hsf_sc.hsc_realsc = kfp->sf_scp;
829 		bcopy((caddr_t)frame->f_regs, (caddr_t)hkfp->hsf_regs,
830 		      sizeof (hkfp->hsf_regs));
831 
832 		kfp->sf_signum = hkfp->hsf_signum;
833 		kfp->sf_scp = hkfp->hsf_scp;
834 	}
835 #endif
836 	(void) copyout((caddr_t)kfp, (caddr_t)fp, fsize);
837 	frame->f_regs[SP] = (int)fp;
838 #ifdef DEBUG
839 	if (sigdebug & SDB_FOLLOW)
840 		printf("sendsig(%d): sig %d scp %x fp %x sc_sp %x sc_ap %x\n",
841 		       p->p_pid, sig, kfp->sf_scp, fp,
842 		       kfp->sf_sc.sc_sp, kfp->sf_sc.sc_ap);
843 #endif
844 	/*
845 	 * Signal trampoline code is at base of user stack.
846 	 */
847 	frame->f_pc = (int)PS_STRINGS - (esigcode - sigcode);
848 #ifdef DEBUG
849 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
850 		printf("sendsig(%d): sig %d returns\n",
851 		       p->p_pid, sig);
852 #endif
853 	free((caddr_t)kfp, M_TEMP);
854 }
855 
856 /*
857  * System call to clean up state after a signal
858  * has been taken.  Reset signal mask and
859  * stack state from context left by sendsig (above).
860  * Return to previous pc and psl as specified by
861  * context left by sendsig. Check carefully to
862  * make sure that the user has not modified the
863  * psl to gain improper privileges or to cause
864  * a machine fault.
865  */
866 struct sigreturn_args {
867 	struct sigcontext *sigcntxp;
868 };
869 /* ARGSUSED */
870 sigreturn(p, uap, retval)
871 	struct proc *p;
872 	struct sigreturn_args *uap;
873 	int *retval;
874 {
875 	register struct sigcontext *scp;
876 	register struct frame *frame;
877 	register int rf;
878 	struct sigcontext tsigc;
879 	struct sigstate tstate;
880 	int flags;
881 
882 	scp = uap->sigcntxp;
883 #ifdef DEBUG
884 	if (sigdebug & SDB_FOLLOW)
885 		printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
886 #endif
887 	if ((int)scp & 1)
888 		return (EINVAL);
889 #ifdef HPUXCOMPAT
890 	/*
891 	 * Grab context as an HP-UX style context and determine if it
892 	 * was one that we constructed in sendsig.
893 	 */
894 	if (p->p_md.md_flags & MDP_HPUX) {
895 		struct hpuxsigcontext *hscp = (struct hpuxsigcontext *)scp;
896 		struct hpuxsigcontext htsigc;
897 
898 		if (useracc((caddr_t)hscp, sizeof (*hscp), B_WRITE) == 0 ||
899 		    copyin((caddr_t)hscp, (caddr_t)&htsigc, sizeof htsigc))
900 			return (EINVAL);
901 		/*
902 		 * If not generated by sendsig or we cannot restore the
903 		 * BSD-style sigcontext, just restore what we can -- state
904 		 * will be lost, but them's the breaks.
905 		 */
906 		hscp = &htsigc;
907 		if (hscp->hsc_magic != 0xdeadbeef ||
908 		    (scp = hscp->hsc_realsc) == 0 ||
909 		    useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
910 		    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc)) {
911 			if (hscp->hsc_onstack & 01)
912 				p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
913 			else
914 				p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
915 			p->p_sigmask = hscp->hsc_mask &~ sigcantmask;
916 			frame = (struct frame *) p->p_md.md_regs;
917 			frame->f_regs[SP] = hscp->hsc_sp;
918 			frame->f_pc = hscp->hsc_pc;
919 			frame->f_sr = hscp->hsc_ps &~ PSL_USERCLR;
920 			return (EJUSTRETURN);
921 		}
922 		/*
923 		 * Otherwise, overlay BSD context with possibly modified
924 		 * HP-UX values.
925 		 */
926 		tsigc.sc_onstack = hscp->hsc_onstack;
927 		tsigc.sc_mask = hscp->hsc_mask;
928 		tsigc.sc_sp = hscp->hsc_sp;
929 		tsigc.sc_ps = hscp->hsc_ps;
930 		tsigc.sc_pc = hscp->hsc_pc;
931 	} else
932 #endif
933 	/*
934 	 * Test and fetch the context structure.
935 	 * We grab it all at once for speed.
936 	 */
937 	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
938 	    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
939 		return (EINVAL);
940 	scp = &tsigc;
941 	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
942 		return (EINVAL);
943 	/*
944 	 * Restore the user supplied information
945 	 */
946 	if (scp->sc_onstack & 01)
947 		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
948 	else
949 		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
950 	p->p_sigmask = scp->sc_mask &~ sigcantmask;
951 	frame = (struct frame *) p->p_md.md_regs;
952 	frame->f_regs[SP] = scp->sc_sp;
953 	frame->f_regs[A6] = scp->sc_fp;
954 	frame->f_pc = scp->sc_pc;
955 	frame->f_sr = scp->sc_ps;
956 	/*
957 	 * Grab pointer to hardware state information.
958 	 * If zero, the user is probably doing a longjmp.
959 	 */
960 	if ((rf = scp->sc_ap) == 0)
961 		return (EJUSTRETURN);
962 	/*
963 	 * See if there is anything to do before we go to the
964 	 * expense of copying in close to 1/2K of data
965 	 */
966 	flags = fuword((caddr_t)rf);
967 #ifdef DEBUG
968 	if (sigdebug & SDB_FOLLOW)
969 		printf("sigreturn(%d): sc_ap %x flags %x\n",
970 		       p->p_pid, rf, flags);
971 #endif
972 	/*
973 	 * fuword failed (bogus sc_ap value).
974 	 */
975 	if (flags == -1)
976 		return (EINVAL);
977 	if (flags == 0 || copyin((caddr_t)rf, (caddr_t)&tstate, sizeof tstate))
978 		return (EJUSTRETURN);
979 #ifdef DEBUG
980 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
981 		printf("sigreturn(%d): ssp %x usp %x scp %x ft %d\n",
982 		       p->p_pid, &flags, scp->sc_sp, uap->sigcntxp,
983 		       (flags&SS_RTEFRAME) ? tstate.ss_frame.f_format : -1);
984 #endif
985 	/*
986 	 * Restore most of the users registers except for A6 and SP
987 	 * which were handled above.
988 	 */
989 	if (flags & SS_USERREGS)
990 		bcopy((caddr_t)tstate.ss_frame.f_regs,
991 		      (caddr_t)frame->f_regs, sizeof(frame->f_regs)-2*NBPW);
992 	/*
993 	 * Restore long stack frames.  Note that we do not copy
994 	 * back the saved SR or PC, they were picked up above from
995 	 * the sigcontext structure.
996 	 */
997 	if (flags & SS_RTEFRAME) {
998 		register int sz;
999 
1000 		/* grab frame type and validate */
1001 		sz = tstate.ss_frame.f_format;
1002 		if (sz > 15 || (sz = exframesize[sz]) < 0)
1003 			return (EINVAL);
1004 		frame->f_stackadj -= sz;
1005 		frame->f_format = tstate.ss_frame.f_format;
1006 		frame->f_vector = tstate.ss_frame.f_vector;
1007 		bcopy((caddr_t)&tstate.ss_frame.F_u, (caddr_t)&frame->F_u, sz);
1008 #ifdef DEBUG
1009 		if (sigdebug & SDB_FOLLOW)
1010 			printf("sigreturn(%d): copy in %d of frame type %d\n",
1011 			       p->p_pid, sz, tstate.ss_frame.f_format);
1012 #endif
1013 	}
1014 #ifdef FPCOPROC
1015 	/*
1016 	 * Finally we restore the original FP context
1017 	 */
1018 	if (flags & SS_FPSTATE)
1019 		m68881_restore(&tstate.ss_fpstate);
1020 #ifdef DEBUG
1021 	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
1022 		printf("sigreturn(%d): copied in FP state (%x) at %x\n",
1023 		       p->p_pid, *(u_int *)&tstate.ss_fpstate,
1024 		       &tstate.ss_fpstate);
1025 #endif
1026 #endif
1027 #ifdef DEBUG
1028 	if ((sigdebug & SDB_FOLLOW) ||
1029 	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
1030 		printf("sigreturn(%d): returns\n", p->p_pid);
1031 #endif
1032 	return (EJUSTRETURN);
1033 }
1034 
1035 int	waittime = -1;
1036 
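/*
 * Halt or reboot the machine.  Unless RB_NOSYNC is set or the disks
 * have already been synced, sync them and update the time-of-day
 * register; then either stop the processor (RB_HALT) or optionally
 * dump (RB_DUMP) and reboot via doboot().
 */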
1037 boot(howto)
1038 	register int howto;
1039 {
1040 	/* take a snap shot before clobbering any registers */
1041 	if (curproc && curproc->p_addr)
1042 		savectx(curproc->p_addr, 0);
1043 
1044 	boothowto = howto;
1045 	if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
1046 		register struct buf *bp;
1047 		int iter, nbusy;
1048 
1049 		waittime = 0;
1050 		(void) spl0();
1051 		printf("syncing disks... ");
1052 		/*
1053 		 * Release vnodes held by texts before sync.
1054 		 */
1055 		if (panicstr == 0)
1056 			vnode_pager_umount(NULL);
1057 #ifdef notdef
1058 #include "vn.h"
1059 #if NVN > 0
1060 		vnshutdown();
1061 #endif
1062 #endif
1063 		sync(&proc0, (void *)NULL, (int *)NULL);
1064 
1065 		for (iter = 0; iter < 20; iter++) {
1066 			nbusy = 0;
1067 			for (bp = &buf[nbuf]; --bp >= buf; )
1068 				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
1069 					nbusy++;
1070 			if (nbusy == 0)
1071 				break;
1072 			printf("%d ", nbusy);
1073 			DELAY(40000 * iter);
1074 		}
1075 		if (nbusy)
1076 			printf("giving up\n");
1077 		else
1078 			printf("done\n");
1079 		/*
1080 		 * If we've been adjusting the clock, the todr
1081 		 * will be out of synch; adjust it now.
1082 		 */
1083 		resettodr();
1084 	}
1085 	splhigh();			/* extreme priority */
1086 	if (howto&RB_HALT) {
1087 		printf("halted\n\n");
1088 		asm("	stop	#0x2700");
1089 	} else {
1090 		if (howto & RB_DUMP)
1091 			dumpsys();
1092 		doboot();
1093 		/*NOTREACHED*/
1094 	}
1095 	/*NOTREACHED*/
1096 }
1097 
1098 int	dumpmag = 0x8fca0101;	/* magic number for savecore */
1099 int	dumpsize = 0;		/* also for savecore */
1100 long	dumplo = 0;
1101 
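/*
 * Compute the size (dumpsize) and offset (dumplo) of a crash dump
 * on the configured dump device, keeping it clear of any disk label.
 */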
1102 dumpconf()
1103 {
1104 	int nblks;
1105 
1106 	dumpsize = physmem;
1107 	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
1108 		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1109 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
1110 			dumpsize = btoc(dbtob(nblks - dumplo));
1111 		else if (dumplo == 0)
1112 			dumplo = nblks - btodb(ctob(physmem));
1113 	}
1114 	/*
1115 	 * Don't dump on the first CLBYTES (why CLBYTES?)
1116 	 * in case the dump device includes a disk label.
1117 	 */
1118 	if (dumplo < btodb(CLBYTES))
1119 		dumplo = btodb(CLBYTES);
1120 }
1121 
1122 /*
1123  * Doadump comes here after turning off memory management and
1124  * getting on the dump stack, either when called above, or by
1125  * the auto-restart code.
1126  */
1127 dumpsys()
1128 {
1129 
1130 	msgbufmapped = 0;
1131 	if (dumpdev == NODEV)
1132 		return;
1133 	/*
1134 	 * For dumps during autoconfiguration,
1135 	 * if the dump device has already been configured...
1136 	 */
1137 	if (dumpsize == 0)
1138 		dumpconf();
1139 	if (dumplo < 0)
1140 		return;
1141 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
1142 	printf("dump ");
1143 	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
1144 
1145 	case ENXIO:
1146 		printf("device bad\n");
1147 		break;
1148 
1149 	case EFAULT:
1150 		printf("device not ready\n");
1151 		break;
1152 
1153 	case EINVAL:
1154 		printf("area improper\n");
1155 		break;
1156 
1157 	case EIO:
1158 		printf("i/o error\n");
1159 		break;
1160 
1161 	default:
1162 		printf("succeeded\n");
1163 		break;
1164 	}
1165 }
1166 
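/*
 * Machine-dependent CPU initialization: set the mapped copyin/copyout
 * threshold (if MAPPEDCOPY), enable parity error detection, and set
 * up the front panel LEDs (if USELEDS).
 */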
1167 initcpu()
1168 {
1169 #ifdef MAPPEDCOPY
1170 	extern u_int mappedcopysize;
1171 
1172 	/*
1173 	 * Initialize lower bound for doing copyin/copyout using
1174 	 * page mapping (if not already set).  We don't do this on
1175 	 * VAC machines as it loses big time.
1176 	 */
1177 	if (mappedcopysize == 0) {
1178 		if (ectype == EC_VIRT)
1179 			mappedcopysize = (u_int) -1;
1180 		else
1181 			mappedcopysize = NBPG;
1182 	}
1183 #endif
1184 	parityenable();
1185 #ifdef USELEDS
1186 	ledinit();
1187 #endif
1188 }
1189 
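/*
 * Report an unexpected (unclaimed) trap or interrupt vector.
 */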
1190 straytrap(pc, evec)
1191 	int pc;
1192 	u_short evec;
1193 {
1194 	printf("unexpected trap (vector offset %x) from %x\n",
1195 	       evec & 0xFFF, pc);
1196 }
1197 
1198 int	*nofault;
1199 
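/*
 * badaddr/badbaddr: probe a word/byte at the given address.
 * Return non-zero if the access causes a bus error (caught via
 * the nofault/setjmp mechanism), zero if the address responds.
 */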
1200 badaddr(addr)
1201 	register caddr_t addr;
1202 {
1203 	register int i;
1204 	label_t	faultbuf;
1205 
1206 #ifdef lint
1207 	i = *addr; if (i) return(0);
1208 #endif
1209 	nofault = (int *) &faultbuf;
1210 	if (setjmp((label_t *)nofault)) {
1211 		nofault = (int *) 0;
1212 		return(1);
1213 	}
1214 	i = *(volatile short *)addr;
1215 	nofault = (int *) 0;
1216 	return(0);
1217 }
1218 
1219 badbaddr(addr)
1220 	register caddr_t addr;
1221 {
1222 	register int i;
1223 	label_t	faultbuf;
1224 
1225 #ifdef lint
1226 	i = *addr; if (i) return(0);
1227 #endif
1228 	nofault = (int *) &faultbuf;
1229 	if (setjmp((label_t *)nofault)) {
1230 		nofault = (int *) 0;
1231 		return(1);
1232 	}
1233 	i = *(volatile char *)addr;
1234 	nofault = (int *) 0;
1235 	return(0);
1236 }
1237 
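/*
 * Software network interrupt: run the input routine of each
 * protocol whose bit is set in netisr.
 */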
1238 netintr()
1239 {
1240 #ifdef INET
1241 	if (netisr & (1 << NETISR_ARP)) {
1242 		netisr &= ~(1 << NETISR_ARP);
1243 		arpintr();
1244 	}
1245 	if (netisr & (1 << NETISR_IP)) {
1246 		netisr &= ~(1 << NETISR_IP);
1247 		ipintr();
1248 	}
1249 #endif
1250 #ifdef NS
1251 	if (netisr & (1 << NETISR_NS)) {
1252 		netisr &= ~(1 << NETISR_NS);
1253 		nsintr();
1254 	}
1255 #endif
1256 #ifdef ISO
1257 	if (netisr & (1 << NETISR_ISO)) {
1258 		netisr &= ~(1 << NETISR_ISO);
1259 		clnlintr();
1260 	}
1261 #endif
1262 #ifdef CCITT
1263 	if (netisr & (1 << NETISR_CCITT)) {
1264 		netisr &= ~(1 << NETISR_CCITT);
1265 		ccittintr();
1266 	}
1267 #endif
1268 }
1269 
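/*
 * Dispatch an auto-vectored interrupt (ipl 3-5) to the handlers
 * registered on the corresponding isrqueue; complain about stray
 * interrupts and panic if too many occur in a row.
 */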
1270 intrhand(sr)
1271 	int sr;
1272 {
1273 	register struct isr *isr;
1274 	register int found = 0;
1275 	register int ipl;
1276 	extern struct isr isrqueue[];
1277 	static int straycount;
1278 
1279 	ipl = (sr >> 8) & 7;
1280 	switch (ipl) {
1281 
1282 	case 3:
1283 	case 4:
1284 	case 5:
1285 		ipl = ISRIPL(ipl);
1286 		isr = isrqueue[ipl].isr_forw;
1287 		for (; isr != &isrqueue[ipl]; isr = isr->isr_forw) {
1288 			if ((isr->isr_intr)(isr->isr_arg)) {
1289 				found++;
1290 				break;
1291 			}
1292 		}
1293 		if (found)
1294 			straycount = 0;
1295 		else if (++straycount > 50)
1296 			panic("intrhand: stray interrupt");
1297 		else
1298 			printf("stray interrupt, sr 0x%x\n", sr);
1299 		break;
1300 
1301 	case 0:
1302 	case 1:
1303 	case 2:
1304 	case 6:
1305 	case 7:
1306 		if (++straycount > 50)
1307 			panic("intrhand: unexpected sr");
1308 		else
1309 			printf("intrhand: unexpected sr 0x%x\n", sr);
1310 		break;
1311 	}
1312 }
1313 
1314 #if defined(DEBUG) && !defined(PANICBUTTON)
1315 #define PANICBUTTON
1316 #endif
1317 
1318 #ifdef PANICBUTTON
1319 int panicbutton = 1;	/* non-zero if panic buttons are enabled */
1320 int crashandburn = 0;
1321 int candbdelay = 50;	/* give em half a second */
1322 
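/*
 * Timeout handler that clears the crashandburn flag; a second
 * keyboard NMI must arrive before it expires to force a panic.
 */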
1323 void
1324 candbtimer(arg)
1325 	void *arg;
1326 {
1327 
1328 	crashandburn = 0;
1329 }
1330 #endif
1331 
1332 /*
1333  * Level 7 interrupts can be caused by the keyboard or parity errors.
1334  */
1335 nmihand(frame)
1336 	struct frame frame;
1337 {
1338 	if (kbdnmi()) {
1339 #ifdef PANICBUTTON
1340 		static int innmihand = 0;
1341 
1342 		/*
1343 		 * Attempt to reduce the window of vulnerability for recursive
1344 		 * NMIs (e.g. someone holding down the keyboard reset button).
1345 		 */
1346 		if (innmihand == 0) {
1347 			innmihand = 1;
1348 			printf("Got a keyboard NMI\n");
1349 			innmihand = 0;
1350 		}
1351 		if (panicbutton) {
1352 			if (crashandburn) {
1353 				crashandburn = 0;
1354 				panic(panicstr ?
1355 				      "forced crash, nosync" : "forced crash");
1356 			}
1357 			crashandburn++;
1358 			timeout(candbtimer, (void *)0, candbdelay);
1359 		}
1360 #endif
1361 		return;
1362 	}
1363 	if (parityerror(&frame))
1364 		return;
1365 	/* panic?? */
1366 	printf("unexpected level 7 interrupt ignored\n");
1367 }
1368 
1369 /*
1370  * Parity error section.  Contains magic.
1371  */
1372 #define PARREG		((volatile short *)IIOV(0x5B0000))
1373 static int gotparmem = 0;
1374 #ifdef DEBUG
1375 int ignorekperr = 0;	/* ignore kernel parity errors */
1376 #endif
1377 
1378 /*
1379  * Enable parity detection
1380  */
1381 parityenable()
1382 {
1383 	label_t	faultbuf;
1384 
1385 	nofault = (int *) &faultbuf;
1386 	if (setjmp((label_t *)nofault)) {
1387 		nofault = (int *) 0;
1388 #ifdef DEBUG
1389 		printf("No parity memory\n");
1390 #endif
1391 		return;
1392 	}
1393 	*PARREG = 1;
1394 	nofault = (int *) 0;
1395 	gotparmem = 1;
1396 #ifdef DEBUG
1397 	printf("Parity detection enabled\n");
1398 #endif
1399 }
1400 
1401 /*
1402  * Determine if level 7 interrupt was caused by a parity error
1403  * and deal with it if it was.  Returns 1 if it was a parity error.
1404  */
1405 parityerror(fp)
1406 	struct frame *fp;
1407 {
1408 	if (!gotparmem)
1409 		return(0);
1410 	*PARREG = 0;
1411 	DELAY(10);
1412 	*PARREG = 1;
1413 	if (panicstr) {
1414 		printf("parity error after panic ignored\n");
1415 		return(1);
1416 	}
1417 	if (!findparerror())
1418 		printf("WARNING: transient parity error ignored\n");
1419 	else if (USERMODE(fp->f_sr)) {
1420 		printf("pid %d: parity error\n", curproc->p_pid);
1421 		uprintf("sorry, pid %d killed due to memory parity error\n",
1422 			curproc->p_pid);
1423 		psignal(curproc, SIGKILL);
1424 #ifdef DEBUG
1425 	} else if (ignorekperr) {
1426 		printf("WARNING: kernel parity error ignored\n");
1427 #endif
1428 	} else {
1429 		regdump(fp, 128);
1430 		panic("kernel parity error");
1431 	}
1432 	return(1);
1433 }
1434 
1435 /*
1436  * Yuk!  There has got to be a better way to do this!
1437  * Searching all of memory with interrupts blocked can lead to disaster.
1438  */
1439 findparerror()
1440 {
1441 	static label_t parcatch;
1442 	static int looking = 0;
1443 	volatile int pg, o, s;
1444 	register volatile int *ip;
1445 	register int i;
1446 	int found;
1447 
1448 #ifdef lint
1449 	i = o = pg = 0; if (i) return(0);
1450 #endif
1451 	/*
1452 	 * If looking is true we are searching for a known parity error
1453 	 * and it has just occurred.  All we do is return to the higher
1454 	 * level invocation.
1455 	 */
1456 	if (looking)
1457 		longjmp(&parcatch);
1458 	s = splhigh();
1459 	/*
1460 	 * If setjmp returns true, the parity error we were searching
1461 	 * for has just occurred (longjmp above) at the current pg+o
1462 	 */
1463 	if (setjmp(&parcatch)) {
1464 		printf("Parity error at 0x%x\n", ctob(pg)|o);
1465 		found = 1;
1466 		goto done;
1467 	}
1468 	/*
1469 	 * If we get here, a parity error has occurred for the first time
1470 	 * and we need to find it.  We turn off any external caches and
1471 	 * loop thru memory, testing every longword til a fault occurs and
1472 	 * we regain control at setjmp above.  Note that because of the
1473 	 * setjmp, pg and o need to be volatile or their values will be lost.
1474 	 */
1475 	looking = 1;
1476 	ecacheoff();
1477 	for (pg = btoc(lowram); pg < btoc(lowram)+physmem; pg++) {
1478 		pmap_enter(kernel_pmap, (vm_offset_t)vmmap, ctob(pg),
1479 		    VM_PROT_READ, TRUE);
1480 		ip = (int *)vmmap;
1481 		for (o = 0; o < NBPG; o += sizeof(int))
1482 			i = *ip++;
1483 	}
1484 	/*
1485 	 * Getting here implies no fault was found.  Should never happen.
1486 	 */
1487 	printf("Couldn't locate parity error\n");
1488 	found = 0;
1489 done:
1490 	looking = 0;
1491 	pmap_remove(kernel_pmap, (vm_offset_t)vmmap, (vm_offset_t)&vmmap[NBPG]);
1492 	ecacheon();
1493 	splx(s);
1494 	return(found);
1495 }
1496 
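/*
 * Dump the registers from the given exception frame along with an
 * optional dump of the kernel or user stack.
 */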
1497 regdump(fp, sbytes)
1498 	struct frame *fp; /* must not be register */
1499 	int sbytes;
1500 {
1501 	static int doingdump = 0;
1502 	register int i;
1503 	int s;
1504 	extern char *hexstr();
1505 
1506 	if (doingdump)
1507 		return;
1508 	s = splhigh();
1509 	doingdump = 1;
1510 	printf("pid = %d, pc = %s, ",
1511 	       curproc ? curproc->p_pid : -1, hexstr(fp->f_pc, 8));
1512 	printf("ps = %s, ", hexstr(fp->f_sr, 4));
1513 	printf("sfc = %s, ", hexstr(getsfc(), 4));
1514 	printf("dfc = %s\n", hexstr(getdfc(), 4));
1515 	printf("Registers:\n     ");
1516 	for (i = 0; i < 8; i++)
1517 		printf("        %d", i);
1518 	printf("\ndreg:");
1519 	for (i = 0; i < 8; i++)
1520 		printf(" %s", hexstr(fp->f_regs[i], 8));
1521 	printf("\nareg:");
1522 	for (i = 0; i < 8; i++)
1523 		printf(" %s", hexstr(fp->f_regs[i+8], 8));
1524 	if (sbytes > 0) {
1525 		if (fp->f_sr & PSL_S) {
1526 			printf("\n\nKernel stack (%s):",
1527 			       hexstr((int)(((int *)&fp)-1), 8));
1528 			dumpmem(((int *)&fp)-1, sbytes, 0);
1529 		} else {
1530 			printf("\n\nUser stack (%s):", hexstr(fp->f_regs[SP], 8));
1531 			dumpmem((int *)fp->f_regs[SP], sbytes, 1);
1532 		}
1533 	}
1534 	doingdump = 0;
1535 	splx(s);
1536 }
1537 
1538 extern char kstack[];
1539 #define KSADDR	((int *)&(kstack[(UPAGES-1)*NBPG]))
1540 
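/*
 * Print sz longwords starting at ptr, fetching through fuword()
 * for a user stack and directly (bounded to the kernel stack page)
 * otherwise.
 */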
1541 dumpmem(ptr, sz, ustack)
1542 	register int *ptr;
1543 	int sz, ustack;
1544 {
1545 	register int i, val;
1546 	extern char *hexstr();
1547 
1548 	for (i = 0; i < sz; i++) {
1549 		if ((i & 7) == 0)
1550 			printf("\n%s: ", hexstr((int)ptr, 6));
1551 		else
1552 			printf(" ");
1553 		if (ustack == 1) {
1554 			if ((val = fuword(ptr++)) == -1)
1555 				break;
1556 		} else {
1557 			if (ustack == 0 &&
1558 			    (ptr < KSADDR || ptr > KSADDR+(NBPG/4-1)))
1559 				break;
1560 			val = *ptr++;
1561 		}
1562 		printf("%s", hexstr(val, 8));
1563 	}
1564 	printf("\n");
1565 }
1566 
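/*
 * Convert val to a len-digit (at most 8) hex string in a static buffer.
 */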
1567 char *
1568 hexstr(val, len)
1569 	register int val;
1570 	int len;
1571 {
1572 	static char nbuf[9];
1573 	register int x, i;
1574 
1575 	if (len > 8)
1576 		return("");
1577 	nbuf[len] = '\0';
1578 	for (i = len-1; i >= 0; --i) {
1579 		x = val & 0xF;
1580 		if (x > 9)
1581 			nbuf[i] = x - 10 + 'A';
1582 		else
1583 			nbuf[i] = x + '0';
1584 		val >>= 4;
1585 	}
1586 	return(nbuf);
1587 }
1588 
1589 #ifdef DEBUG
1590 char oflowmsg[] = "k-stack overflow";
1591 char uflowmsg[] = "k-stack underflow";
1592 
1593 badkstack(oflow, fr)
1594 	int oflow;
1595 	struct frame fr;
1596 {
1597 	extern char kstackatbase[];
1598 
1599 	printf("%s: sp should be %x\n",
1600 	       oflow ? oflowmsg : uflowmsg,
1601 	       kstackatbase - (exframesize[fr.f_format] + 8));
1602 	regdump(&fr, 0);
1603 	panic(oflow ? oflowmsg : uflowmsg);
1604 }
1605 #endif
1606