xref: /original-bsd/sys/hp300/hp300/machdep.c (revision babae2df)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
13  *
14  *	@(#)machdep.c	8.8 (Berkeley) 03/21/94
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/signalvar.h>
20 #include <sys/kernel.h>
21 #include <sys/map.h>
22 #include <sys/proc.h>
23 #include <sys/buf.h>
24 #include <sys/reboot.h>
25 #include <sys/conf.h>
26 #include <sys/file.h>
27 #include <sys/clist.h>
28 #include <sys/callout.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/msgbuf.h>
32 #include <sys/ioctl.h>
33 #include <sys/tty.h>
34 #include <sys/mount.h>
35 #include <sys/user.h>
36 #include <sys/exec.h>
37 #include <sys/sysctl.h>
38 #ifdef SYSVSHM
39 #include <sys/shm.h>
40 #endif
41 #ifdef HPUXCOMPAT
42 #include <hp/hpux/hpux.h>
43 #endif
44 
45 #include <machine/cpu.h>
46 #include <machine/reg.h>
47 #include <machine/psl.h>
48 #include <hp/dev/cons.h>
49 #include <hp300/hp300/isr.h>
50 #include <hp300/hp300/pte.h>
51 #include <net/netisr.h>
52 
53 #define	MAXMEM	64*1024*CLSIZE	/* XXX - from cmap.h */
54 #include <vm/vm_kern.h>
55 
/* the following is used externally (sysctl_hw) */
char machine[] = "hp300";		/* cpu "architecture" */

vm_map_t buffer_map;			/* submap for filesystem buffers */
extern vm_offset_t avail_end;		/* pre-decremented in pmap_bootstrap;
					   msgbuf pages live at avail_end */

/*
 * Declare these as initialized data so we can patch them.
 * A value of 0 means "compute a default at boot" (see cpu_startup).
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
int	maxmem;			/* max memory per process */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */
/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

extern	u_int lowram;
extern	short exframesize[];	/* per-format exception frame sizes,
				   indexed by frame format (see sendsig) */
87 
88 /*
89  * Console initialization: called early on from main,
90  * before vm init or startup.  Do enough configuration
91  * to choose and initialize a console.
92  */
93 consinit()
94 {
95 
96 	/*
97 	 * Set cpuspeed immediately since cninit() called routines
98 	 * might use delay.  Note that we only set it if a custom value
99 	 * has not already been specified.
100 	 */
101 	if (cpuspeed == 0) {
102 		switch (machineid) {
103 		case HP_320:
104 		case HP_330:
105 		case HP_340:
106 			cpuspeed = MHZ_16;
107 			break;
108 		case HP_350:
109 		case HP_360:
110 		case HP_380:
111 			cpuspeed = MHZ_25;
112 			break;
113 		case HP_370:
114 		case HP_433:
115 			cpuspeed = MHZ_33;
116 			break;
117 		case HP_375:
118 			cpuspeed = MHZ_50;
119 			break;
120 		default:	/* assume the fastest */
121 			cpuspeed = MHZ_50;
122 			break;
123 		}
124 		if (mmutype == MMU_68040)
125 			cpuspeed *= 2;	/* XXX */
126 	}
127 	/*
128          * Find what hardware is attached to this machine.
129          */
130 	find_devs();
131 
132 	/*
133 	 * Initialize the console before we print anything out.
134 	 */
135 	cninit();
136 }
137 
138 /*
139  * cpu_startup: allocate memory for variable-sized tables,
140  * initialize cpu, and do autoconfiguration.
141  */
142 cpu_startup()
143 {
144 	register unsigned i;
145 	register caddr_t v, firstaddr;
146 	int base, residual;
147 	vm_offset_t minaddr, maxaddr;
148 	vm_size_t size;
149 #ifdef BUFFERS_UNMANAGED
150 	vm_offset_t bufmemp;
151 	caddr_t buffermem;
152 	int ix;
153 #endif
154 #ifdef DEBUG
155 	extern int pmapdebug;
156 	int opmapdebug = pmapdebug;
157 
158 	pmapdebug = 0;
159 #endif
160 
161 	/*
162 	 * Initialize error message buffer (at end of core).
163 	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
164 	 */
165 	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
166 		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
167 		    avail_end + i * NBPG, VM_PROT_ALL, TRUE);
168 	msgbufmapped = 1;
169 
170 	/*
171 	 * Good {morning,afternoon,evening,night}.
172 	 */
173 	printf(version);
174 	identifycpu();
175 	printf("real mem = %d\n", ctob(physmem));
176 
177 	/*
178 	 * Allocate space for system data structures.
179 	 * The first available real memory address is in "firstaddr".
180 	 * The first available kernel virtual address is in "v".
181 	 * As pages of kernel virtual memory are allocated, "v" is incremented.
182 	 * As pages of memory are allocated and cleared,
183 	 * "firstaddr" is incremented.
184 	 * An index into the kernel page table corresponding to the
185 	 * virtual memory address maintained in "v" is kept in "mapaddr".
186 	 */
187 	/*
188 	 * Make two passes.  The first pass calculates how much memory is
189 	 * needed and allocates it.  The second pass assigns virtual
190 	 * addresses to the various data structures.
191 	 */
192 	firstaddr = 0;
193 again:
194 	v = (caddr_t)firstaddr;
195 
196 #define	valloc(name, type, num) \
197 	    (name) = (type *)v; v = (caddr_t)((name)+(num))
198 #define	valloclim(name, type, num, lim) \
199 	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
200 	valloc(cfree, struct cblock, nclist);
201 	valloc(callout, struct callout, ncallout);
202 	valloc(swapmap, struct map, nswapmap = maxproc * 2);
203 #ifdef SYSVSHM
204 	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
205 #endif
206 
207 	/*
208 	 * Determine how many buffers to allocate.
209 	 * Since HPs tend to be long on memory and short on disk speed,
210 	 * we allocate more buffer space than the BSD standard of
211 	 * use 10% of memory for the first 2 Meg, 5% of remaining.
212 	 * We just allocate a flat 10%.  Insure a minimum of 16 buffers.
213 	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
214 	 */
215 	if (bufpages == 0)
216 		bufpages = physmem / 10 / CLSIZE;
217 	if (nbuf == 0) {
218 		nbuf = bufpages;
219 		if (nbuf < 16)
220 			nbuf = 16;
221 	}
222 	if (nswbuf == 0) {
223 		nswbuf = (nbuf / 2) &~ 1;	/* force even */
224 		if (nswbuf > 256)
225 			nswbuf = 256;		/* sanity */
226 	}
227 	valloc(swbuf, struct buf, nswbuf);
228 	valloc(buf, struct buf, nbuf);
229 	/*
230 	 * End of first pass, size has been calculated so allocate memory
231 	 */
232 	if (firstaddr == 0) {
233 		size = (vm_size_t)(v - firstaddr);
234 		firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
235 		if (firstaddr == 0)
236 			panic("startup: no room for tables");
237 #ifdef BUFFERS_UNMANAGED
238 		buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*CLBYTES);
239 		if (buffermem == 0)
240 			panic("startup: no room for buffers");
241 #endif
242 		goto again;
243 	}
244 	/*
245 	 * End of second pass, addresses have been assigned
246 	 */
247 	if ((vm_size_t)(v - firstaddr) != size)
248 		panic("startup: table size inconsistency");
249 	/*
250 	 * Now allocate buffers proper.  They are different than the above
251 	 * in that they usually occupy more virtual memory than physical.
252 	 */
253 	size = MAXBSIZE * nbuf;
254 	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
255 				   &maxaddr, size, TRUE);
256 	minaddr = (vm_offset_t)buffers;
257 	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
258 			&minaddr, size, FALSE) != KERN_SUCCESS)
259 		panic("startup: cannot allocate buffers");
260 	base = bufpages / nbuf;
261 	residual = bufpages % nbuf;
262 #ifdef BUFFERS_UNMANAGED
263 	bufmemp = (vm_offset_t) buffermem;
264 #endif
265 	for (i = 0; i < nbuf; i++) {
266 		vm_size_t curbufsize;
267 		vm_offset_t curbuf;
268 
269 		/*
270 		 * First <residual> buffers get (base+1) physical pages
271 		 * allocated for them.  The rest get (base) physical pages.
272 		 *
273 		 * The rest of each buffer occupies virtual space,
274 		 * but has no physical memory allocated for it.
275 		 */
276 		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
277 		curbufsize = CLBYTES * (i < residual ? base+1 : base);
278 #ifdef BUFFERS_UNMANAGED
279 		/*
280 		 * Move the physical pages over from buffermem.
281 		 */
282 		for (ix = 0; ix < curbufsize/CLBYTES; ix++) {
283 			vm_offset_t pa;
284 
285 			pa = pmap_extract(kernel_pmap, bufmemp);
286 			if (pa == 0)
287 				panic("startup: unmapped buffer");
288 			pmap_remove(kernel_pmap, bufmemp, bufmemp+CLBYTES);
289 			pmap_enter(kernel_pmap,
290 				   (vm_offset_t)(curbuf + ix * CLBYTES),
291 				   pa, VM_PROT_READ|VM_PROT_WRITE, TRUE);
292 			bufmemp += CLBYTES;
293 		}
294 #else
295 		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
296 		vm_map_simplify(buffer_map, curbuf);
297 #endif
298 	}
299 #ifdef BUFFERS_UNMANAGED
300 #if 0
301 	/*
302 	 * We would like to free the (now empty) original address range
303 	 * but too many bad things will happen if we try.
304 	 */
305 	kmem_free(kernel_map, (vm_offset_t)buffermem, bufpages*CLBYTES);
306 #endif
307 #endif
308 	/*
309 	 * Allocate a submap for exec arguments.  This map effectively
310 	 * limits the number of processes exec'ing at any time.
311 	 */
312 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
313 				 16*NCARGS, TRUE);
314 	/*
315 	 * Allocate a submap for physio
316 	 */
317 	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
318 				 VM_PHYS_SIZE, TRUE);
319 
320 	/*
321 	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
322 	 * we use the more space efficient malloc in place of kmem_alloc.
323 	 */
324 	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
325 				   M_MBUF, M_NOWAIT);
326 	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
327 	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
328 			       VM_MBUF_SIZE, FALSE);
329 	/*
330 	 * Initialize callouts
331 	 */
332 	callfree = callout;
333 	for (i = 1; i < ncallout; i++)
334 		callout[i-1].c_next = &callout[i];
335 	callout[i-1].c_next = NULL;
336 
337 #ifdef DEBUG
338 	pmapdebug = opmapdebug;
339 #endif
340 	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
341 	printf("using %d buffers containing %d bytes of memory\n",
342 		nbuf, bufpages * CLBYTES);
343 	/*
344 	 * Set up CPU-specific registers, cache, etc.
345 	 */
346 	initcpu();
347 
348 	/*
349 	 * Set up buffers, so they can be used to read disk labels.
350 	 */
351 	bufinit();
352 
353 	/*
354 	 * Configure the system.
355 	 */
356 	configure();
357 }
358 
/*
 * Set registers on exec.
 * XXX Should clear registers except sp, pc,
 * but would break init; should be fixed soon.
 */
setregs(p, entry, retval)
	register struct proc *p;
	u_long entry;
	int retval[2];
{
	struct frame *frame = (struct frame *)p->p_md.md_regs;

	/* start execution at the entry point; force an even address */
	frame->f_pc = entry & ~1;
#ifdef FPCOPROC
	/* restore a null state frame */
	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
	m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
#endif
#ifdef HPUXCOMPAT
	/* HP-UX processes expect hardware-inventory hints in a0/d0/d1 */
	if (p->p_md.md_flags & MDP_HPUX) {

		frame->f_regs[A0] = 0; /* not 68010 (bit 31), no FPA (30) */
		retval[0] = 0;		/* no float card */
#ifdef FPCOPROC
		retval[1] = 1;		/* yes 68881 */
#else
		retval[1] = 0;		/* no 68881 */
#endif
	}
	/*
	 * XXX This doesn't have much to do with setting registers but
	 * I didn't want to muck up kern_exec.c with this code, so I
	 * stuck it here.
	 *
	 * Ensure we perform the right action on traps type 1 and 2:
	 * If our parent is an HPUX process and we are being traced, turn
	 * on HPUX style interpretation.  Else if we were using the HPUX
	 * style interpretation, revert to the BSD interpretation.
	 *
	 * Note that we do this by changing the trap instruction in the
	 * global "sigcode" array which then gets copied out to the user's
	 * sigcode in the stack.  Since we are changing it in the global
	 * array we must always reset it, even for non-HPUX processes.
	 *
	 * Note also that implementing it in this way creates a potential
	 * race where we could have tweaked it for process A which then
	 * blocks in the copyout to the stack and process B comes along
	 * and untweaks it causing A to wind up with the wrong setting
	 * when the copyout continues.  However, since we have already
	 * copied something out to this user stack page (thereby faulting
	 * it in), this scenerio is extremely unlikely.
	 */
	{
		extern short sigcodetrap[];

		/* 0x4E42 = trap #2 (HP-UX style), 0x4E41 = trap #1 (BSD) */
		if ((p->p_pptr->p_md.md_flags & MDP_HPUX) &&
		    (p->p_flag & P_TRACED)) {
			p->p_md.md_flags |= MDP_HPUXTRACE;
			*sigcodetrap = 0x4E42;
		} else {
			p->p_md.md_flags &= ~MDP_HPUXTRACE;
			*sigcodetrap = 0x4E41;
		}
	}
#endif
}
425 
/*
 * Info for CTL_HW
 */
extern	char machine[];
char	cpu_model[120];		/* filled in by identifycpu() below */
extern	char ostype[], osrelease[], version[];

/*
 * Build and print the cpu_model string from the machine id, MMU
 * type, FPU and external cache configuration, then panic if this
 * kernel was not configured for the CPU type we are running on.
 */
identifycpu()
{
	char *t, *mc;
	int len;

	/* base model name, with opening paren of the detail suffix */
	switch (machineid) {
	case HP_320:
		t = "320 (16.67MHz";
		break;
	case HP_330:
		t = "318/319/330 (16.67MHz";
		break;
	case HP_340:
		t = "340 (16.67MHz";
		break;
	case HP_350:
		t = "350 (25MHz";
		break;
	case HP_360:
		t = "360 (25MHz";
		break;
	case HP_370:
		t = "370 (33.33MHz";
		break;
	case HP_375:
		t = "345/375 (50MHz";
		break;
	case HP_380:
		t = "380/425 (25MHz";
		break;
	case HP_433:
		t = "433 (33MHz";
		break;
	default:
		printf("\nunknown machine type %d\n", machineid);
		panic("startup");
	}
	/* CPU generation is inferred from the MMU type */
	mc = (mmutype == MMU_68040 ? "40" :
	       (mmutype == MMU_68030 ? "30" : "20"));
	sprintf(cpu_model, "HP9000/%s MC680%s CPU", t, mc);
	switch (mmutype) {
	case MMU_68040:
	case MMU_68030:
		strcat(cpu_model, "+MMU");	/* on-chip MMU */
		break;
	case MMU_68851:
		strcat(cpu_model, ", MC68851 MMU");
		break;
	case MMU_HP:
		strcat(cpu_model, ", HP MMU");
		break;
	default:
		printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
		panic("startup");
	}
	/* append FPU description; len tracks the end of the string */
	len = strlen(cpu_model);
	if (mmutype == MMU_68040)
		len += sprintf(cpu_model + len,
		    "+FPU, 4k on-chip physical I/D caches");
	else if (mmutype == MMU_68030)
		len += sprintf(cpu_model + len, ", %sMHz MC68882 FPU",
		       machineid == HP_340 ? "16.67" :
		       (machineid == HP_360 ? "25" :
			(machineid == HP_370 ? "33.33" : "50")));
	else
		len += sprintf(cpu_model + len, ", %sMHz MC68881 FPU",
		       machineid == HP_350 ? "20" : "16.67");
	/* append external cache description, if any */
	switch (ectype) {
	case EC_VIRT:
		sprintf(cpu_model + len, ", %dK virtual-address cache",
		       machineid == HP_320 ? 16 : 32);
		break;
	case EC_PHYS:
		sprintf(cpu_model + len, ", %dK physical-address cache",
		       machineid == HP_370 ? 64 : 32);
		break;
	}
	strcat(cpu_model, ")");
	printf("%s\n", cpu_model);
	/*
	 * Now that we have told the user what they have,
	 * let them know if that machine type isn't configured.
	 */
	switch (machineid) {
	case -1:		/* keep compilers happy */
#if !defined(HP320) && !defined(HP350)
	case HP_320:
	case HP_350:
#endif
#ifndef HP330
	case HP_330:
#endif
#if !defined(HP360) && !defined(HP370)
	case HP_340:
	case HP_360:
	case HP_370:
#endif
#if !defined(HP380)
	case HP_380:
	case HP_433:
#endif
		panic("CPU type not configured");
	default:
		break;
	}
}
539 
540 /*
541  * machine dependent system variables.
542  */
543 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
544 	int *name;
545 	u_int namelen;
546 	void *oldp;
547 	size_t *oldlenp;
548 	void *newp;
549 	size_t newlen;
550 	struct proc *p;
551 {
552 
553 	/* all sysctl names at this level are terminal */
554 	if (namelen != 1)
555 		return (ENOTDIR);		/* overloaded */
556 
557 	switch (name[0]) {
558 	case CPU_CONSDEV:
559 		return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
560 		    sizeof cn_tty->t_dev));
561 	default:
562 		return (EOPNOTSUPP);
563 	}
564 	/* NOTREACHED */
565 }
566 
#ifdef USELEDS
#include <hp300/hp300/led.h>

int inledcontrol = 0;	/* 1 if we are in ledcontrol already, cheap mutex */
char *ledaddr;		/* KVA through which the LED register is accessed */

/*
 * Map the LED page and setup the KVA to access it.
 */
ledinit()
{
	extern caddr_t ledbase;

	/* wire the LED physical page into the kernel map */
	pmap_enter(kernel_pmap, (vm_offset_t)ledbase, (vm_offset_t)LED_ADDR,
		   VM_PROT_READ|VM_PROT_WRITE, TRUE);
	/* keep LED_ADDR's sub-page offset within the mapped page */
	ledaddr = (char *) ((int)ledbase | (LED_ADDR & PGOFSET));
}
584 
585 /*
586  * Do lights:
587  *	`ons' is a mask of LEDs to turn on,
588  *	`offs' is a mask of LEDs to turn off,
589  *	`togs' is a mask of LEDs to toggle.
590  * Note we don't use splclock/splx for mutual exclusion.
591  * They are expensive and we really don't need to be that precise.
592  * Besides we would like to be able to profile this routine.
593  */
594 ledcontrol(ons, offs, togs)
595 	register int ons, offs, togs;
596 {
597 	static char currentleds;
598 	register char leds;
599 
600 	inledcontrol = 1;
601 	leds = currentleds;
602 	if (ons)
603 		leds |= ons;
604 	if (offs)
605 		leds &= ~offs;
606 	if (togs)
607 		leds ^= togs;
608 	currentleds = leds;
609 	*ledaddr = ~leds;
610 	inledcontrol = 0;
611 }
612 #endif
613 
614 #define SS_RTEFRAME	1
615 #define SS_FPSTATE	2
616 #define SS_USERREGS	4
617 
618 struct sigstate {
619 	int	ss_flags;		/* which of the following are valid */
620 	struct	frame ss_frame;		/* original exception frame */
621 	struct	fpframe ss_fpstate;	/* 68881/68882 state info */
622 };
623 
624 /*
625  * WARNING: code in locore.s assumes the layout shown for sf_signum
626  * thru sf_handler so... don't screw with them!
627  */
628 struct sigframe {
629 	int	sf_signum;		/* signo for handler */
630 	int	sf_code;		/* additional info for handler */
631 	struct	sigcontext *sf_scp;	/* context ptr for handler */
632 	sig_t	sf_handler;		/* handler addr for u_sigc */
633 	struct	sigstate sf_state;	/* state of the hardware */
634 	struct	sigcontext sf_sc;	/* actual context */
635 };
636 
637 #ifdef HPUXCOMPAT
638 struct	hpuxsigcontext {
639 	int	hsc_syscall;
640 	char	hsc_action;
641 	char	hsc_pad1;
642 	char	hsc_pad2;
643 	char	hsc_onstack;
644 	int	hsc_mask;
645 	int	hsc_sp;
646 	short	hsc_ps;
647 	int	hsc_pc;
648 /* the rest aren't part of the context but are included for our convenience */
649 	short	hsc_pad;
650 	u_int	hsc_magic;		/* XXX sigreturn: cookie */
651 	struct	sigcontext *hsc_realsc;	/* XXX sigreturn: ptr to BSD context */
652 };
653 
654 /*
655  * For an HP-UX process, a partial hpuxsigframe follows the normal sigframe.
656  * Tremendous waste of space, but some HP-UX applications (e.g. LCL) need it.
657  */
658 struct hpuxsigframe {
659 	int	hsf_signum;
660 	int	hsf_code;
661 	struct	sigcontext *hsf_scp;
662 	struct	hpuxsigcontext hsf_sc;
663 	int	hsf_regs[15];
664 };
665 #endif
666 
667 #ifdef DEBUG
668 int sigdebug = 0;
669 int sigpid = 0;
670 #define SDB_FOLLOW	0x01
671 #define SDB_KSTACK	0x02
672 #define SDB_FPSTATE	0x04
673 #endif
674 
/*
 * Send an interrupt to process.
 * Builds a signal frame (argument list, saved hardware state, and
 * sigcontext) in kernel memory, copies it out to the user stack,
 * and arranges for the process to enter the signal trampoline.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct proc *p = curproc;
	register struct sigframe *fp, *kfp;
	register struct frame *frame;
	register struct sigacts *psp = p->p_sigacts;
	register short ft;
	int oonstack, fsize;
	extern char sigcode[], esigcode[];

	frame = (struct frame *)p->p_md.md_regs;
	ft = frame->f_format;
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
#ifdef HPUXCOMPAT
	/* HP-UX processes get an extra hpuxsigframe appended */
	if (p->p_md.md_flags & MDP_HPUX)
		fsize = sizeof(struct sigframe) + sizeof(struct hpuxsigframe);
	else
#endif
	fsize = sizeof(struct sigframe);
	/* use the alternate signal stack if configured and requested */
	if ((psp->ps_flags & SAS_ALTSTACK) &&
	    (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
					 psp->ps_sigstk.ss_size - fsize);
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else
		fp = (struct sigframe *)(frame->f_regs[SP] - fsize);
	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
		(void)grow(p, (unsigned)fp);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d ssp %x usp %x scp %x ft %d\n",
		       p->p_pid, sig, &oonstack, fp, &fp->sf_sc, ft);
#endif
	if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig(%d): useracc failed on sig %d\n",
			       p->p_pid, sig);
#endif
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/* build the frame in kernel memory, then copy it out all at once */
	kfp = (struct sigframe *)malloc((u_long)fsize, M_TEMP, M_WAITOK);
	/*
	 * Build the argument list for the signal handler.
	 */
	kfp->sf_signum = sig;
	/*
	 * If sendsig call was delayed due to process being traced,
	 * code will always be zero.  Look in ps_code to see if trapsignal
	 * stashed something there.
	 */
	if (code == 0 && (code = psp->ps_code))
		psp->ps_code = 0;
	kfp->sf_code = code;
	kfp->sf_scp = &fp->sf_sc;
	kfp->sf_handler = catcher;
	/*
	 * Save necessary hardware state.  Currently this includes:
	 *	- general registers
	 *	- original exception frame (if not a "normal" frame)
	 *	- FP coprocessor state
	 */
	kfp->sf_state.ss_flags = SS_USERREGS;
	bcopy((caddr_t)frame->f_regs,
	      (caddr_t)kfp->sf_state.ss_frame.f_regs, sizeof frame->f_regs);
	if (ft >= FMT7) {
#ifdef DEBUG
		if (ft > 15 || exframesize[ft] < 0)
			panic("sendsig: bogus frame type");
#endif
		kfp->sf_state.ss_flags |= SS_RTEFRAME;
		kfp->sf_state.ss_frame.f_format = frame->f_format;
		kfp->sf_state.ss_frame.f_vector = frame->f_vector;
		bcopy((caddr_t)&frame->F_u,
		      (caddr_t)&kfp->sf_state.ss_frame.F_u, exframesize[ft]);
		/*
		 * Leave an indicator that we need to clean up the kernel
		 * stack.  We do this by setting the "pad word" above the
		 * hardware stack frame to the amount the stack must be
		 * adjusted by.
		 *
		 * N.B. we increment rather than just set f_stackadj in
		 * case we are called from syscall when processing a
		 * sigreturn.  In that case, f_stackadj may be non-zero.
		 */
		frame->f_stackadj += exframesize[ft];
		frame->f_format = frame->f_vector = 0;
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sendsig(%d): copy out %d of frame %d\n",
			       p->p_pid, exframesize[ft], ft);
#endif
	}
#ifdef FPCOPROC
	kfp->sf_state.ss_flags |= SS_FPSTATE;
	m68881_save(&kfp->sf_state.ss_fpstate);
#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&kfp->sf_state.ss_fpstate)
		printf("sendsig(%d): copy out FP state (%x) to %x\n",
		       p->p_pid, *(u_int *)&kfp->sf_state.ss_fpstate,
		       &kfp->sf_state.ss_fpstate);
#endif
#endif
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	kfp->sf_sc.sc_onstack = oonstack;
	kfp->sf_sc.sc_mask = mask;
	kfp->sf_sc.sc_sp = frame->f_regs[SP];
	kfp->sf_sc.sc_fp = frame->f_regs[A6];
	kfp->sf_sc.sc_ap = (int)&fp->sf_state;
	kfp->sf_sc.sc_pc = frame->f_pc;
	kfp->sf_sc.sc_ps = frame->f_sr;
#ifdef HPUXCOMPAT
	/*
	 * Create an HP-UX style sigcontext structure and associated goo
	 */
	if (p->p_md.md_flags & MDP_HPUX) {
		register struct hpuxsigframe *hkfp;

		hkfp = (struct hpuxsigframe *)&kfp[1];
		hkfp->hsf_signum = bsdtohpuxsig(kfp->sf_signum);
		hkfp->hsf_code = kfp->sf_code;
		hkfp->hsf_scp = (struct sigcontext *)
			&((struct hpuxsigframe *)(&fp[1]))->hsf_sc;
		hkfp->hsf_sc.hsc_syscall = 0;		/* XXX */
		hkfp->hsf_sc.hsc_action = 0;		/* XXX */
		hkfp->hsf_sc.hsc_pad1 = hkfp->hsf_sc.hsc_pad2 = 0;
		hkfp->hsf_sc.hsc_onstack = kfp->sf_sc.sc_onstack;
		hkfp->hsf_sc.hsc_mask = kfp->sf_sc.sc_mask;
		hkfp->hsf_sc.hsc_sp = kfp->sf_sc.sc_sp;
		hkfp->hsf_sc.hsc_ps = kfp->sf_sc.sc_ps;
		hkfp->hsf_sc.hsc_pc = kfp->sf_sc.sc_pc;
		hkfp->hsf_sc.hsc_pad = 0;
		/* magic/realsc let sigreturn find the real BSD context */
		hkfp->hsf_sc.hsc_magic = 0xdeadbeef;
		hkfp->hsf_sc.hsc_realsc = kfp->sf_scp;
		bcopy((caddr_t)frame->f_regs, (caddr_t)hkfp->hsf_regs,
		      sizeof (hkfp->hsf_regs));

		kfp->sf_signum = hkfp->hsf_signum;
		kfp->sf_scp = hkfp->hsf_scp;
	}
#endif
	(void) copyout((caddr_t)kfp, (caddr_t)fp, fsize);
	frame->f_regs[SP] = (int)fp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig(%d): sig %d scp %x fp %x sc_sp %x sc_ap %x\n",
		       p->p_pid, sig, kfp->sf_scp, fp,
		       kfp->sf_sc.sc_sp, kfp->sf_sc.sc_ap);
#endif
	/*
	 * Signal trampoline code is at base of user stack.
	 */
	frame->f_pc = (int)PS_STRINGS - (esigcode - sigcode);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d returns\n",
		       p->p_pid, sig);
#endif
	free((caddr_t)kfp, M_TEMP);
}
862 
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper priviledges or to cause
 * a machine fault.
 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;	/* user address of saved context */
};
/* ARGSUSED */
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct frame *frame;
	register int rf;
	struct sigcontext tsigc;
	struct sigstate tstate;
	int flags;

	scp = uap->sigcntxp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
#endif
	/* context pointer must be word aligned */
	if ((int)scp & 1)
		return (EINVAL);
#ifdef HPUXCOMPAT
	/*
	 * Grab context as an HP-UX style context and determine if it
	 * was one that we contructed in sendsig.
	 */
	if (p->p_md.md_flags & MDP_HPUX) {
		struct hpuxsigcontext *hscp = (struct hpuxsigcontext *)scp;
		struct hpuxsigcontext htsigc;

		if (useracc((caddr_t)hscp, sizeof (*hscp), B_WRITE) == 0 ||
		    copyin((caddr_t)hscp, (caddr_t)&htsigc, sizeof htsigc))
			return (EINVAL);
		/*
		 * If not generated by sendsig or we cannot restore the
		 * BSD-style sigcontext, just restore what we can -- state
		 * will be lost, but them's the breaks.
		 */
		hscp = &htsigc;
		/* the magic cookie was planted by sendsig */
		if (hscp->hsc_magic != 0xdeadbeef ||
		    (scp = hscp->hsc_realsc) == 0 ||
		    useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
		    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc)) {
			if (hscp->hsc_onstack & 01)
				p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
			else
				p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
			p->p_sigmask = hscp->hsc_mask &~ sigcantmask;
			frame = (struct frame *) p->p_md.md_regs;
			frame->f_regs[SP] = hscp->hsc_sp;
			frame->f_pc = hscp->hsc_pc;
			frame->f_sr = hscp->hsc_ps &~ PSL_USERCLR;
			return (EJUSTRETURN);
		}
		/*
		 * Otherwise, overlay BSD context with possibly modified
		 * HP-UX values.
		 */
		tsigc.sc_onstack = hscp->hsc_onstack;
		tsigc.sc_mask = hscp->hsc_mask;
		tsigc.sc_sp = hscp->hsc_sp;
		tsigc.sc_ps = hscp->hsc_ps;
		tsigc.sc_pc = hscp->hsc_pc;
	} else
#endif
	/*
	 * Test and fetch the context structure.
	 * We grab it all at once for speed.
	 */
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
	    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
		return (EINVAL);
	scp = &tsigc;
	/* refuse supervisor bits, elevated IPL, or must-be-zero bits */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
		return (EINVAL);
	/*
	 * Restore the user supplied information
	 */
	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	p->p_sigmask = scp->sc_mask &~ sigcantmask;
	frame = (struct frame *) p->p_md.md_regs;
	frame->f_regs[SP] = scp->sc_sp;
	frame->f_regs[A6] = scp->sc_fp;
	frame->f_pc = scp->sc_pc;
	frame->f_sr = scp->sc_ps;
	/*
	 * Grab pointer to hardware state information.
	 * If zero, the user is probably doing a longjmp.
	 */
	if ((rf = scp->sc_ap) == 0)
		return (EJUSTRETURN);
	/*
	 * See if there is anything to do before we go to the
	 * expense of copying in close to 1/2K of data
	 */
	flags = fuword((caddr_t)rf);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): sc_ap %x flags %x\n",
		       p->p_pid, rf, flags);
#endif
	/*
	 * fuword failed (bogus sc_ap value).
	 */
	if (flags == -1)
		return (EINVAL);
	if (flags == 0 || copyin((caddr_t)rf, (caddr_t)&tstate, sizeof tstate))
		return (EJUSTRETURN);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sigreturn(%d): ssp %x usp %x scp %x ft %d\n",
		       p->p_pid, &flags, scp->sc_sp, uap->sigcntxp,
		       (flags&SS_RTEFRAME) ? tstate.ss_frame.f_format : -1);
#endif
	/*
	 * Restore most of the users registers except for A6 and SP
	 * which were handled above.
	 */
	if (flags & SS_USERREGS)
		bcopy((caddr_t)tstate.ss_frame.f_regs,
		      (caddr_t)frame->f_regs, sizeof(frame->f_regs)-2*NBPW);
	/*
	 * Restore long stack frames.  Note that we do not copy
	 * back the saved SR or PC, they were picked up above from
	 * the sigcontext structure.
	 */
	if (flags & SS_RTEFRAME) {
		register int sz;

		/* grab frame type and validate */
		sz = tstate.ss_frame.f_format;
		if (sz > 15 || (sz = exframesize[sz]) < 0)
			return (EINVAL);
		frame->f_stackadj -= sz;
		frame->f_format = tstate.ss_frame.f_format;
		frame->f_vector = tstate.ss_frame.f_vector;
		bcopy((caddr_t)&tstate.ss_frame.F_u, (caddr_t)&frame->F_u, sz);
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sigreturn(%d): copy in %d of frame type %d\n",
			       p->p_pid, sz, tstate.ss_frame.f_format);
#endif
	}
#ifdef FPCOPROC
	/*
	 * Finally we restore the original FP context
	 */
	if (flags & SS_FPSTATE)
		m68881_restore(&tstate.ss_fpstate);
#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
		printf("sigreturn(%d): copied in FP state (%x) at %x\n",
		       p->p_pid, *(u_int *)&tstate.ss_fpstate,
		       &tstate.ss_fpstate);
#endif
#endif
#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
1041 
int	waittime = -1;	/* -1 until boot() syncs disks; 0 afterwards (sync only once) */
1043 
/*
 * Machine-dependent halt/reboot.  "howto" is a mask of RB_* flags
 * from <sys/reboot.h>: RB_NOSYNC skips the disk sync, RB_HALT stops
 * the processor instead of rebooting, RB_DUMP takes a crash dump
 * before rebooting.  Does not return.
 */
boot(howto)
	register int howto;
{
	/* take a snap shot before clobbering any registers */
	if (curproc && curproc->p_addr)
		savectx(curproc->p_addr, 0);

	boothowto = howto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;	/* only try the sync once */
		(void) spl0();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);
#ifdef notdef
#include "vn.h"
#if NVN > 0
		vnshutdown();
#endif
#endif
		sync(&proc0, (void *)NULL, (int *)NULL);

		/*
		 * Poll the buffer cache up to 20 times, waiting a little
		 * longer on each pass, until no buffers remain busy.
		 */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splhigh();			/* extreme priority */
	if (howto&RB_HALT) {
		printf("halted\n\n");
		/* stop the CPU with all interrupts masked (SR = 0x2700) */
		asm("	stop	#0x2700");
	} else {
		if (howto & RB_DUMP)
			dumpsys();
		doboot();
		/*NOTREACHED*/
	}
	/*NOTREACHED*/
}
1104 
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* dump size in pages; also read by savecore */
long	dumplo = 0;		/* starting offset on dumpdev, in disk blocks */
1108 
1109 dumpconf()
1110 {
1111 	int nblks;
1112 
1113 	/*
1114 	 * XXX include the final RAM page which is not included in physmem.
1115 	 */
1116 	dumpsize = physmem + 1;
1117 	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
1118 		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1119 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
1120 			dumpsize = btoc(dbtob(nblks - dumplo));
1121 		else if (dumplo == 0)
1122 			dumplo = nblks - btodb(ctob(dumpsize));
1123 	}
1124 	/*
1125 	 * Don't dump on the first CLBYTES (why CLBYTES?)
1126 	 * in case the dump device includes a disk label.
1127 	 */
1128 	if (dumplo < btodb(CLBYTES))
1129 		dumplo = btodb(CLBYTES);
1130 }
1131 
1132 /*
1133  * Doadump comes here after turning off memory management and
1134  * getting on the dump stack, either when called above, or by
1135  * the auto-restart code.
1136  */
1137 dumpsys()
1138 {
1139 
1140 	msgbufmapped = 0;
1141 	if (dumpdev == NODEV)
1142 		return;
1143 	/*
1144 	 * For dumps during autoconfiguration,
1145 	 * if dump device has already configured...
1146 	 */
1147 	if (dumpsize == 0)
1148 		dumpconf();
1149 	if (dumplo < 0)
1150 		return;
1151 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
1152 	printf("dump ");
1153 	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
1154 
1155 	case ENXIO:
1156 		printf("device bad\n");
1157 		break;
1158 
1159 	case EFAULT:
1160 		printf("device not ready\n");
1161 		break;
1162 
1163 	case EINVAL:
1164 		printf("area improper\n");
1165 		break;
1166 
1167 	case EIO:
1168 		printf("i/o error\n");
1169 		break;
1170 
1171 	default:
1172 		printf("succeeded\n");
1173 		break;
1174 	}
1175 }
1176 
/*
 * Machine-dependent CPU setup done once at boot: enables parity
 * error detection and, when configured, initializes the mapped-copy
 * threshold and the front-panel LEDs.
 */
initcpu()
{
#ifdef MAPPEDCOPY
	extern u_int mappedcopysize;

	/*
	 * Initialize lower bound for doing copyin/copyout using
	 * page mapping (if not already set).  We don't do this on
	 * VAC machines as it loses big time.
	 */
	if (mappedcopysize == 0) {
		if (ectype == EC_VIRT)
			mappedcopysize = (u_int) -1;	/* i.e. never use mapping */
		else
			mappedcopysize = NBPG;
	}
#endif
	parityenable();
#ifdef USELEDS
	ledinit();
#endif
}
1199 
1200 straytrap(pc, evec)
1201 	int pc;
1202 	u_short evec;
1203 {
1204 	printf("unexpected trap (vector offset %x) from %x\n",
1205 	       evec & 0xFFF, pc);
1206 }
1207 
int	*nofault;	/* when set, points at a label_t to longjmp to on bus error (see badaddr) */
1209 
/*
 * Probe a word at "addr"; return non-zero if the access faults,
 * zero if it is readable.  Recovery from the bus error is via
 * setjmp/longjmp through "nofault".  Not re-entrant.
 */
badaddr(addr)
	register caddr_t addr;
{
	register int i;
	label_t	faultbuf;

#ifdef lint
	i = *addr; if (i) return(0);
#endif
	nofault = (int *) &faultbuf;	/* arm the bus-error catcher */
	if (setjmp((label_t *)nofault)) {
		/* reached via longjmp: the probe faulted */
		nofault = (int *) 0;
		return(1);
	}
	i = *(volatile short *)addr;	/* the probe itself; value unused */
	nofault = (int *) 0;
	return(0);
}
1228 
/*
 * Byte-wide version of badaddr(): probe a single byte at "addr"
 * and return non-zero if the access faults.  Not re-entrant.
 */
badbaddr(addr)
	register caddr_t addr;
{
	register int i;
	label_t	faultbuf;

#ifdef lint
	i = *addr; if (i) return(0);
#endif
	nofault = (int *) &faultbuf;	/* arm the bus-error catcher */
	if (setjmp((label_t *)nofault)) {
		/* reached via longjmp: the probe faulted */
		nofault = (int *) 0;
		return(1);
	}
	i = *(volatile char *)addr;	/* byte probe; value unused */
	nofault = (int *) 0;
	return(0);
}
1247 
/*
 * Network software-interrupt dispatcher: for each protocol whose
 * bit is set in "netisr", clear the bit and call its interrupt
 * routine.  The clear-before-call order matches the original code.
 */
#define	DONET(bit, handler) { \
	if (netisr & (1 << (bit))) { \
		netisr &= ~(1 << (bit)); \
		handler(); \
	} \
}

netintr()
{
#ifdef INET
	DONET(NETISR_ARP, arpintr)
	DONET(NETISR_IP, ipintr)
#endif
#ifdef NS
	DONET(NETISR_NS, nsintr)
#endif
#ifdef ISO
	DONET(NETISR_ISO, clnlintr)
#endif
#ifdef CCITT
	DONET(NETISR_CCITT, ccittintr)
#endif
}

#undef DONET
1279 
/*
 * Common interrupt dispatcher.  "sr" is the saved status register;
 * its interrupt priority level field picks the ISR chain to run.
 * Levels 3-5 are dispatched through isrqueue[]; other levels, and
 * any interrupt no handler claims, count as stray — more than 50
 * consecutive strays is a panic.
 */
intrhand(sr)
	int sr;
{
	register struct isr *isr;
	register int found = 0;
	register int ipl;
	extern struct isr isrqueue[];
	static int straycount;	/* consecutive unclaimed interrupts */

	ipl = (sr >> 8) & 7;	/* interrupt level field of the SR */
	switch (ipl) {

	case 3:
	case 4:
	case 5:
		/* walk this level's handler list until one claims it */
		ipl = ISRIPL(ipl);
		isr = isrqueue[ipl].isr_forw;
		for (; isr != &isrqueue[ipl]; isr = isr->isr_forw) {
			if ((isr->isr_intr)(isr->isr_arg)) {
				found++;
				break;
			}
		}
		if (found)
			straycount = 0;
		else if (++straycount > 50)
			panic("intrhand: stray interrupt");
		else
			printf("stray interrupt, sr 0x%x\n", sr);
		break;

	case 0:
	case 1:
	case 2:
	case 6:
	case 7:
		/* these levels should never be vectored through here */
		if (++straycount > 50)
			panic("intrhand: unexpected sr");
		else
			printf("intrhand: unexpected sr 0x%x\n", sr);
		break;
	}
}
1323 
#if defined(DEBUG) && !defined(PANICBUTTON)
#define PANICBUTTON
#endif

#ifdef PANICBUTTON
int panicbutton = 1;	/* non-zero if panic buttons are enabled */
int crashandburn = 0;	/* armed by first keyboard NMI; second NMI in time panics */
int candbdelay = 50;	/* give em half a second */

/*
 * Timeout routine: the second reset press didn't arrive in time,
 * so disarm the forced-crash trigger.
 */
void
candbtimer(arg)
	void *arg;
{

	crashandburn = 0;
}
#endif
1341 
1342 /*
1343  * Level 7 interrupts can be caused by the keyboard or parity errors.
1344  */
1345 nmihand(frame)
1346 	struct frame frame;
1347 {
1348 	if (kbdnmi()) {
1349 #ifdef PANICBUTTON
1350 		static int innmihand = 0;
1351 
1352 		/*
1353 		 * Attempt to reduce the window of vulnerability for recursive
1354 		 * NMIs (e.g. someone holding down the keyboard reset button).
1355 		 */
1356 		if (innmihand == 0) {
1357 			innmihand = 1;
1358 			printf("Got a keyboard NMI\n");
1359 			innmihand = 0;
1360 		}
1361 		if (panicbutton) {
1362 			if (crashandburn) {
1363 				crashandburn = 0;
1364 				panic(panicstr ?
1365 				      "forced crash, nosync" : "forced crash");
1366 			}
1367 			crashandburn++;
1368 			timeout(candbtimer, (void *)0, candbdelay);
1369 		}
1370 #endif
1371 		return;
1372 	}
1373 	if (parityerror(&frame))
1374 		return;
1375 	/* panic?? */
1376 	printf("unexpected level 7 interrupt ignored\n");
1377 }
1378 
1379 /*
1380  * Parity error section.  Contains magic.
1381  */
1382 #define PARREG		((volatile short *)IIOV(0x5B0000))
1383 static int gotparmem = 0;
1384 #ifdef DEBUG
1385 int ignorekperr = 0;	/* ignore kernel parity errors */
1386 #endif
1387 
1388 /*
1389  * Enable parity detection
1390  */
1391 parityenable()
1392 {
1393 	label_t	faultbuf;
1394 
1395 	nofault = (int *) &faultbuf;
1396 	if (setjmp((label_t *)nofault)) {
1397 		nofault = (int *) 0;
1398 #ifdef DEBUG
1399 		printf("No parity memory\n");
1400 #endif
1401 		return;
1402 	}
1403 	*PARREG = 1;
1404 	nofault = (int *) 0;
1405 	gotparmem = 1;
1406 #ifdef DEBUG
1407 	printf("Parity detection enabled\n");
1408 #endif
1409 }
1410 
1411 /*
1412  * Determine if level 7 interrupt was caused by a parity error
1413  * and deal with it if it was.  Returns 1 if it was a parity error.
1414  */
1415 parityerror(fp)
1416 	struct frame *fp;
1417 {
1418 	if (!gotparmem)
1419 		return(0);
1420 	*PARREG = 0;
1421 	DELAY(10);
1422 	*PARREG = 1;
1423 	if (panicstr) {
1424 		printf("parity error after panic ignored\n");
1425 		return(1);
1426 	}
1427 	if (!findparerror())
1428 		printf("WARNING: transient parity error ignored\n");
1429 	else if (USERMODE(fp->f_sr)) {
1430 		printf("pid %d: parity error\n", curproc->p_pid);
1431 		uprintf("sorry, pid %d killed due to memory parity error\n",
1432 			curproc->p_pid);
1433 		psignal(curproc, SIGKILL);
1434 #ifdef DEBUG
1435 	} else if (ignorekperr) {
1436 		printf("WARNING: kernel parity error ignored\n");
1437 #endif
1438 	} else {
1439 		regdump(fp, 128);
1440 		panic("kernel parity error");
1441 	}
1442 	return(1);
1443 }
1444 
1445 /*
1446  * Yuk!  There has got to be a better way to do this!
1447  * Searching all of memory with interrupts blocked can lead to disaster.
1448  */
1449 findparerror()
1450 {
1451 	static label_t parcatch;
1452 	static int looking = 0;
1453 	volatile int pg, o, s;
1454 	register volatile int *ip;
1455 	register int i;
1456 	int found;
1457 
1458 #ifdef lint
1459 	i = o = pg = 0; if (i) return(0);
1460 #endif
1461 	/*
1462 	 * If looking is true we are searching for a known parity error
1463 	 * and it has just occured.  All we do is return to the higher
1464 	 * level invocation.
1465 	 */
1466 	if (looking)
1467 		longjmp(&parcatch);
1468 	s = splhigh();
1469 	/*
1470 	 * If setjmp returns true, the parity error we were searching
1471 	 * for has just occured (longjmp above) at the current pg+o
1472 	 */
1473 	if (setjmp(&parcatch)) {
1474 		printf("Parity error at 0x%x\n", ctob(pg)|o);
1475 		found = 1;
1476 		goto done;
1477 	}
1478 	/*
1479 	 * If we get here, a parity error has occured for the first time
1480 	 * and we need to find it.  We turn off any external caches and
1481 	 * loop thru memory, testing every longword til a fault occurs and
1482 	 * we regain control at setjmp above.  Note that because of the
1483 	 * setjmp, pg and o need to be volatile or their values will be lost.
1484 	 */
1485 	looking = 1;
1486 	ecacheoff();
1487 	for (pg = btoc(lowram); pg < btoc(lowram)+physmem; pg++) {
1488 		pmap_enter(kernel_pmap, (vm_offset_t)vmmap, ctob(pg),
1489 		    VM_PROT_READ, TRUE);
1490 		ip = (int *)vmmap;
1491 		for (o = 0; o < NBPG; o += sizeof(int))
1492 			i = *ip++;
1493 	}
1494 	/*
1495 	 * Getting here implies no fault was found.  Should never happen.
1496 	 */
1497 	printf("Couldn't locate parity error\n");
1498 	found = 0;
1499 done:
1500 	looking = 0;
1501 	pmap_remove(kernel_pmap, (vm_offset_t)vmmap, (vm_offset_t)&vmmap[NBPG]);
1502 	ecacheon();
1503 	splx(s);
1504 	return(found);
1505 }
1506 
/*
 * Print a register dump (and optionally "sbytes" of stack) to the
 * console for debugging.  Guarded against recursive entry by the
 * static doingdump flag.
 */
regdump(fp, sbytes)
	struct frame *fp; /* must not be register */
	int sbytes;
{
	static int doingdump = 0;	/* non-zero while a dump is in progress */
	register int i;
	int s;
	extern char *hexstr();

	if (doingdump)
		return;
	s = splhigh();
	doingdump = 1;
	printf("pid = %d, pc = %s, ",
	       curproc ? curproc->p_pid : -1, hexstr(fp->f_pc, 8));
	printf("ps = %s, ", hexstr(fp->f_sr, 4));
	printf("sfc = %s, ", hexstr(getsfc(), 4));
	printf("dfc = %s\n", hexstr(getdfc(), 4));
	printf("Registers:\n     ");
	for (i = 0; i < 8; i++)
		printf("        %d", i);
	printf("\ndreg:");
	for (i = 0; i < 8; i++)
		printf(" %s", hexstr(fp->f_regs[i], 8));
	printf("\nareg:");
	for (i = 0; i < 8; i++)
		printf(" %s", hexstr(fp->f_regs[i+8], 8));
	if (sbytes > 0) {
		if (fp->f_sr & PSL_S) {
			/* supervisor mode: dump from the kernel stack */
			printf("\n\nKernel stack (%s):",
			       hexstr((int)(((int *)&fp)-1), 8));
			dumpmem(((int *)&fp)-1, sbytes, 0);
		} else {
			/* user mode: dump from the saved user SP */
			printf("\n\nUser stack (%s):", hexstr(fp->f_regs[SP], 8));
			dumpmem((int *)fp->f_regs[SP], sbytes, 1);
		}
	}
	doingdump = 0;
	splx(s);
}
1547 
extern char kstack[];
/* start of the top kernel-stack page; bounds kernel-stack dumps in dumpmem() */
#define KSADDR	((int *)&(kstack[(UPAGES-1)*NBPG]))
1550 
/*
 * Dump "sz" longwords starting at "ptr" to the console, eight per
 * line.  ustack selects the fetch method: 1 reads through fuword()
 * (user addresses, stops on fault), 0 reads directly but only within
 * the KSADDR kernel-stack page, any other value reads directly.
 */
dumpmem(ptr, sz, ustack)
	register int *ptr;
	int sz, ustack;
{
	register int i, val;
	extern char *hexstr();

	for (i = 0; i < sz; i++) {
		if ((i & 7) == 0)
			printf("\n%s: ", hexstr((int)ptr, 6));
		else
			printf(" ");
		if (ustack == 1) {
			/* user address: fuword returns -1 on fault */
			if ((val = fuword(ptr++)) == -1)
				break;
		} else {
			/* kernel stack dump: stop if outside the stack page */
			if (ustack == 0 &&
			    (ptr < KSADDR || ptr > KSADDR+(NBPG/4-1)))
				break;
			val = *ptr++;
		}
		printf("%s", hexstr(val, 8));
	}
	printf("\n");
}
1576 
/*
 * Convert "val" to a "len"-digit (0-8) uppercase hex string.
 * Returns a pointer to a static buffer, so the result is valid
 * only until the next call; returns "" for out-of-range lengths.
 */
char *
hexstr(val, len)
	register int val;
	int len;
{
	/* renamed from "nbuf": it shadowed the file-scope buffer count */
	static char hexbuf[9];
	register int x, i;

	/* reject negative lengths too: hexbuf[len] would write OOB */
	if (len < 0 || len > 8)
		return("");
	hexbuf[len] = '\0';
	for (i = len-1; i >= 0; --i) {
		x = val & 0xF;
		if (x > 9)
			hexbuf[i] = x - 10 + 'A';
		else
			hexbuf[i] = x + '0';
		val >>= 4;
	}
	return(hexbuf);
}
1598 
#ifdef DEBUG
char oflowmsg[] = "k-stack overflow";
char uflowmsg[] = "k-stack underflow";

/*
 * Called when the kernel stack pointer is found outside its expected
 * bounds.  "oflow" selects the overflow vs. underflow message.
 * Reports where SP should have been, dumps registers, and panics.
 */
badkstack(oflow, fr)
	int oflow;
	struct frame fr;
{
	extern char kstackatbase[];

	printf("%s: sp should be %x\n",
	       oflow ? oflowmsg : uflowmsg,
	       kstackatbase - (exframesize[fr.f_format] + 8));
	regdump(&fr, 0);
	panic(oflow ? oflowmsg : uflowmsg);
}
#endif
1616