1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
13  *
14  *	@(#)machdep.c	8.10 (Berkeley) 04/20/94
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/signalvar.h>
20 #include <sys/kernel.h>
21 #include <sys/map.h>
22 #include <sys/proc.h>
23 #include <sys/buf.h>
24 #include <sys/reboot.h>
25 #include <sys/conf.h>
26 #include <sys/file.h>
27 #include <sys/clist.h>
28 #include <sys/callout.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/msgbuf.h>
32 #include <sys/ioctl.h>
33 #include <sys/tty.h>
34 #include <sys/mount.h>
35 #include <sys/user.h>
36 #include <sys/exec.h>
37 #include <sys/sysctl.h>
38 #ifdef SYSVSHM
39 #include <sys/shm.h>
40 #endif
41 #ifdef HPUXCOMPAT
42 #include <hp/hpux/hpux.h>
43 #endif
44 
45 #include <machine/cpu.h>
46 #include <machine/reg.h>
47 #include <machine/psl.h>
48 #include <hp/dev/cons.h>
49 #include <hp300/hp300/isr.h>
50 #include <hp300/hp300/pte.h>
51 #include <net/netisr.h>
52 
53 #define	MAXMEM	64*1024*CLSIZE	/* XXX - from cmap.h */
54 #include <vm/vm_kern.h>
55 
56 /* the following is used externally (sysctl_hw) */
57 char machine[] = "hp300";		/* cpu "architecture" */
58 
59 vm_map_t buffer_map;
60 extern vm_offset_t avail_end;
61 
62 /*
63  * Declare these as initialized data so we can patch them.
64  */
65 int	nswbuf = 0;
66 #ifdef	NBUF
67 int	nbuf = NBUF;
68 #else
69 int	nbuf = 0;
70 #endif
71 #ifdef	BUFPAGES
72 int	bufpages = BUFPAGES;
73 #else
74 int	bufpages = 0;
75 #endif
76 int	msgbufmapped;		/* set when safe to use msgbuf */
77 int	maxmem;			/* max memory per process */
78 int	physmem = MAXMEM;	/* max supported memory, changes to actual */
79 /*
80  * safepri is a safe priority for sleep to set for a spin-wait
81  * during autoconfiguration or after a panic.
82  */
83 int	safepri = PSL_LOWIPL;
84 
85 extern	u_int lowram;
86 extern	short exframesize[];
87 
88 /*
89  * Console initialization: called early on from main,
90  * before vm init or startup.  Do enough configuration
91  * to choose and initialize a console.
92  */
93 consinit()
94 {
95 
96 	/*
97 	 * Set cpuspeed immediately since routines called by cninit()
98 	 * might use delay.  Note that we only set it if a custom value
99 	 * has not already been specified.
100 	 */
101 	if (cpuspeed == 0) {
102 		switch (machineid) {
103 		case HP_320:
104 		case HP_330:
105 		case HP_340:
106 			cpuspeed = MHZ_16;
107 			break;
108 		case HP_350:
109 		case HP_360:
110 		case HP_380:
111 			cpuspeed = MHZ_25;
112 			break;
113 		case HP_370:
114 		case HP_433:
115 			cpuspeed = MHZ_33;
116 			break;
117 		case HP_375:
118 			cpuspeed = MHZ_50;
119 			break;
120 		default:	/* assume the fastest */
121 			cpuspeed = MHZ_50;
122 			break;
123 		}
124 		if (mmutype == MMU_68040)
125 			cpuspeed *= 2;	/* XXX */
126 	}
127 	/*
128 	 * Find what hardware is attached to this machine.
129 	 */
130 	find_devs();
131 
132 	/*
133 	 * Initialize the console before we print anything out.
134 	 */
135 	cninit();
136 }
137 
138 /*
139  * cpu_startup: allocate memory for variable-sized tables,
140  * initialize cpu, and do autoconfiguration.
141  */
142 cpu_startup()
143 {
144 	register unsigned i;
145 	register caddr_t v, firstaddr;
146 	int base, residual;
147 	vm_offset_t minaddr, maxaddr;
148 	vm_size_t size;
149 #ifdef BUFFERS_UNMANAGED
150 	vm_offset_t bufmemp;
151 	caddr_t buffermem;
152 	int ix;
153 #endif
154 #ifdef DEBUG
155 	extern int pmapdebug;
156 	int opmapdebug = pmapdebug;
157 
158 	pmapdebug = 0;
159 #endif
160 
161 	/*
162 	 * Initialize error message buffer (at end of core).
163 	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
164 	 */
165 	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
166 		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
167 		    avail_end + i * NBPG, VM_PROT_ALL, TRUE);
168 	msgbufmapped = 1;
169 
170 	/*
171 	 * Good {morning,afternoon,evening,night}.
172 	 */
173 	printf(version);
174 	identifycpu();
175 	printf("real mem = %d\n", ctob(physmem));
176 
177 	/*
178 	 * Allocate space for system data structures.
179 	 * The first available real memory address is in "firstaddr".
180 	 * The first available kernel virtual address is in "v".
181 	 * As pages of kernel virtual memory are allocated, "v" is incremented.
182 	 * As pages of memory are allocated and cleared,
183 	 * "firstaddr" is incremented.
184 	 * An index into the kernel page table corresponding to the
185 	 * virtual memory address maintained in "v" is kept in "mapaddr".
186 	 */
187 	/*
188 	 * Make two passes.  The first pass calculates how much memory is
189 	 * needed and allocates it.  The second pass assigns virtual
190 	 * addresses to the various data structures.
191 	 */
192 	firstaddr = 0;
193 again:
194 	v = (caddr_t)firstaddr;
195 
196 #define	valloc(name, type, num) \
197 	    (name) = (type *)v; v = (caddr_t)((name)+(num))
198 #define	valloclim(name, type, num, lim) \
199 	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
200 	valloc(cfree, struct cblock, nclist);
201 	valloc(callout, struct callout, ncallout);
202 	valloc(swapmap, struct map, nswapmap = maxproc * 2);
203 #ifdef SYSVSHM
204 	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
205 #endif
206 
207 	/*
208 	 * Determine how many buffers to allocate.
209 	 * Since HPs tend to be long on memory and short on disk speed,
210 	 * we allocate more buffer space than the BSD standard of
211 	 * 10% of memory for the first 2 Meg and 5% of the remainder.
212 	 * We just allocate a flat 10%.  Ensure a minimum of 16 buffers.
213 	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
214 	 */
215 	if (bufpages == 0)
216 		bufpages = physmem / 10 / CLSIZE;
217 	if (nbuf == 0) {
218 		nbuf = bufpages;
219 		if (nbuf < 16)
220 			nbuf = 16;
221 	}
222 	if (nswbuf == 0) {
223 		nswbuf = (nbuf / 2) &~ 1;	/* force even */
224 		if (nswbuf > 256)
225 			nswbuf = 256;		/* sanity */
226 	}
227 	valloc(swbuf, struct buf, nswbuf);
228 	valloc(buf, struct buf, nbuf);
229 	/*
230 	 * End of first pass, size has been calculated so allocate memory
231 	 */
232 	if (firstaddr == 0) {
233 		size = (vm_size_t)(v - firstaddr);
234 		firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
235 		if (firstaddr == 0)
236 			panic("startup: no room for tables");
237 #ifdef BUFFERS_UNMANAGED
238 		buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*CLBYTES);
239 		if (buffermem == 0)
240 			panic("startup: no room for buffers");
241 #endif
242 		goto again;
243 	}
244 	/*
245 	 * End of second pass, addresses have been assigned
246 	 */
247 	if ((vm_size_t)(v - firstaddr) != size)
248 		panic("startup: table size inconsistency");
249 	/*
250 	 * Now allocate buffers proper.  They are different from the above
251 	 * in that they usually occupy more virtual memory than physical.
252 	 */
253 	size = MAXBSIZE * nbuf;
254 	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
255 				   &maxaddr, size, TRUE);
256 	minaddr = (vm_offset_t)buffers;
257 	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
258 			&minaddr, size, FALSE) != KERN_SUCCESS)
259 		panic("startup: cannot allocate buffers");
260 	base = bufpages / nbuf;
261 	residual = bufpages % nbuf;
262 #ifdef BUFFERS_UNMANAGED
263 	bufmemp = (vm_offset_t) buffermem;
264 #endif
265 	for (i = 0; i < nbuf; i++) {
266 		vm_size_t curbufsize;
267 		vm_offset_t curbuf;
268 
269 		/*
270 		 * First <residual> buffers get (base+1) physical pages
271 		 * allocated for them.  The rest get (base) physical pages.
272 		 *
273 		 * The rest of each buffer occupies virtual space,
274 		 * but has no physical memory allocated for it.
275 		 */
276 		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
277 		curbufsize = CLBYTES * (i < residual ? base+1 : base);
278 #ifdef BUFFERS_UNMANAGED
279 		/*
280 		 * Move the physical pages over from buffermem.
281 		 */
282 		for (ix = 0; ix < curbufsize/CLBYTES; ix++) {
283 			vm_offset_t pa;
284 
285 			pa = pmap_extract(kernel_pmap, bufmemp);
286 			if (pa == 0)
287 				panic("startup: unmapped buffer");
288 			pmap_remove(kernel_pmap, bufmemp, bufmemp+CLBYTES);
289 			pmap_enter(kernel_pmap,
290 				   (vm_offset_t)(curbuf + ix * CLBYTES),
291 				   pa, VM_PROT_READ|VM_PROT_WRITE, TRUE);
292 			bufmemp += CLBYTES;
293 		}
294 #else
295 		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
296 		vm_map_simplify(buffer_map, curbuf);
297 #endif
298 	}
299 #ifdef BUFFERS_UNMANAGED
300 #if 0
301 	/*
302 	 * We would like to free the (now empty) original address range
303 	 * but too many bad things will happen if we try.
304 	 */
305 	kmem_free(kernel_map, (vm_offset_t)buffermem, bufpages*CLBYTES);
306 #endif
307 #endif
308 	/*
309 	 * Allocate a submap for exec arguments.  This map effectively
310 	 * limits the number of processes exec'ing at any time.
311 	 */
312 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
313 				 16*NCARGS, TRUE);
314 	/*
315 	 * Allocate a submap for physio
316 	 */
317 	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
318 				 VM_PHYS_SIZE, TRUE);
319 
320 	/*
321 	 * Finally, allocate mbuf pool.  Since mclrefcnt is an odd size,
322 	 * we use the more space-efficient malloc in place of kmem_alloc.
323 	 */
324 	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
325 				   M_MBUF, M_NOWAIT);
326 	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
327 	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
328 			       VM_MBUF_SIZE, FALSE);
329 	/*
330 	 * Initialize callouts
331 	 */
332 	callfree = callout;
333 	for (i = 1; i < ncallout; i++)
334 		callout[i-1].c_next = &callout[i];
335 	callout[i-1].c_next = NULL;
336 
337 #ifdef DEBUG
338 	pmapdebug = opmapdebug;
339 #endif
340 	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
341 	printf("using %d buffers containing %d bytes of memory\n",
342 		nbuf, bufpages * CLBYTES);
343 	/*
344 	 * Set up CPU-specific registers, cache, etc.
345 	 */
346 	initcpu();
347 
348 	/*
349 	 * Set up buffers, so they can be used to read disk labels.
350 	 */
351 	bufinit();
352 
353 	/*
354 	 * Configure the system.
355 	 */
356 	configure();
357 }
358 
359 /*
360  * Set registers on exec.
361  * XXX Should clear registers except sp, pc,
362  * but would break init; should be fixed soon.
363  */
364 setregs(p, entry, retval)
365 	register struct proc *p;
366 	u_long entry;
367 	int retval[2];
368 {
369 	struct frame *frame = (struct frame *)p->p_md.md_regs;
370 
371 	frame->f_pc = entry & ~1;
372 #ifdef FPCOPROC
373 	/* restore a null state frame */
374 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
375 	m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
376 #endif
377 #ifdef HPUXCOMPAT
378 	if (p->p_md.md_flags & MDP_HPUX) {
379 
380 		frame->f_regs[A0] = 0; /* not 68010 (bit 31), no FPA (30) */
381 		retval[0] = 0;		/* no float card */
382 #ifdef FPCOPROC
383 		retval[1] = 1;		/* yes 68881 */
384 #else
385 		retval[1] = 0;		/* no 68881 */
386 #endif
387 	}
388 	/*
389 	 * XXX This doesn't have much to do with setting registers but
390 	 * I didn't want to muck up kern_exec.c with this code, so I
391 	 * stuck it here.
392 	 *
393 	 * Ensure we perform the right action on traps type 1 and 2:
394 	 * If our parent is an HPUX process and we are being traced, turn
395 	 * on HPUX style interpretation.  Else if we were using the HPUX
396 	 * style interpretation, revert to the BSD interpretation.
397 	 *
398 	 * Note that we do this by changing the trap instruction in the
399 	 * global "sigcode" array which then gets copied out to the user's
400 	 * sigcode in the stack.  Since we are changing it in the global
401 	 * array we must always reset it, even for non-HPUX processes.
402 	 *
403 	 * Note also that implementing it in this way creates a potential
404 	 * race where we could have tweaked it for process A which then
405 	 * blocks in the copyout to the stack and process B comes along
406 	 * and untweaks it causing A to wind up with the wrong setting
407 	 * when the copyout continues.  However, since we have already
408 	 * copied something out to this user stack page (thereby faulting
409 	 * it in), this scenario is extremely unlikely.
410 	 */
411 	{
412 		extern short sigcodetrap[];
413 
414 		if ((p->p_pptr->p_md.md_flags & MDP_HPUX) &&
415 		    (p->p_flag & P_TRACED)) {
416 			p->p_md.md_flags |= MDP_HPUXTRACE;
417 			*sigcodetrap = 0x4E42;
418 		} else {
419 			p->p_md.md_flags &= ~MDP_HPUXTRACE;
420 			*sigcodetrap = 0x4E41;
421 		}
422 	}
423 #endif
424 }
425 
426 /*
427  * Info for CTL_HW
428  */
429 char	cpu_model[120];
430 extern	char version[];
431 
432 identifycpu()
433 {
434 	char *t, *mc;
435 	int len;
436 
437 	switch (machineid) {
438 	case HP_320:
439 		t = "320 (16.67MHz";
440 		break;
441 	case HP_330:
442 		t = "318/319/330 (16.67MHz";
443 		break;
444 	case HP_340:
445 		t = "340 (16.67MHz";
446 		break;
447 	case HP_350:
448 		t = "350 (25MHz";
449 		break;
450 	case HP_360:
451 		t = "360 (25MHz";
452 		break;
453 	case HP_370:
454 		t = "370 (33.33MHz";
455 		break;
456 	case HP_375:
457 		t = "345/375 (50MHz";
458 		break;
459 	case HP_380:
460 		t = "380/425 (25MHz";
461 		break;
462 	case HP_433:
463 		t = "433 (33MHz";
464 		break;
465 	default:
466 		printf("\nunknown machine type %d\n", machineid);
467 		panic("startup");
468 	}
469 	mc = (mmutype == MMU_68040 ? "40" :
470 	       (mmutype == MMU_68030 ? "30" : "20"));
471 	sprintf(cpu_model, "HP9000/%s MC680%s CPU", t, mc);
472 	switch (mmutype) {
473 	case MMU_68040:
474 	case MMU_68030:
475 		strcat(cpu_model, "+MMU");
476 		break;
477 	case MMU_68851:
478 		strcat(cpu_model, ", MC68851 MMU");
479 		break;
480 	case MMU_HP:
481 		strcat(cpu_model, ", HP MMU");
482 		break;
483 	default:
484 		printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
485 		panic("startup");
486 	}
487 	len = strlen(cpu_model);
488 	if (mmutype == MMU_68040)
489 		len += sprintf(cpu_model + len,
490 		    "+FPU, 4k on-chip physical I/D caches");
491 	else if (mmutype == MMU_68030)
492 		len += sprintf(cpu_model + len, ", %sMHz MC68882 FPU",
493 		       machineid == HP_340 ? "16.67" :
494 		       (machineid == HP_360 ? "25" :
495 			(machineid == HP_370 ? "33.33" : "50")));
496 	else
497 		len += sprintf(cpu_model + len, ", %sMHz MC68881 FPU",
498 		       machineid == HP_350 ? "20" : "16.67");
499 	switch (ectype) {
500 	case EC_VIRT:
501 		sprintf(cpu_model + len, ", %dK virtual-address cache",
502 		       machineid == HP_320 ? 16 : 32);
503 		break;
504 	case EC_PHYS:
505 		sprintf(cpu_model + len, ", %dK physical-address cache",
506 		       machineid == HP_370 ? 64 : 32);
507 		break;
508 	}
509 	strcat(cpu_model, ")");
510 	printf("%s\n", cpu_model);
511 	/*
512 	 * Now that we have told the user what they have,
513 	 * let them know if that machine type isn't configured.
514 	 */
515 	switch (machineid) {
516 	case -1:		/* keep compilers happy */
517 #if !defined(HP320) && !defined(HP350)
518 	case HP_320:
519 	case HP_350:
520 #endif
521 #ifndef HP330
522 	case HP_330:
523 #endif
524 #if !defined(HP360) && !defined(HP370)
525 	case HP_340:
526 	case HP_360:
527 	case HP_370:
528 #endif
529 #if !defined(HP380)
530 	case HP_380:
531 	case HP_433:
532 #endif
533 		panic("CPU type not configured");
534 	default:
535 		break;
536 	}
537 }
538 
539 /*
540  * machine dependent system variables.
541  */
542 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
543 	int *name;
544 	u_int namelen;
545 	void *oldp;
546 	size_t *oldlenp;
547 	void *newp;
548 	size_t newlen;
549 	struct proc *p;
550 {
551 
552 	/* all sysctl names at this level are terminal */
553 	if (namelen != 1)
554 		return (ENOTDIR);		/* overloaded */
555 
556 	switch (name[0]) {
557 	case CPU_CONSDEV:
558 		return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
559 		    sizeof cn_tty->t_dev));
560 	default:
561 		return (EOPNOTSUPP);
562 	}
563 	/* NOTREACHED */
564 }
565 
566 #ifdef USELEDS
567 #include <hp300/hp300/led.h>
568 
569 int inledcontrol = 0;	/* 1 if we are in ledcontrol already, cheap mutex */
570 char *ledaddr;
571 
572 /*
573  * Map the LED page and setup the KVA to access it.
574  */
575 ledinit()
576 {
577 	extern caddr_t ledbase;
578 
579 	pmap_enter(kernel_pmap, (vm_offset_t)ledbase, (vm_offset_t)LED_ADDR,
580 		   VM_PROT_READ|VM_PROT_WRITE, TRUE);
581 	ledaddr = (char *) ((int)ledbase | (LED_ADDR & PGOFSET));
582 }
583 
584 /*
585  * Do lights:
586  *	`ons' is a mask of LEDs to turn on,
587  *	`offs' is a mask of LEDs to turn off,
588  *	`togs' is a mask of LEDs to toggle.
589  * Note we don't use splclock/splx for mutual exclusion.
590  * They are expensive and we really don't need to be that precise.
591  * Besides we would like to be able to profile this routine.
592  */
593 ledcontrol(ons, offs, togs)
594 	register int ons, offs, togs;
595 {
596 	static char currentleds;
597 	register char leds;
598 
599 	inledcontrol = 1;
600 	leds = currentleds;
601 	if (ons)
602 		leds |= ons;
603 	if (offs)
604 		leds &= ~offs;
605 	if (togs)
606 		leds ^= togs;
607 	currentleds = leds;
608 	*ledaddr = ~leds;
609 	inledcontrol = 0;
610 }
611 #endif
612 
613 #define SS_RTEFRAME	1
614 #define SS_FPSTATE	2
615 #define SS_USERREGS	4
616 
617 struct sigstate {
618 	int	ss_flags;		/* which of the following are valid */
619 	struct	frame ss_frame;		/* original exception frame */
620 	struct	fpframe ss_fpstate;	/* 68881/68882 state info */
621 };
622 
623 /*
624  * WARNING: code in locore.s assumes the layout shown for sf_signum
625  * thru sf_handler so... don't screw with them!
626  */
627 struct sigframe {
628 	int	sf_signum;		/* signo for handler */
629 	int	sf_code;		/* additional info for handler */
630 	struct	sigcontext *sf_scp;	/* context ptr for handler */
631 	sig_t	sf_handler;		/* handler addr for u_sigc */
632 	struct	sigstate sf_state;	/* state of the hardware */
633 	struct	sigcontext sf_sc;	/* actual context */
634 };
635 
636 #ifdef HPUXCOMPAT
637 struct	hpuxsigcontext {
638 	int	hsc_syscall;
639 	char	hsc_action;
640 	char	hsc_pad1;
641 	char	hsc_pad2;
642 	char	hsc_onstack;
643 	int	hsc_mask;
644 	int	hsc_sp;
645 	short	hsc_ps;
646 	int	hsc_pc;
647 /* the rest aren't part of the context but are included for our convenience */
648 	short	hsc_pad;
649 	u_int	hsc_magic;		/* XXX sigreturn: cookie */
650 	struct	sigcontext *hsc_realsc;	/* XXX sigreturn: ptr to BSD context */
651 };
652 
653 /*
654  * For an HP-UX process, a partial hpuxsigframe follows the normal sigframe.
655  * Tremendous waste of space, but some HP-UX applications (e.g. LCL) need it.
656  */
657 struct hpuxsigframe {
658 	int	hsf_signum;
659 	int	hsf_code;
660 	struct	sigcontext *hsf_scp;
661 	struct	hpuxsigcontext hsf_sc;
662 	int	hsf_regs[15];
663 };
664 #endif
665 
666 #ifdef DEBUG
667 int sigdebug = 0;
668 int sigpid = 0;
669 #define SDB_FOLLOW	0x01
670 #define SDB_KSTACK	0x02
671 #define SDB_FPSTATE	0x04
672 #endif
673 
674 /*
675  * Send an interrupt to process.
676  */
677 void
678 sendsig(catcher, sig, mask, code)
679 	sig_t catcher;
680 	int sig, mask;
681 	unsigned code;
682 {
683 	register struct proc *p = curproc;
684 	register struct sigframe *fp, *kfp;
685 	register struct frame *frame;
686 	register struct sigacts *psp = p->p_sigacts;
687 	register short ft;
688 	int oonstack, fsize;
689 	extern char sigcode[], esigcode[];
690 
691 	frame = (struct frame *)p->p_md.md_regs;
692 	ft = frame->f_format;
693 	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
694 	/*
695 	 * Allocate and validate space for the signal handler
696 	 * context. Note that if the stack is in P0 space, the
697 	 * call to grow() is a nop, and the useracc() check
698 	 * will fail if the process has not already allocated
699 	 * the space with a `brk'.
700 	 */
701 #ifdef HPUXCOMPAT
702 	if (p->p_md.md_flags & MDP_HPUX)
703 		fsize = sizeof(struct sigframe) + sizeof(struct hpuxsigframe);
704 	else
705 #endif
706 	fsize = sizeof(struct sigframe);
707 	if ((psp->ps_flags & SAS_ALTSTACK) &&
708 	    (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
709 	    (psp->ps_sigonstack & sigmask(sig))) {
710 		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
711 					 psp->ps_sigstk.ss_size - fsize);
712 		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
713 	} else
714 		fp = (struct sigframe *)(frame->f_regs[SP] - fsize);
715 	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
716 		(void)grow(p, (unsigned)fp);
717 #ifdef DEBUG
718 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
719 		printf("sendsig(%d): sig %d ssp %x usp %x scp %x ft %d\n",
720 		       p->p_pid, sig, &oonstack, fp, &fp->sf_sc, ft);
721 #endif
722 	if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
723 #ifdef DEBUG
724 		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
725 			printf("sendsig(%d): useracc failed on sig %d\n",
726 			       p->p_pid, sig);
727 #endif
728 		/*
729 		 * Process has trashed its stack; give it an illegal
730 		 * instruction to halt it in its tracks.
731 		 */
732 		SIGACTION(p, SIGILL) = SIG_DFL;
733 		sig = sigmask(SIGILL);
734 		p->p_sigignore &= ~sig;
735 		p->p_sigcatch &= ~sig;
736 		p->p_sigmask &= ~sig;
737 		psignal(p, SIGILL);
738 		return;
739 	}
740 	kfp = (struct sigframe *)malloc((u_long)fsize, M_TEMP, M_WAITOK);
741 	/*
742 	 * Build the argument list for the signal handler.
743 	 */
744 	kfp->sf_signum = sig;
745 	kfp->sf_code = code;
746 	kfp->sf_scp = &fp->sf_sc;
747 	kfp->sf_handler = catcher;
748 	/*
749 	 * Save necessary hardware state.  Currently this includes:
750 	 *	- general registers
751 	 *	- original exception frame (if not a "normal" frame)
752 	 *	- FP coprocessor state
753 	 */
754 	kfp->sf_state.ss_flags = SS_USERREGS;
755 	bcopy((caddr_t)frame->f_regs,
756 	      (caddr_t)kfp->sf_state.ss_frame.f_regs, sizeof frame->f_regs);
757 	if (ft >= FMT7) {
758 #ifdef DEBUG
759 		if (ft > 15 || exframesize[ft] < 0)
760 			panic("sendsig: bogus frame type");
761 #endif
762 		kfp->sf_state.ss_flags |= SS_RTEFRAME;
763 		kfp->sf_state.ss_frame.f_format = frame->f_format;
764 		kfp->sf_state.ss_frame.f_vector = frame->f_vector;
765 		bcopy((caddr_t)&frame->F_u,
766 		      (caddr_t)&kfp->sf_state.ss_frame.F_u, exframesize[ft]);
767 		/*
768 		 * Leave an indicator that we need to clean up the kernel
769 		 * stack.  We do this by setting the "pad word" above the
770 		 * hardware stack frame to the amount the stack must be
771 		 * adjusted by.
772 		 *
773 		 * N.B. we increment rather than just set f_stackadj in
774 		 * case we are called from syscall when processing a
775 		 * sigreturn.  In that case, f_stackadj may be non-zero.
776 		 */
777 		frame->f_stackadj += exframesize[ft];
778 		frame->f_format = frame->f_vector = 0;
779 #ifdef DEBUG
780 		if (sigdebug & SDB_FOLLOW)
781 			printf("sendsig(%d): copy out %d of frame %d\n",
782 			       p->p_pid, exframesize[ft], ft);
783 #endif
784 	}
785 #ifdef FPCOPROC
786 	kfp->sf_state.ss_flags |= SS_FPSTATE;
787 	m68881_save(&kfp->sf_state.ss_fpstate);
788 #ifdef DEBUG
789 	if ((sigdebug & SDB_FPSTATE) && *(char *)&kfp->sf_state.ss_fpstate)
790 		printf("sendsig(%d): copy out FP state (%x) to %x\n",
791 		       p->p_pid, *(u_int *)&kfp->sf_state.ss_fpstate,
792 		       &kfp->sf_state.ss_fpstate);
793 #endif
794 #endif
795 	/*
796 	 * Build the signal context to be used by sigreturn.
797 	 */
798 	kfp->sf_sc.sc_onstack = oonstack;
799 	kfp->sf_sc.sc_mask = mask;
800 	kfp->sf_sc.sc_sp = frame->f_regs[SP];
801 	kfp->sf_sc.sc_fp = frame->f_regs[A6];
802 	kfp->sf_sc.sc_ap = (int)&fp->sf_state;
803 	kfp->sf_sc.sc_pc = frame->f_pc;
804 	kfp->sf_sc.sc_ps = frame->f_sr;
805 #ifdef HPUXCOMPAT
806 	/*
807 	 * Create an HP-UX style sigcontext structure and associated goo
808 	 */
809 	if (p->p_md.md_flags & MDP_HPUX) {
810 		register struct hpuxsigframe *hkfp;
811 
812 		hkfp = (struct hpuxsigframe *)&kfp[1];
813 		hkfp->hsf_signum = bsdtohpuxsig(kfp->sf_signum);
814 		hkfp->hsf_code = kfp->sf_code;
815 		hkfp->hsf_scp = (struct sigcontext *)
816 			&((struct hpuxsigframe *)(&fp[1]))->hsf_sc;
817 		hkfp->hsf_sc.hsc_syscall = 0;		/* XXX */
818 		hkfp->hsf_sc.hsc_action = 0;		/* XXX */
819 		hkfp->hsf_sc.hsc_pad1 = hkfp->hsf_sc.hsc_pad2 = 0;
820 		hkfp->hsf_sc.hsc_onstack = kfp->sf_sc.sc_onstack;
821 		hkfp->hsf_sc.hsc_mask = kfp->sf_sc.sc_mask;
822 		hkfp->hsf_sc.hsc_sp = kfp->sf_sc.sc_sp;
823 		hkfp->hsf_sc.hsc_ps = kfp->sf_sc.sc_ps;
824 		hkfp->hsf_sc.hsc_pc = kfp->sf_sc.sc_pc;
825 		hkfp->hsf_sc.hsc_pad = 0;
826 		hkfp->hsf_sc.hsc_magic = 0xdeadbeef;
827 		hkfp->hsf_sc.hsc_realsc = kfp->sf_scp;
828 		bcopy((caddr_t)frame->f_regs, (caddr_t)hkfp->hsf_regs,
829 		      sizeof (hkfp->hsf_regs));
830 
831 		kfp->sf_signum = hkfp->hsf_signum;
832 		kfp->sf_scp = hkfp->hsf_scp;
833 	}
834 #endif
835 	(void) copyout((caddr_t)kfp, (caddr_t)fp, fsize);
836 	frame->f_regs[SP] = (int)fp;
837 #ifdef DEBUG
838 	if (sigdebug & SDB_FOLLOW)
839 		printf("sendsig(%d): sig %d scp %x fp %x sc_sp %x sc_ap %x\n",
840 		       p->p_pid, sig, kfp->sf_scp, fp,
841 		       kfp->sf_sc.sc_sp, kfp->sf_sc.sc_ap);
842 #endif
843 	/*
844 	 * Signal trampoline code is at base of user stack.
845 	 */
846 	frame->f_pc = (int)PS_STRINGS - (esigcode - sigcode);
847 #ifdef DEBUG
848 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
849 		printf("sendsig(%d): sig %d returns\n",
850 		       p->p_pid, sig);
851 #endif
852 	free((caddr_t)kfp, M_TEMP);
853 }
854 
855 /*
856  * System call to cleanup state after a signal
857  * has been taken.  Reset signal mask and
858  * stack state from context left by sendsig (above).
859  * Return to previous pc and psl as specified by
860  * context left by sendsig. Check carefully to
861  * make sure that the user has not modified the
862  * psl to gain improper privileges or to cause
863  * a machine fault.
864  */
865 struct sigreturn_args {
866 	struct sigcontext *sigcntxp;
867 };
868 /* ARGSUSED */
869 sigreturn(p, uap, retval)
870 	struct proc *p;
871 	struct sigreturn_args *uap;
872 	int *retval;
873 {
874 	register struct sigcontext *scp;
875 	register struct frame *frame;
876 	register int rf;
877 	struct sigcontext tsigc;
878 	struct sigstate tstate;
879 	int flags;
880 
881 	scp = uap->sigcntxp;
882 #ifdef DEBUG
883 	if (sigdebug & SDB_FOLLOW)
884 		printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
885 #endif
886 	if ((int)scp & 1)
887 		return (EINVAL);
888 #ifdef HPUXCOMPAT
889 	/*
890 	 * Grab context as an HP-UX style context and determine if it
891 	 * was one that we constructed in sendsig.
892 	 */
893 	if (p->p_md.md_flags & MDP_HPUX) {
894 		struct hpuxsigcontext *hscp = (struct hpuxsigcontext *)scp;
895 		struct hpuxsigcontext htsigc;
896 
897 		if (useracc((caddr_t)hscp, sizeof (*hscp), B_WRITE) == 0 ||
898 		    copyin((caddr_t)hscp, (caddr_t)&htsigc, sizeof htsigc))
899 			return (EINVAL);
900 		/*
901 		 * If not generated by sendsig or we cannot restore the
902 		 * BSD-style sigcontext, just restore what we can -- state
903 		 * will be lost, but them's the breaks.
904 		 */
905 		hscp = &htsigc;
906 		if (hscp->hsc_magic != 0xdeadbeef ||
907 		    (scp = hscp->hsc_realsc) == 0 ||
908 		    useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
909 		    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc)) {
910 			if (hscp->hsc_onstack & 01)
911 				p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
912 			else
913 				p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
914 			p->p_sigmask = hscp->hsc_mask &~ sigcantmask;
915 			frame = (struct frame *) p->p_md.md_regs;
916 			frame->f_regs[SP] = hscp->hsc_sp;
917 			frame->f_pc = hscp->hsc_pc;
918 			frame->f_sr = hscp->hsc_ps &~ PSL_USERCLR;
919 			return (EJUSTRETURN);
920 		}
921 		/*
922 		 * Otherwise, overlay BSD context with possibly modified
923 		 * HP-UX values.
924 		 */
925 		tsigc.sc_onstack = hscp->hsc_onstack;
926 		tsigc.sc_mask = hscp->hsc_mask;
927 		tsigc.sc_sp = hscp->hsc_sp;
928 		tsigc.sc_ps = hscp->hsc_ps;
929 		tsigc.sc_pc = hscp->hsc_pc;
930 	} else
931 #endif
932 	/*
933 	 * Test and fetch the context structure.
934 	 * We grab it all at once for speed.
935 	 */
936 	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
937 	    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
938 		return (EINVAL);
939 	scp = &tsigc;
940 	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
941 		return (EINVAL);
942 	/*
943 	 * Restore the user supplied information
944 	 */
945 	if (scp->sc_onstack & 01)
946 		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
947 	else
948 		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
949 	p->p_sigmask = scp->sc_mask &~ sigcantmask;
950 	frame = (struct frame *) p->p_md.md_regs;
951 	frame->f_regs[SP] = scp->sc_sp;
952 	frame->f_regs[A6] = scp->sc_fp;
953 	frame->f_pc = scp->sc_pc;
954 	frame->f_sr = scp->sc_ps;
955 	/*
956 	 * Grab pointer to hardware state information.
957 	 * If zero, the user is probably doing a longjmp.
958 	 */
959 	if ((rf = scp->sc_ap) == 0)
960 		return (EJUSTRETURN);
961 	/*
962 	 * See if there is anything to do before we go to the
963 	 * expense of copying in close to 1/2K of data
964 	 */
965 	flags = fuword((caddr_t)rf);
966 #ifdef DEBUG
967 	if (sigdebug & SDB_FOLLOW)
968 		printf("sigreturn(%d): sc_ap %x flags %x\n",
969 		       p->p_pid, rf, flags);
970 #endif
971 	/*
972 	 * fuword failed (bogus sc_ap value).
973 	 */
974 	if (flags == -1)
975 		return (EINVAL);
976 	if (flags == 0 || copyin((caddr_t)rf, (caddr_t)&tstate, sizeof tstate))
977 		return (EJUSTRETURN);
978 #ifdef DEBUG
979 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
980 		printf("sigreturn(%d): ssp %x usp %x scp %x ft %d\n",
981 		       p->p_pid, &flags, scp->sc_sp, uap->sigcntxp,
982 		       (flags&SS_RTEFRAME) ? tstate.ss_frame.f_format : -1);
983 #endif
984 	/*
985 	 * Restore most of the users registers except for A6 and SP
986 	 * which were handled above.
987 	 */
988 	if (flags & SS_USERREGS)
989 		bcopy((caddr_t)tstate.ss_frame.f_regs,
990 		      (caddr_t)frame->f_regs, sizeof(frame->f_regs)-2*NBPW);
991 	/*
992 	 * Restore long stack frames.  Note that we do not copy
993 	 * back the saved SR or PC, they were picked up above from
994 	 * the sigcontext structure.
995 	 */
996 	if (flags & SS_RTEFRAME) {
997 		register int sz;
998 
999 		/* grab frame type and validate */
1000 		sz = tstate.ss_frame.f_format;
1001 		if (sz > 15 || (sz = exframesize[sz]) < 0)
1002 			return (EINVAL);
1003 		frame->f_stackadj -= sz;
1004 		frame->f_format = tstate.ss_frame.f_format;
1005 		frame->f_vector = tstate.ss_frame.f_vector;
1006 		bcopy((caddr_t)&tstate.ss_frame.F_u, (caddr_t)&frame->F_u, sz);
1007 #ifdef DEBUG
1008 		if (sigdebug & SDB_FOLLOW)
1009 			printf("sigreturn(%d): copy in %d of frame type %d\n",
1010 			       p->p_pid, sz, tstate.ss_frame.f_format);
1011 #endif
1012 	}
1013 #ifdef FPCOPROC
1014 	/*
1015 	 * Finally we restore the original FP context
1016 	 */
1017 	if (flags & SS_FPSTATE)
1018 		m68881_restore(&tstate.ss_fpstate);
1019 #ifdef DEBUG
1020 	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
1021 		printf("sigreturn(%d): copied in FP state (%x) at %x\n",
1022 		       p->p_pid, *(u_int *)&tstate.ss_fpstate,
1023 		       &tstate.ss_fpstate);
1024 #endif
1025 #endif
1026 #ifdef DEBUG
1027 	if ((sigdebug & SDB_FOLLOW) ||
1028 	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
1029 		printf("sigreturn(%d): returns\n", p->p_pid);
1030 #endif
1031 	return (EJUSTRETURN);
1032 }
1033 
1034 int	waittime = -1;
1035 
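/*
 * Machine-dependent reboot.  Unless RB_NOSYNC is set, sync the disks
 * and wait for in-flight buffers to drain, then halt, dump, or reboot
 * according to the flags in "howto".
 */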
1036 boot(howto)
1037 	register int howto;
1038 {
1039 	/* take a snap shot before clobbering any registers */
1040 	if (curproc && curproc->p_addr)
1041 		savectx(curproc->p_addr, 0);
1042 
1043 	boothowto = howto;
1044 	if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
1045 		register struct buf *bp;
1046 		int iter, nbusy;
1047 
1048 		waittime = 0;
1049 		(void) spl0();
1050 		printf("syncing disks... ");
1051 		/*
1052 		 * Release vnodes held by texts before sync.
1053 		 */
1054 		if (panicstr == 0)
1055 			vnode_pager_umount(NULL);
1056 #ifdef notdef
1057 #include "vn.h"
1058 #if NVN > 0
1059 		vnshutdown();
1060 #endif
1061 #endif
1062 		sync(&proc0, (void *)NULL, (int *)NULL);
1063 
1064 		for (iter = 0; iter < 20; iter++) {
1065 			nbusy = 0;
1066 			for (bp = &buf[nbuf]; --bp >= buf; )
1067 				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
1068 					nbusy++;
1069 			if (nbusy == 0)
1070 				break;
1071 			printf("%d ", nbusy);
1072 			DELAY(40000 * iter);
1073 		}
1074 		if (nbusy)
1075 			printf("giving up\n");
1076 		else
1077 			printf("done\n");
1078 		/*
1079 		 * If we've been adjusting the clock, the todr
1080 		 * will be out of sync; adjust it now.
1081 		 */
1082 		resettodr();
1083 	}
1084 	splhigh();			/* extreme priority */
1085 	if (howto&RB_HALT) {
1086 		printf("halted\n\n");
1087 		asm("	stop	#0x2700");
1088 	} else {
1089 		if (howto & RB_DUMP)
1090 			dumpsys();
1091 		doboot();
1092 		/*NOTREACHED*/
1093 	}
1094 	/*NOTREACHED*/
1095 }
1096 
1097 int	dumpmag = 0x8fca0101;	/* magic number for savecore */
1098 int	dumpsize = 0;		/* also for savecore */
1099 long	dumplo = 0;
1100 
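/*
 * Compute the size and device offset of a crash dump: dump all of
 * physical memory if the dump device is large enough, otherwise as
 * much as will fit, and keep the dump clear of the start of the
 * device where a disk label may live.
 */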
1101 dumpconf()
1102 {
1103 	int nblks;
1104 
1105 	/*
1106 	 * XXX include the final RAM page which is not included in physmem.
1107 	 */
1108 	dumpsize = physmem + 1;
1109 	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
1110 		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1111 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
1112 			dumpsize = btoc(dbtob(nblks - dumplo));
1113 		else if (dumplo == 0)
1114 			dumplo = nblks - btodb(ctob(dumpsize));
1115 	}
1116 	/*
1117 	 * Don't dump on the first CLBYTES (why CLBYTES?)
1118 	 * in case the dump device includes a disk label.
1119 	 */
1120 	if (dumplo < btodb(CLBYTES))
1121 		dumplo = btodb(CLBYTES);
1122 }
1123 
1124 /*
1125  * Doadump comes here after turning off memory management and
1126  * getting on the dump stack, either when called above, or by
1127  * the auto-restart code.
1128  */
1129 dumpsys()
1130 {
1131 
1132 	msgbufmapped = 0;
1133 	if (dumpdev == NODEV)
1134 		return;
1135 	/*
1136 	 * For dumps during autoconfiguration,
1137 	 * if the dump device has already been configured...
1138 	 */
1139 	if (dumpsize == 0)
1140 		dumpconf();
1141 	if (dumplo < 0)
1142 		return;
1143 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
1144 	printf("dump ");
1145 	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
1146 
1147 	case ENXIO:
1148 		printf("device bad\n");
1149 		break;
1150 
1151 	case EFAULT:
1152 		printf("device not ready\n");
1153 		break;
1154 
1155 	case EINVAL:
1156 		printf("area improper\n");
1157 		break;
1158 
1159 	case EIO:
1160 		printf("i/o error\n");
1161 		break;
1162 
1163 	default:
1164 		printf("succeeded\n");
1165 		break;
1166 	}
1167 }
1168 
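/*
 * Final CPU-specific setup: pick the mapped-copy threshold (if
 * configured), enable parity error detection, and initialize the
 * status LEDs if configured.
 */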
1169 initcpu()
1170 {
1171 #ifdef MAPPEDCOPY
1172 	extern u_int mappedcopysize;
1173 
1174 	/*
1175 	 * Initialize lower bound for doing copyin/copyout using
1176 	 * page mapping (if not already set).  We don't do this on
1177 	 * VAC machines as it loses big time.
1178 	 */
1179 	if (mappedcopysize == 0) {
1180 		if (ectype == EC_VIRT)
1181 			mappedcopysize = (u_int) -1;
1182 		else
1183 			mappedcopysize = NBPG;
1184 	}
1185 #endif
1186 	parityenable();
1187 #ifdef USELEDS
1188 	ledinit();
1189 #endif
1190 }
1191 
1192 straytrap(pc, evec)
1193 	int pc;
1194 	u_short evec;
1195 {
1196 	printf("unexpected trap (vector offset %x) from %x\n",
1197 	       evec & 0xFFF, pc);
1198 }
1199 
1200 int	*nofault;
1201 
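/*
 * Probe a word (short) at the given address.  The kernel fault handler
 * bails out through "nofault" if the access faults, so this returns 1
 * for a bad (bus-erroring) address and 0 if the read succeeded.
 */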
1202 badaddr(addr)
1203 	register caddr_t addr;
1204 {
1205 	register int i;
1206 	label_t	faultbuf;
1207 
1208 #ifdef lint
1209 	i = *addr; if (i) return(0);
1210 #endif
1211 	nofault = (int *) &faultbuf;
1212 	if (setjmp((label_t *)nofault)) {
1213 		nofault = (int *) 0;
1214 		return(1);
1215 	}
1216 	i = *(volatile short *)addr;
1217 	nofault = (int *) 0;
1218 	return(0);
1219 }
1220 
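/*
 * Same as badaddr() above, but probes a single byte.
 */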
1221 badbaddr(addr)
1222 	register caddr_t addr;
1223 {
1224 	register int i;
1225 	label_t	faultbuf;
1226 
1227 #ifdef lint
1228 	i = *addr; if (i) return(0);
1229 #endif
1230 	nofault = (int *) &faultbuf;
1231 	if (setjmp((label_t *)nofault)) {
1232 		nofault = (int *) 0;
1233 		return(1);
1234 	}
1235 	i = *(volatile char *)addr;
1236 	nofault = (int *) 0;
1237 	return(0);
1238 }
1239 
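/*
 * Software network interrupt: for each protocol whose bit is set in
 * netisr, clear the bit and call that protocol's input routine.
 */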
1240 netintr()
1241 {
1242 #ifdef INET
1243 	if (netisr & (1 << NETISR_ARP)) {
1244 		netisr &= ~(1 << NETISR_ARP);
1245 		arpintr();
1246 	}
1247 	if (netisr & (1 << NETISR_IP)) {
1248 		netisr &= ~(1 << NETISR_IP);
1249 		ipintr();
1250 	}
1251 #endif
1252 #ifdef NS
1253 	if (netisr & (1 << NETISR_NS)) {
1254 		netisr &= ~(1 << NETISR_NS);
1255 		nsintr();
1256 	}
1257 #endif
1258 #ifdef ISO
1259 	if (netisr & (1 << NETISR_ISO)) {
1260 		netisr &= ~(1 << NETISR_ISO);
1261 		clnlintr();
1262 	}
1263 #endif
1264 #ifdef CCITT
1265 	if (netisr & (1 << NETISR_CCITT)) {
1266 		netisr &= ~(1 << NETISR_CCITT);
1267 		ccittintr();
1268 	}
1269 #endif
1270 }
1271 
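/*
 * Auto-vectored interrupt dispatch for levels 3-5: walk the ISR list
 * for the level and call each handler until one claims the interrupt.
 * Unclaimed (stray) interrupts are logged, and too many in a row
 * cause a panic.
 */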
1272 intrhand(sr)
1273 	int sr;
1274 {
1275 	register struct isr *isr;
1276 	register int found = 0;
1277 	register int ipl;
1278 	extern struct isr isrqueue[];
1279 	static int straycount;
1280 
1281 	ipl = (sr >> 8) & 7;
1282 	switch (ipl) {
1283 
1284 	case 3:
1285 	case 4:
1286 	case 5:
1287 		ipl = ISRIPL(ipl);
1288 		isr = isrqueue[ipl].isr_forw;
1289 		for (; isr != &isrqueue[ipl]; isr = isr->isr_forw) {
1290 			if ((isr->isr_intr)(isr->isr_arg)) {
1291 				found++;
1292 				break;
1293 			}
1294 		}
1295 		if (found)
1296 			straycount = 0;
1297 		else if (++straycount > 50)
1298 			panic("intrhand: stray interrupt");
1299 		else
1300 			printf("stray interrupt, sr 0x%x\n", sr);
1301 		break;
1302 
1303 	case 0:
1304 	case 1:
1305 	case 2:
1306 	case 6:
1307 	case 7:
1308 		if (++straycount > 50)
1309 			panic("intrhand: unexpected sr");
1310 		else
1311 			printf("intrhand: unexpected sr 0x%x\n", sr);
1312 		break;
1313 	}
1314 }
1315 
1316 #if defined(DEBUG) && !defined(PANICBUTTON)
1317 #define PANICBUTTON
1318 #endif
1319 
1320 #ifdef PANICBUTTON
1321 int panicbutton = 1;	/* non-zero if panic buttons are enabled */
1322 int crashandburn = 0;
1323 int candbdelay = 50;	/* give em half a second */
1324 
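/*
 * Timeout routine armed by nmihand(): clears crashandburn so that a
 * second keyboard NMI must arrive within candbdelay ticks to force
 * the "crash and burn" panic.
 */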
1325 void
1326 candbtimer(arg)
1327 	void *arg;
1328 {
1329 
1330 	crashandburn = 0;
1331 }
1332 #endif
1333 
1334 /*
1335  * Level 7 interrupts can be caused by the keyboard or parity errors.
1336  */
1337 nmihand(frame)
1338 	struct frame frame;
1339 {
1340 	if (kbdnmi()) {
1341 #ifdef PANICBUTTON
1342 		static int innmihand = 0;
1343 
1344 		/*
1345 		 * Attempt to reduce the window of vulnerability for recursive
1346 		 * NMIs (e.g. someone holding down the keyboard reset button).
1347 		 */
1348 		if (innmihand == 0) {
1349 			innmihand = 1;
1350 			printf("Got a keyboard NMI\n");
1351 			innmihand = 0;
1352 		}
1353 		if (panicbutton) {
1354 			if (crashandburn) {
1355 				crashandburn = 0;
1356 				panic(panicstr ?
1357 				      "forced crash, nosync" : "forced crash");
1358 			}
1359 			crashandburn++;
1360 			timeout(candbtimer, (void *)0, candbdelay);
1361 		}
1362 #endif
1363 		return;
1364 	}
1365 	if (parityerror(&frame))
1366 		return;
1367 	/* panic?? */
1368 	printf("unexpected level 7 interrupt ignored\n");
1369 }
1370 
1371 /*
1372  * Parity error section.  Contains magic.
1373  */
1374 #define PARREG		((volatile short *)IIOV(0x5B0000))
1375 static int gotparmem = 0;
1376 #ifdef DEBUG
1377 int ignorekperr = 0;	/* ignore kernel parity errors */
1378 #endif
1379 
1380 /*
1381  * Enable parity detection
1382  */
1383 parityenable()
1384 {
1385 	label_t	faultbuf;
1386 
1387 	nofault = (int *) &faultbuf;
1388 	if (setjmp((label_t *)nofault)) {
1389 		nofault = (int *) 0;
1390 #ifdef DEBUG
1391 		printf("No parity memory\n");
1392 #endif
1393 		return;
1394 	}
1395 	*PARREG = 1;
1396 	nofault = (int *) 0;
1397 	gotparmem = 1;
1398 #ifdef DEBUG
1399 	printf("Parity detection enabled\n");
1400 #endif
1401 }
1402 
1403 /*
1404  * Determine if level 7 interrupt was caused by a parity error
1405  * and deal with it if it was.  Returns 1 if it was a parity error.
1406  */
1407 parityerror(fp)
1408 	struct frame *fp;
1409 {
1410 	if (!gotparmem)
1411 		return(0);
1412 	*PARREG = 0;
1413 	DELAY(10);
1414 	*PARREG = 1;
1415 	if (panicstr) {
1416 		printf("parity error after panic ignored\n");
1417 		return(1);
1418 	}
1419 	if (!findparerror())
1420 		printf("WARNING: transient parity error ignored\n");
1421 	else if (USERMODE(fp->f_sr)) {
1422 		printf("pid %d: parity error\n", curproc->p_pid);
1423 		uprintf("sorry, pid %d killed due to memory parity error\n",
1424 			curproc->p_pid);
1425 		psignal(curproc, SIGKILL);
1426 #ifdef DEBUG
1427 	} else if (ignorekperr) {
1428 		printf("WARNING: kernel parity error ignored\n");
1429 #endif
1430 	} else {
1431 		regdump(fp, 128);
1432 		panic("kernel parity error");
1433 	}
1434 	return(1);
1435 }
1436 
1437 /*
1438  * Yuk!  There has got to be a better way to do this!
1439  * Searching all of memory with interrupts blocked can lead to disaster.
1440  */
1441 findparerror()
1442 {
1443 	static label_t parcatch;
1444 	static int looking = 0;
1445 	volatile int pg, o, s;
1446 	register volatile int *ip;
1447 	register int i;
1448 	int found;
1449 
1450 #ifdef lint
1451 	i = o = pg = 0; if (i) return(0);
1452 #endif
1453 	/*
1454 	 * If looking is true we are searching for a known parity error
1455 	 * and it has just occurred.  All we do is return to the higher
1456 	 * level invocation.
1457 	 */
1458 	if (looking)
1459 		longjmp(&parcatch);
1460 	s = splhigh();
1461 	/*
1462 	 * If setjmp returns true, the parity error we were searching
1463 	 * for has just occurred (longjmp above) at the current pg+o
1464 	 */
1465 	if (setjmp(&parcatch)) {
1466 		printf("Parity error at 0x%x\n", ctob(pg)|o);
1467 		found = 1;
1468 		goto done;
1469 	}
1470 	/*
1471 	 * If we get here, a parity error has occurred for the first time and
1472 	 * we need to find it.  We turn off any external caches and loop
1473 	 * through memory, testing every longword until a fault occurs and
1474 	 * we regain control at setjmp above.  Note that because of the
1475 	 * setjmp, pg and o need to be volatile or their values will be lost.
1476 	 */
1477 	looking = 1;
1478 	ecacheoff();
1479 	for (pg = btoc(lowram); pg < btoc(lowram)+physmem; pg++) {
1480 		pmap_enter(kernel_pmap, (vm_offset_t)vmmap, ctob(pg),
1481 		    VM_PROT_READ, TRUE);
1482 		ip = (int *)vmmap;
1483 		for (o = 0; o < NBPG; o += sizeof(int))
1484 			i = *ip++;
1485 	}
1486 	/*
1487 	 * Getting here implies no fault was found.  Should never happen.
1488 	 */
1489 	printf("Couldn't locate parity error\n");
1490 	found = 0;
1491 done:
1492 	looking = 0;
1493 	pmap_remove(kernel_pmap, (vm_offset_t)vmmap, (vm_offset_t)&vmmap[NBPG]);
1494 	ecacheon();
1495 	splx(s);
1496 	return(found);
1497 }
1498 
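/*
 * Dump the registers from the given exception frame, plus up to
 * "sbytes" bytes of the kernel or user stack.  Used by the parity
 * error and kernel stack overflow handlers.
 */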
1499 regdump(fp, sbytes)
1500 	struct frame *fp; /* must not be register */
1501 	int sbytes;
1502 {
1503 	static int doingdump = 0;
1504 	register int i;
1505 	int s;
1506 	extern char *hexstr();
1507 
1508 	if (doingdump)
1509 		return;
1510 	s = splhigh();
1511 	doingdump = 1;
1512 	printf("pid = %d, pc = %s, ",
1513 	       curproc ? curproc->p_pid : -1, hexstr(fp->f_pc, 8));
1514 	printf("ps = %s, ", hexstr(fp->f_sr, 4));
1515 	printf("sfc = %s, ", hexstr(getsfc(), 4));
1516 	printf("dfc = %s\n", hexstr(getdfc(), 4));
1517 	printf("Registers:\n     ");
1518 	for (i = 0; i < 8; i++)
1519 		printf("        %d", i);
1520 	printf("\ndreg:");
1521 	for (i = 0; i < 8; i++)
1522 		printf(" %s", hexstr(fp->f_regs[i], 8));
1523 	printf("\nareg:");
1524 	for (i = 0; i < 8; i++)
1525 		printf(" %s", hexstr(fp->f_regs[i+8], 8));
1526 	if (sbytes > 0) {
1527 		if (fp->f_sr & PSL_S) {
1528 			printf("\n\nKernel stack (%s):",
1529 			       hexstr((int)(((int *)&fp)-1), 8));
1530 			dumpmem(((int *)&fp)-1, sbytes, 0);
1531 		} else {
1532 			printf("\n\nUser stack (%s):", hexstr(fp->f_regs[SP], 8));
1533 			dumpmem((int *)fp->f_regs[SP], sbytes, 1);
1534 		}
1535 	}
1536 	doingdump = 0;
1537 	splx(s);
1538 }
1539 
1540 extern char kstack[];
1541 #define KSADDR	((int *)&(kstack[(UPAGES-1)*NBPG]))
1542 
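/*
 * Print "sz" words starting at "ptr", eight per line.  User addresses
 * are fetched with fuword(); kernel stack dumps are confined to the
 * top kernel stack page (KSADDR).
 */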
1543 dumpmem(ptr, sz, ustack)
1544 	register int *ptr;
1545 	int sz, ustack;
1546 {
1547 	register int i, val;
1548 	extern char *hexstr();
1549 
1550 	for (i = 0; i < sz; i++) {
1551 		if ((i & 7) == 0)
1552 			printf("\n%s: ", hexstr((int)ptr, 6));
1553 		else
1554 			printf(" ");
1555 		if (ustack == 1) {
1556 			if ((val = fuword(ptr++)) == -1)
1557 				break;
1558 		} else {
1559 			if (ustack == 0 &&
1560 			    (ptr < KSADDR || ptr > KSADDR+(NBPG/4-1)))
1561 				break;
1562 			val = *ptr++;
1563 		}
1564 		printf("%s", hexstr(val, 8));
1565 	}
1566 	printf("\n");
1567 }
1568 
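
/*
 * Convert "val" to a "len"-digit (at most 8) zero-padded upper-case
 * hexadecimal string in a static buffer.
 */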
1569 char *
1570 hexstr(val, len)
1571 	register int val;
1572 	int len;
1573 {
1574 	static char nbuf[9];
1575 	register int x, i;
1576 
1577 	if (len > 8)
1578 		return("");
1579 	nbuf[len] = '\0';
1580 	for (i = len-1; i >= 0; --i) {
1581 		x = val & 0xF;
1582 		if (x > 9)
1583 			nbuf[i] = x - 10 + 'A';
1584 		else
1585 			nbuf[i] = x + '0';
1586 		val >>= 4;
1587 	}
1588 	return(nbuf);
1589 }
1590 
1591 #ifdef DEBUG
1592 char oflowmsg[] = "k-stack overflow";
1593 char uflowmsg[] = "k-stack underflow";
1594 
1595 badkstack(oflow, fr)
1596 	int oflow;
1597 	struct frame fr;
1598 {
1599 	extern char kstackatbase[];
1600 
1601 	printf("%s: sp should be %x\n",
1602 	       oflow ? oflowmsg : uflowmsg,
1603 	       kstackatbase - (exframesize[fr.f_format] + 8));
1604 	regdump(&fr, 0);
1605 	panic(oflow ? oflowmsg : uflowmsg);
1606 }
1607 #endif
1608