xref: /netbsd/sys/arch/hp300/hp300/machdep.c (revision c4a72b64)
1 /*	$NetBSD: machdep.c,v 1.163 2002/09/25 20:05:26 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
41  *
42  *	@(#)machdep.c	8.10 (Berkeley) 4/20/94
43  */
44 
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.163 2002/09/25 20:05:26 thorpej Exp $");
47 
48 #include "opt_ddb.h"
49 #include "opt_compat_hpux.h"
50 #include "opt_compat_netbsd.h"
51 #include "hil.h"
52 
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/callout.h>
56 #include <sys/buf.h>
57 #include <sys/conf.h>
58 #include <sys/exec.h>
59 #include <sys/file.h>
60 #include <sys/ioctl.h>
61 #include <sys/kernel.h>
62 #include <sys/device.h>
63 #include <sys/malloc.h>
64 #include <sys/mbuf.h>
65 #include <sys/mount.h>
66 #include <sys/msgbuf.h>
67 #include <sys/proc.h>
68 #include <sys/reboot.h>
69 #include <sys/signalvar.h>
70 #include <sys/syscallargs.h>
71 #include <sys/tty.h>
72 #include <sys/user.h>
73 #include <sys/exec.h>
74 #include <sys/core.h>
75 #include <sys/kcore.h>
76 #include <sys/vnode.h>
77 
78 #ifdef DDB
79 #include <machine/db_machdep.h>
80 #include <ddb/db_sym.h>
81 #include <ddb/db_extern.h>
82 #ifdef __ELF__
83 #include <sys/exec_elf.h>
84 #endif
85 #endif /* DDB */
86 
87 #include <machine/autoconf.h>
88 #include <machine/bootinfo.h>
89 #include <machine/bus.h>
90 #include <machine/cpu.h>
91 #include <machine/hp300spu.h>
92 #include <machine/reg.h>
93 #include <machine/psl.h>
94 #include <machine/pte.h>
95 
96 #include <machine/kcore.h>	/* XXX should be pulled in by sys/kcore.h */
97 
98 #include <dev/cons.h>
99 
100 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
101 #include <uvm/uvm_extern.h>
102 
103 #include <sys/sysctl.h>
104 
105 #include "opt_useleds.h"
106 
107 #include <hp300/dev/hilreg.h>
108 #include <hp300/dev/hilioctl.h>
109 #include <hp300/dev/hilvar.h>
110 #ifdef USELEDS
111 #include <hp300/hp300/leds.h>
112 #endif
113 
114 /* the following is used externally (sysctl_hw) */
115 char	machine[] = MACHINE;	/* from <machine/param.h> */
116 
117 /* Our exported CPU info; we can have only one. */
118 struct cpu_info cpu_info_store;
119 
120 struct vm_map *exec_map = NULL;
121 struct vm_map *mb_map = NULL;
122 struct vm_map *phys_map = NULL;
123 
124 extern paddr_t avail_end;
125 
126 /*
127  * bootinfo base (physical and virtual).  The bootinfo is placed, by
128  * the boot loader, into the first page of kernel text, which is zero
129  * filled (see locore.s) and not mapped at 0.  It is remapped to a
130  * different address in pmap_bootstrap().
131  */
132 paddr_t	bootinfo_pa;
133 vaddr_t	bootinfo_va;
134 
135 caddr_t	msgbufaddr;
136 int	maxmem;			/* max memory per process */
137 int	physmem = MAXMEM;	/* max supported memory, changes to actual */
138 /*
139  * safepri is a safe priority for sleep to set for a spin-wait
140  * during autoconfiguration or after a panic.
141  */
142 int	safepri = PSL_LOWIPL;
143 
144 extern	u_int lowram;
145 extern	short exframesize[];
146 
147 #ifdef COMPAT_HPUX
148 extern struct emul emul_hpux;
149 #endif
150 
151 /* prototypes for local functions */
152 void	parityenable __P((void));
153 int	parityerror __P((struct frame *));
154 int	parityerrorfind __P((void));
155 void    identifycpu __P((void));
156 void    initcpu __P((void));
157 
158 int	cpu_dumpsize __P((void));
159 int	cpu_dump __P((int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *));
160 void	cpu_init_kcore_hdr __P((void));
161 
162 /* functions called from locore.s */
163 void    dumpsys __P((void));
164 void	hp300_init __P((void));
165 void    straytrap __P((int, u_short));
166 void	nmihand __P((struct frame));
167 
168 /*
169  * Machine-dependent crash dump header info.
170  */
171 cpu_kcore_hdr_t cpu_kcore_hdr;
172 
173 /*
174  * Note that the value of delay_divisor is roughly
175  * 2048 / cpuspeed (where cpuspeed is in MHz) on 68020
176  * and 68030 systems.  See clock.c for the delay
177  * calibration algorithm.
178  */
179 int	cpuspeed;		/* relative cpu speed; XXX skewed on 68040 */
180 int	delay_divisor;		/* delay constant */
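/*
 * A rough worked example (figures illustrative, not measured): by the
 * formula above, a 25MHz 68030 would end up with a delay_divisor of
 * about 2048 / 25 = 82, and a 50MHz 68030 with about 41.  See
 * hp300_calibrate_delay() (called from hp300_init() below) and clock.c
 * for how the value is actually determined.
 */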
181 
182 /*
183  * Early initialization, before main() is called.
184  */
185 void
186 hp300_init()
187 {
188 	struct btinfo_magic *bt_mag;
189 	int i;
190 
191 	extern paddr_t avail_start, avail_end;
192 
193 	/*
194 	 * Tell the VM system about available physical memory.  The
195 	 * hp300 only has one segment.
196 	 */
197 	uvm_page_physload(atop(avail_start), atop(avail_end),
198 	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
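	/*
	 * The segment bounds and the "available" bounds passed above are
	 * both [avail_start, avail_end), since pmap_bootstrap() has already
	 * adjusted those values for memory the kernel keeps to itself (the
	 * message buffer note below is one example).
	 */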
199 
200 	/* Initialize the interrupt handlers. */
201 	intr_init();
202 
203 	/* Calibrate the delay loop. */
204 	hp300_calibrate_delay();
205 
206 	/*
207 	 * Initialize error message buffer (at end of core).
208 	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
209 	 */
210 	for (i = 0; i < btoc(MSGBUFSIZE); i++)
211 		pmap_kenter_pa((vaddr_t)msgbufaddr + i * NBPG,
212 		    avail_end + i * NBPG, VM_PROT_READ|VM_PROT_WRITE);
213 	pmap_update(pmap_kernel());
214 	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
215 
216 	/*
217 	 * Map in the bootinfo page, and make sure the bootinfo
218 	 * exists by searching for the MAGIC record.  If it's not
219 	 * there, disable bootinfo.
220 	 */
221 	pmap_enter(pmap_kernel(), bootinfo_va, bootinfo_pa,
222 	    VM_PROT_READ|VM_PROT_WRITE,
223 	    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
224 	pmap_update(pmap_kernel());
225 	bt_mag = lookup_bootinfo(BTINFO_MAGIC);
226 	if (bt_mag == NULL ||
227 	    bt_mag->magic1 != BOOTINFO_MAGIC1 ||
228 	    bt_mag->magic2 != BOOTINFO_MAGIC2) {
229 		pmap_remove(pmap_kernel(), bootinfo_va, bootinfo_va + NBPG);
230 		pmap_update(pmap_kernel());
231 		bootinfo_va = 0;
232 	}
233 }
234 
235 /*
236  * Console initialization: called early on from main,
237  * before vm init or startup.  Do enough configuration
238  * to choose and initialize a console.
239  */
240 void
241 consinit()
242 {
243 
244 	/*
245 	 * Initialize the external I/O extent map.
246 	 */
247 	iomap_init();
248 
249 	/*
250 	 * Initialize the console before we print anything out.
251 	 */
252 
253 	hp300_cninit();
254 
255 	/*
256 	 * Issue a warning if the boot loader didn't provide bootinfo.
257 	 */
258 	if (bootinfo_va == 0)
259 		printf("WARNING: boot loader did not provide bootinfo\n");
260 
261 #ifdef DDB
262 	{
263 		extern int end;
264 		extern int *esym;
265 
266 #ifndef __ELF__
267 		ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
268 #else
269 		ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
270 		    (void *)&end, esym);
271 #endif
272 	}
273 	if (boothowto & RB_KDB)
274 		Debugger();
275 #endif
276 }
277 
278 /*
279  * cpu_startup: allocate memory for variable-sized tables,
280  * initialize cpu
281  */
282 void
283 cpu_startup()
284 {
285 	extern char *etext;
286 	caddr_t v;
287 	u_int i, base, residual;
288 	vaddr_t minaddr, maxaddr;
289 	vsize_t size;
290 	char pbuf[9];
291 #ifdef DEBUG
292 	extern int pmapdebug;
293 	int opmapdebug = pmapdebug;
294 
295 	pmapdebug = 0;
296 #endif
297 
298 	/*
299 	 * Initialize the kernel crash dump header.
300 	 */
301 	cpu_init_kcore_hdr();
302 
303 	/*
304 	 * Good {morning,afternoon,evening,night}.
305 	 */
306 	printf(version);
307 	identifycpu();
308 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
309 	printf("total memory = %s\n", pbuf);
310 
311 	/*
312 	 * Find out how much space we need, allocate it,
313 	 * and then give everything true virtual addresses.
314 	 */
315 	size = (vsize_t)allocsys(NULL, NULL);
316 	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
317 		panic("startup: no room for tables");
318 	if ((allocsys(v, NULL) - v) != size)
319 		panic("startup: table size inconsistency");
320 
321 	/*
322 	 * Now allocate buffers proper.  They are different than the above
323 	 * in that they usually occupy more virtual memory than physical.
324 	 */
325 	size = MAXBSIZE * nbuf;
326 	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
327 		    NULL, UVM_UNKNOWN_OFFSET, 0,
328 		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
329 				UVM_ADV_NORMAL, 0)) != 0)
330 		panic("startup: cannot allocate VM for buffers");
331 	minaddr = (vaddr_t)buffers;
332 	base = bufpages / nbuf;
333 	residual = bufpages % nbuf;
334 	for (i = 0; i < nbuf; i++) {
335 		vsize_t curbufsize;
336 		vaddr_t curbuf;
337 		struct vm_page *pg;
338 
339 		/*
340 		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
341 		 * that MAXBSIZE space, we allocate and map (base+1) pages
342 		 * for the first "residual" buffers, and then we allocate
343 		 * "base" pages for the rest.
344 		 */
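		/*
		 * A concrete example (figures illustrative): with
		 * bufpages = 100 and nbuf = 16, base = 6 and residual = 4,
		 * so buffers 0-3 map 7 pages each and buffers 4-15 map 6,
		 * for a total of 4*7 + 12*6 = 100 pages.
		 */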
345 		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
346 		curbufsize = NBPG * ((i < residual) ? (base+1) : base);
347 
348 		while (curbufsize) {
349 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
350 			if (pg == NULL)
351 				panic("cpu_startup: not enough memory for "
352 				    "buffer cache");
353 			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
354 					VM_PROT_READ|VM_PROT_WRITE);
355 			curbuf += PAGE_SIZE;
356 			curbufsize -= PAGE_SIZE;
357 		}
358 	}
359 	pmap_update(pmap_kernel());
360 
361 	/*
362 	 * Allocate a submap for exec arguments.  This map effectively
363 	 * limits the number of processes exec'ing at any time.
364 	 */
365 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
366 				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
367 
368 	/*
369 	 * Allocate a submap for physio
370 	 */
371 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
372 				   VM_PHYS_SIZE, 0, FALSE, NULL);
373 
374 	/*
375 	 * Finally, allocate mbuf cluster submap.
376 	 */
377 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
378 				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
379 				 FALSE, NULL);
380 
381 #ifdef DEBUG
382 	pmapdebug = opmapdebug;
383 #endif
384 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
385 	printf("avail memory = %s\n", pbuf);
386 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
387 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
388 
389 	/*
390 	 * Tell the VM system that page 0 isn't mapped.
391 	 *
392 	 * XXX This is bogus; should just fix KERNBASE and
393 	 * XXX VM_MIN_KERNEL_ADDRESS, but not right now.
394 	 */
395 	if (uvm_map_protect(kernel_map, 0, NBPG, UVM_PROT_NONE, TRUE) != 0)
396 		panic("can't mark page 0 off-limits");
397 
398 	/*
399 	 * Tell the VM system that writing to kernel text isn't allowed.
400 	 * If we don't, we might end up COW'ing the text segment!
401 	 *
402 	 * XXX Should be m68k_trunc_page(&kernel_text) instead
403 	 * XXX of NBPG.
404 	 */
405 	if (uvm_map_protect(kernel_map, NBPG, m68k_round_page(&etext),
406 	    UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
407 		panic("can't protect kernel text");
408 
409 	/*
410 	 * Set up CPU-specific registers, cache, etc.
411 	 */
412 	initcpu();
413 
414 	/*
415 	 * Set up buffers, so they can be used to read disk labels.
416 	 */
417 	bufinit();
418 
419 	/* Safe to use malloc for extio_ex now. */
420 	extio_ex_malloc_safe = 1;
421 }
422 
423 /*
424  * Set registers on exec.
425  */
426 void
427 setregs(p, pack, stack)
428 	struct proc *p;
429 	struct exec_package *pack;
430 	u_long stack;
431 {
432 	struct frame *frame = (struct frame *)p->p_md.md_regs;
433 
434 	frame->f_sr = PSL_USERSET;
435 	frame->f_pc = pack->ep_entry & ~1;
436 	frame->f_regs[D0] = 0;
437 	frame->f_regs[D1] = 0;
438 	frame->f_regs[D2] = 0;
439 	frame->f_regs[D3] = 0;
440 	frame->f_regs[D4] = 0;
441 	frame->f_regs[D5] = 0;
442 	frame->f_regs[D6] = 0;
443 	frame->f_regs[D7] = 0;
444 	frame->f_regs[A0] = 0;
445 	frame->f_regs[A1] = 0;
446 	frame->f_regs[A2] = (int)p->p_psstr;
447 	frame->f_regs[A3] = 0;
448 	frame->f_regs[A4] = 0;
449 	frame->f_regs[A5] = 0;
450 	frame->f_regs[A6] = 0;
451 	frame->f_regs[SP] = stack;
452 
453 	/* restore a null state frame */
454 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
455 	if (fputype)
456 		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
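	/*
	 * Restoring a null state frame (fpf_null == 0) is what puts the
	 * FPU back into its reset state, so the new image starts with
	 * clean floating-point state.
	 */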
457 }
458 
459 /*
460  * Info for CTL_HW
461  */
462 char	cpu_model[120];
463 
464 struct hp300_model {
465 	int id;
466 	int mmuid;
467 	const char *name;
468 	const char *speed;
469 };
470 
471 const struct hp300_model hp300_models[] = {
472 	{ HP_320,	-1,		"320",		"16.67"	},
473 	{ HP_330,	-1,		"318/319/330",	"16.67"	},
474 	{ HP_340,	-1,		"340",		"16.67"	},
475 	{ HP_345,	-1,		"345",		"50"	},
476 	{ HP_350,	-1,		"350",		"25"	},
477 	{ HP_360,	-1,		"360",		"25"	},
478 	{ HP_370,	-1,		"370",		"33.33"	},
479 	{ HP_375,	-1,		"375",		"50"	},
480 	{ HP_380,	-1,		"380",		"25"	},
481 	{ HP_385,	-1,		"385",		"33"	},
482 	{ HP_400,	-1,		"400",		"50"	},
483 	{ HP_425,	MMUID_425_T,	"425t",		"25"	},
484 	{ HP_425,	MMUID_425_S,	"425s",		"25"	},
485 	{ HP_425,	MMUID_425_E,	"425e",		"25"	},
486 	{ HP_425,	-1,		"425",		"25"	},
487 	{ HP_433,	MMUID_433_T,	"433t",		"33"	},
488 	{ HP_433,	MMUID_433_S,	"433s",		"33"	},
489 	{ HP_433,	-1,		"433",		"33"	},
490 	{ 0,		-1,		NULL,		NULL	},
491 };
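/*
 * Note the ordering above: for a given id, entries with a specific mmuid
 * must come before the mmuid == -1 catch-all, because identifycpu() takes
 * the first entry whose id matches and whose mmuid is either -1 or equal
 * to the machine's mmuid.
 */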
492 
493 void
494 identifycpu()
495 {
496 	const char *t, *mc, *s;
497 	int i, len;
498 
499 	/*
500 	 * Find the model number.
501 	 */
502 	for (t = s = NULL, i = 0; hp300_models[i].name != NULL; i++) {
503 		if (hp300_models[i].id == machineid) {
504 			if (hp300_models[i].mmuid != -1 &&
505 			    hp300_models[i].mmuid != mmuid)
506 				continue;
507 			t = hp300_models[i].name;
508 			s = hp300_models[i].speed;
509 			break;
510 		}
511 	}
512 	if (t == NULL) {
513 		printf("\nunknown machineid %d\n", machineid);
514 		goto lose;
515 	}
516 
517 	/*
518 	 * ...and the CPU type.
519 	 */
520 	switch (cputype) {
521 	case CPU_68040:
522 		mc = "40";
523 		break;
524 	case CPU_68030:
525 		mc = "30";
526 		break;
527 	case CPU_68020:
528 		mc = "20";
529 		break;
530 	default:
531 		printf("\nunknown cputype %d\n", cputype);
532 		goto lose;
533 	}
534 
535 	sprintf(cpu_model, "HP 9000/%s (%sMHz MC680%s CPU", t, s, mc);
536 
537 	/*
538 	 * ...and the MMU type.
539 	 */
540 	switch (mmutype) {
541 	case MMU_68040:
542 	case MMU_68030:
543 		strcat(cpu_model, "+MMU");
544 		break;
545 	case MMU_68851:
546 		strcat(cpu_model, ", MC68851 MMU");
547 		break;
548 	case MMU_HP:
549 		strcat(cpu_model, ", HP MMU");
550 		break;
551 	default:
552 		printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
553 		panic("startup");
554 	}
555 
556 	len = strlen(cpu_model);
557 
558 	/*
559 	 * ...and the FPU type.
560 	 */
561 	switch (fputype) {
562 	case FPU_68040:
563 		len += sprintf(cpu_model + len, "+FPU");
564 		break;
565 	case FPU_68882:
566 		len += sprintf(cpu_model + len, ", %sMHz MC68882 FPU", s);
567 		break;
568 	case FPU_68881:
569 		len += sprintf(cpu_model + len, ", %sMHz MC68881 FPU",
570 		    machineid == HP_350 ? "20" : "16.67");
571 		break;
572 	default:
573 		len += sprintf(cpu_model + len, ", unknown FPU");
574 	}
575 
576 	/*
577 	 * ...and finally, the cache type.
578 	 */
579 	if (cputype == CPU_68040)
580 		sprintf(cpu_model + len, ", 4k on-chip physical I/D caches");
581 	else {
582 		switch (ectype) {
583 		case EC_VIRT:
584 			sprintf(cpu_model + len,
585 			    ", %dK virtual-address cache",
586 			    machineid == HP_320 ? 16 : 32);
587 			break;
588 		case EC_PHYS:
589 			sprintf(cpu_model + len,
590 			    ", %dK physical-address cache",
591 			    machineid == HP_370 ? 64 : 32);
592 			break;
593 		}
594 	}
595 
596 	strcat(cpu_model, ")");
597 	printf("%s\n", cpu_model);
598 #ifdef DIAGNOSTIC
599 	printf("cpu: delay divisor %d", delay_divisor);
600 	if (mmuid)
601 		printf(", mmuid %d", mmuid);
602 	printf("\n");
603 #endif
604 
605 	/*
606 	 * Now that we have told the user what they have,
607 	 * let them know if that machine type isn't configured.
608 	 */
609 	switch (machineid) {
610 	case -1:		/* keep compilers happy */
611 #if !defined(HP320)
612 	case HP_320:
613 #endif
614 #if !defined(HP330)
615 	case HP_330:
616 #endif
617 #if !defined(HP340)
618 	case HP_340:
619 #endif
620 #if !defined(HP345)
621 	case HP_345:
622 #endif
623 #if !defined(HP350)
624 	case HP_350:
625 #endif
626 #if !defined(HP360)
627 	case HP_360:
628 #endif
629 #if !defined(HP370)
630 	case HP_370:
631 #endif
632 #if !defined(HP375)
633 	case HP_375:
634 #endif
635 #if !defined(HP380)
636 	case HP_380:
637 #endif
638 #if !defined(HP385)
639 	case HP_385:
640 #endif
641 #if !defined(HP400)
642 	case HP_400:
643 #endif
644 #if !defined(HP425)
645 	case HP_425:
646 #endif
647 #if !defined(HP433)
648 	case HP_433:
649 #endif
650 		panic("SPU type not configured");
651 	default:
652 		break;
653 	}
654 
655 	return;
656  lose:
657 	panic("startup");
658 }
659 
660 /*
661  * machine dependent system variables.
662  */
663 int
664 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
665 	int *name;
666 	u_int namelen;
667 	void *oldp;
668 	size_t *oldlenp;
669 	void *newp;
670 	size_t newlen;
671 	struct proc *p;
672 {
673 	dev_t consdev;
674 
675 	/* all sysctl names at this level are terminal */
676 	if (namelen != 1)
677 		return (ENOTDIR);		/* overloaded */
678 
679 	switch (name[0]) {
680 	case CPU_CONSDEV:
681 		if (cn_tab != NULL)
682 			consdev = cn_tab->cn_dev;
683 		else
684 			consdev = NODEV;
685 		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
686 		    sizeof consdev));
687 	default:
688 		return (EOPNOTSUPP);
689 	}
690 	/* NOTREACHED */
691 }
692 
693 int	waittime = -1;
694 
695 void
696 cpu_reboot(howto, bootstr)
697 	int howto;
698 	char *bootstr;
699 {
700 
701 #if __GNUC__	/* XXX work around lame compiler problem (gcc 2.7.2) */
702 	(void)&howto;
703 #endif
704 	/* take a snap shot before clobbering any registers */
705 	if (curproc && curproc->p_addr)
706 		savectx(&curproc->p_addr->u_pcb);
707 
708 	/* If system is cold, just halt. */
709 	if (cold) {
710 		howto |= RB_HALT;
711 		goto haltsys;
712 	}
713 
714 	boothowto = howto;
715 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
716 		waittime = 0;
717 		vfs_shutdown();
718 		/*
719 		 * If we've been adjusting the clock, the todr
720 		 * will be out of synch; adjust it now.
721 		 */
722 		resettodr();
723 	}
724 
725 	/* Disable interrupts. */
726 	splhigh();
727 
728 	/* If rebooting and a dump is requested do it. */
729 	if (howto & RB_DUMP)
730 		dumpsys();
731 
732  haltsys:
733 	/* Run any shutdown hooks. */
734 	doshutdownhooks();
735 
736 #if defined(PANICWAIT) && !defined(DDB)
737 	if ((howto & RB_HALT) == 0 && panicstr) {
738 		printf("hit any key to reboot...\n");
739 		(void)cngetc();
740 		printf("\n");
741 	}
742 #endif
743 
744 	/* Finally, halt/reboot the system. */
745 	if (howto & RB_HALT) {
746 		printf("System halted.  Hit any key to reboot.\n\n");
747 		(void)cngetc();
748 	}
749 
750 	printf("rebooting...\n");
751 	DELAY(1000000);
752 	doboot();
753 	/*NOTREACHED*/
754 }
755 
756 /*
757  * Initialize the kernel crash dump header.
758  */
759 void
760 cpu_init_kcore_hdr()
761 {
762 	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
763 	struct m68k_kcore_hdr *m = &h->un._m68k;
764 	extern int end;
765 
766 	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));
767 
768 	/*
769 	 * Initialize the `dispatcher' portion of the header.
770 	 */
771 	strcpy(h->name, machine);
772 	h->page_size = NBPG;
773 	h->kernbase = KERNBASE;
774 
775 	/*
776 	 * Fill in information about our MMU configuration.
777 	 */
778 	m->mmutype	= mmutype;
779 	m->sg_v		= SG_V;
780 	m->sg_frame	= SG_FRAME;
781 	m->sg_ishift	= SG_ISHIFT;
782 	m->sg_pmask	= SG_PMASK;
783 	m->sg40_shift1	= SG4_SHIFT1;
784 	m->sg40_mask2	= SG4_MASK2;
785 	m->sg40_shift2	= SG4_SHIFT2;
786 	m->sg40_mask3	= SG4_MASK3;
787 	m->sg40_shift3	= SG4_SHIFT3;
788 	m->sg40_addr1	= SG4_ADDR1;
789 	m->sg40_addr2	= SG4_ADDR2;
790 	m->pg_v		= PG_V;
791 	m->pg_frame	= PG_FRAME;
792 
793 	/*
794 	 * Initialize pointer to kernel segment table.
795 	 */
796 	m->sysseg_pa = (u_int32_t)(pmap_kernel()->pm_stpa);
797 
798 	/*
799 	 * Initialize relocation value such that:
800 	 *
801 	 *	pa = (va - KERNBASE) + reloc
802 	 */
803 	m->reloc = lowram;
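	/*
	 * For example, a kernel virtual address of KERNBASE + 0x1000 maps
	 * to physical address lowram + 0x1000; this is what lets a dump
	 * reader translate the kernel addresses it finds in the image.
	 */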
804 
805 	/*
806 	 * Define the end of the relocatable range.
807 	 */
808 	m->relocend = (u_int32_t)&end;
809 
810 	/*
811 	 * hp300 has one contiguous memory segment.
812 	 */
813 	m->ram_segs[0].start = lowram;
814 	m->ram_segs[0].size  = ctob(physmem);
815 }
816 
817 /*
818  * Compute the size of the machine-dependent crash dump header.
819  * Returns size in disk blocks.
820  */
821 int
822 cpu_dumpsize()
823 {
824 	int size;
825 
826 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
827 	return (btodb(roundup(size, dbtob(1))));
828 }
829 
830 /*
831  * Called by dumpsys() to dump the machine-dependent header.
832  */
833 int
834 cpu_dump(dump, blknop)
835 	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
836 	daddr_t *blknop;
837 {
838 	int buf[dbtob(1) / sizeof(int)];
839 	cpu_kcore_hdr_t *chdr;
840 	kcore_seg_t *kseg;
841 	int error;
842 
843 	kseg = (kcore_seg_t *)buf;
844 	chdr = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(kcore_seg_t)) /
845 	    sizeof(int)];
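	/*
	 * The header is assumed to fit in a single disk block: the
	 * kcore_seg_t sits at the front of buf and the cpu_kcore_hdr_t
	 * follows at the next ALIGN() boundary, which is the layout
	 * cpu_dumpsize() accounted for above.
	 */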
846 
847 	/* Create the segment header. */
848 	CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
849 	kseg->c_size = dbtob(1) - ALIGN(sizeof(kcore_seg_t));
850 
851 	memcpy(chdr, &cpu_kcore_hdr, sizeof(cpu_kcore_hdr_t));
852 	error = (*dump)(dumpdev, *blknop, (caddr_t)buf, sizeof(buf));
853 	*blknop += btodb(sizeof(buf));
854 	return (error);
855 }
856 
857 /*
858  * These variables are needed by /sbin/savecore
859  */
860 u_int32_t dumpmag = 0x8fca0101;	/* magic number */
861 int	dumpsize = 0;		/* pages */
862 long	dumplo = 0;		/* blocks */
863 
864 /*
865  * This is called by main to set dumplo and dumpsize.
866  * Dumps always skip the first NBPG of disk space
867  * in case there might be a disk label stored there.
868  * If there is extra space, put dump at the end to
869  * reduce the chance that swapping trashes it.
870  */
871 void
872 cpu_dumpconf()
873 {
874 	const struct bdevsw *bdev;
875 	int chdrsize;	/* size of dump header */
876 	int nblks;	/* size of dump area */
877 
878 	if (dumpdev == NODEV)
879 		return;
880 	bdev = bdevsw_lookup(dumpdev);
881 	if (bdev == NULL)
882 		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
883 	if (bdev->d_psize == NULL)
884 		return;
885 	nblks = (*bdev->d_psize)(dumpdev);
886 	chdrsize = cpu_dumpsize();
887 
888 	dumpsize = btoc(cpu_kcore_hdr.un._m68k.ram_segs[0].size);
889 
890 	/*
891 	 * Check to see if we will fit.  Note we always skip the
892 	 * first NBPG in case there is a disk label there.
893 	 */
894 	if (nblks < (ctod(dumpsize) + chdrsize + ctod(1))) {
895 		dumpsize = 0;
896 		dumplo = -1;
897 		return;
898 	}
899 
900 	/*
901 	 * Put dump at the end of the partition.
902 	 */
903 	dumplo = (nblks - 1) - ctod(dumpsize) - chdrsize;
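	/*
	 * The header then lands at dumplo and the memory image follows it,
	 * ending at block nblks - 2, so the last block of the partition is
	 * left untouched.
	 */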
904 }
905 
906 /*
907  * Dump physical memory onto the dump device.  Called by cpu_reboot().
908  */
909 void
910 dumpsys()
911 {
912 	const struct bdevsw *bdev;
913 	daddr_t blkno;		/* current block to write */
914 				/* dump routine */
915 	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
916 	int pg;			/* page being dumped */
917 	paddr_t maddr;		/* PA being dumped */
918 	int error;		/* error code from (*dump)() */
919 
920 	/* XXX initialized here because of gcc lossage */
921 	maddr = lowram;
922 	pg = 0;
923 
924 	/* Make sure dump device is valid. */
925 	if (dumpdev == NODEV)
926 		return;
927 	bdev = bdevsw_lookup(dumpdev);
928 	if (bdev == NULL)
929 		return;
930 	if (dumpsize == 0) {
931 		cpu_dumpconf();
932 		if (dumpsize == 0)
933 			return;
934 	}
935 	if (dumplo <= 0) {
936 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
937 		    minor(dumpdev));
938 		return;
939 	}
940 	dump = bdev->d_dump;
941 	blkno = dumplo;
942 
943 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
944 	    minor(dumpdev), dumplo);
945 
946 	printf("dump ");
947 
948 	/* Write the dump header. */
949 	error = cpu_dump(dump, &blkno);
950 	if (error)
951 		goto bad;
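	/* A failed header write reuses the per-page error switch below. */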
952 
953 	for (pg = 0; pg < dumpsize; pg++) {
954 #define NPGMB	(1024*1024/NBPG)
955 		/* print out how many MBs we have dumped */
956 		if (pg && (pg % NPGMB) == 0)
957 			printf("%d ", pg / NPGMB);
958 #undef NPGMB
959 		pmap_enter(pmap_kernel(), (vaddr_t)vmmap, maddr,
960 		    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
961 
962 		pmap_update(pmap_kernel());
963 		error = (*dump)(dumpdev, blkno, vmmap, NBPG);
964  bad:
965 		switch (error) {
966 		case 0:
967 			maddr += NBPG;
968 			blkno += btodb(NBPG);
969 			break;
970 
971 		case ENXIO:
972 			printf("device bad\n");
973 			return;
974 
975 		case EFAULT:
976 			printf("device not ready\n");
977 			return;
978 
979 		case EINVAL:
980 			printf("area improper\n");
981 			return;
982 
983 		case EIO:
984 			printf("i/o error\n");
985 			return;
986 
987 		case EINTR:
988 			printf("aborted from console\n");
989 			return;
990 
991 		default:
992 			printf("error %d\n", error);
993 			return;
994 		}
995 	}
996 	printf("succeeded\n");
997 }
998 
999 void
1000 initcpu()
1001 {
1002 
1003 #ifdef MAPPEDCOPY
1004 	/*
1005 	 * Initialize lower bound for doing copyin/copyout using
1006 	 * page mapping (if not already set).  We don't do this on
1007 	 * VAC machines as it loses big time.
1008 	 */
1009 	if (ectype == EC_VIRT)
1010 		mappedcopysize = -1;	/* in case it was patched */
1011 	else
1012 		mappedcopysize = NBPG;
1013 #endif
1014 	parityenable();
1015 #ifdef USELEDS
1016 	ledinit();
1017 #endif
1018 }
1019 
1020 void
1021 straytrap(pc, evec)
1022 	int pc;
1023 	u_short evec;
1024 {
1025 	printf("unexpected trap (vector offset %x) from %x\n",
1026 	       evec & 0xFFF, pc);
1027 }
1028 
1029 /* XXX should change the interface, and make one badaddr() function */
1030 
1031 int	*nofault;
1032 
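/*
 * badaddr() and badbaddr() probe a word or a byte at the given address.
 * Each points nofault at a local setjmp buffer first; if the access
 * faults, the bus-error trap handler notices nofault is set and longjmps
 * back here instead of panicking, so the probe just returns 1.
 */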
1033 int
1034 badaddr(addr)
1035 	caddr_t addr;
1036 {
1037 	int i;
1038 	label_t	faultbuf;
1039 
1040 	nofault = (int *) &faultbuf;
1041 	if (setjmp((label_t *)nofault)) {
1042 		nofault = (int *) 0;
1043 		return(1);
1044 	}
1045 	i = *(volatile short *)addr;
1046 	nofault = (int *) 0;
1047 	return(0);
1048 }
1049 
1050 int
1051 badbaddr(addr)
1052 	caddr_t addr;
1053 {
1054 	int i;
1055 	label_t	faultbuf;
1056 
1057 	nofault = (int *) &faultbuf;
1058 	if (setjmp((label_t *)nofault)) {
1059 		nofault = (int *) 0;
1060 		return(1);
1061 	}
1062 	i = *(volatile char *)addr;
1063 	nofault = (int *) 0;
1064 	return(0);
1065 }
1066 
1067 /*
1068  * lookup_bootinfo:
1069  *
1070  *	Look up information in bootinfo from boot loader.
1071  */
1072 void *
1073 lookup_bootinfo(type)
1074 	int type;
1075 {
1076 	struct btinfo_common *bt;
1077 	char *help = (char *)bootinfo_va;
1078 
1079 	/* Check for a bootinfo record first. */
1080 	if (help == NULL)
1081 		return (NULL);
1082 
1083 	do {
1084 		bt = (struct btinfo_common *)help;
1085 		if (bt->type == type)
1086 			return (help);
1087 		help += bt->next;
1088 	} while (bt->next != 0 &&
1089 		 (size_t)help < (size_t)bootinfo_va + BOOTINFO_SIZE);
1090 
1091 	return (NULL);
1092 }
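/*
 * Typical use is the MAGIC check in hp300_init() above:
 * lookup_bootinfo(BTINFO_MAGIC) returns a pointer to the btinfo_magic
 * record, or NULL if the boot loader supplied no such record.  The walk
 * assumes each record's `next' field gives the offset from that record
 * to the following one, with 0 terminating the list.
 */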
1093 
1094 #ifdef PANICBUTTON
1095 /*
1096  * Declare these so they can be patched.
1097  */
1098 int panicbutton = 1;	/* non-zero if panic buttons are enabled */
1099 int candbdiv = 2;	/* give em half a second (hz / candbdiv) */
1100 
1101 void	candbtimer __P((void *));
1102 
1103 int crashandburn;
1104 
1105 struct callout candbtimer_ch = CALLOUT_INITIALIZER;
1106 
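/*
 * How the sequence works: the first keyboard NMI sets crashandburn and
 * arms candbtimer_ch; a second NMI arriving before the timeout fires
 * (hz / candbdiv ticks, roughly half a second with the default candbdiv)
 * makes nmihand() below force a panic, otherwise candbtimer() just
 * clears the flag again.
 */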
1107 void
1108 candbtimer(arg)
1109 	void *arg;
1110 {
1111 
1112 	crashandburn = 0;
1113 }
1114 #endif /* PANICBUTTON */
1115 
1116 static int innmihand;	/* simple mutex */
1117 
1118 /*
1119  * Level 7 interrupts can be caused by the keyboard or parity errors.
1120  */
1121 void
1122 nmihand(frame)
1123 	struct frame frame;
1124 {
1125 
1126 	/* Prevent unwanted recursion. */
1127 	if (innmihand)
1128 		return;
1129 	innmihand = 1;
1130 
1131 #if NHIL > 0
1132 	/* Check for keyboard <CTRL>+<SHIFT>+<RESET>. */
1133 	if (kbdnmi()) {
1134 		printf("Got a keyboard NMI");
1135 
1136 		/*
1137 		 * We can:
1138 		 *
1139 		 *	- enter DDB
1140 		 *
1141 		 *	- Start the crashandburn sequence
1142 		 *
1143 		 *	- Ignore it.
1144 		 */
1145 #ifdef DDB
1146 		printf(": entering debugger\n");
1147 		Debugger();
1148 #else
1149 #ifdef PANICBUTTON
1150 		if (panicbutton) {
1151 			if (crashandburn) {
1152 				crashandburn = 0;
1153 				printf(": CRASH AND BURN!\n");
1154 				panic("forced crash");
1155 			} else {
1156 				/* Start the crashandburn sequence */
1157 				printf("\n");
1158 				crashandburn = 1;
1159 				callout_reset(&candbtimer_ch, hz / candbdiv,
1160 				    candbtimer, NULL);
1161 			}
1162 		} else
1163 #endif /* PANICBUTTON */
1164 			printf(": ignoring\n");
1165 #endif /* DDB */
1166 
1167 		goto nmihand_out;	/* no more work to do */
1168 	}
1169 #endif
1170 
1171 	if (parityerror(&frame))
1172 		return;
1173 	/* panic?? */
1174 	printf("unexpected level 7 interrupt ignored\n");
1175 
1176 #if NHIL > 0
1177 nmihand_out:
1178 	innmihand = 0;
1179 #endif
1180 }
1181 
1182 /*
1183  * Parity error section.  Contains magic.
1184  */
1185 #define PARREG		((volatile short *)IIOV(0x5B0000))
1186 static int gotparmem = 0;
1187 #ifdef DEBUG
1188 int ignorekperr = 0;	/* ignore kernel parity errors */
1189 #endif
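/*
 * PARREG appears to be the parity control/status register: parityenable()
 * writes 1 to turn checking on (under a nofault probe, since not every
 * model has parity memory), and parityerror() pulses it 0-then-1 to clear
 * a latched error before hunting for the bad location.
 */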
1190 
1191 /*
1192  * Enable parity detection
1193  */
1194 void
1195 parityenable()
1196 {
1197 	label_t	faultbuf;
1198 
1199 	nofault = (int *) &faultbuf;
1200 	if (setjmp((label_t *)nofault)) {
1201 		nofault = (int *) 0;
1202 		printf("Parity detection disabled\n");
1203 		return;
1204 	}
1205 	*PARREG = 1;
1206 	nofault = (int *) 0;
1207 	gotparmem = 1;
1208 }
1209 
1210 /*
1211  * Determine if level 7 interrupt was caused by a parity error
1212  * and deal with it if it was.  Returns 1 if it was a parity error.
1213  */
1214 int
1215 parityerror(fp)
1216 	struct frame *fp;
1217 {
1218 	if (!gotparmem)
1219 		return(0);
1220 	*PARREG = 0;
1221 	DELAY(10);
1222 	*PARREG = 1;
1223 	if (panicstr) {
1224 		printf("parity error after panic ignored\n");
1225 		return(1);
1226 	}
1227 	if (!parityerrorfind())
1228 		printf("WARNING: transient parity error ignored\n");
1229 	else if (USERMODE(fp->f_sr)) {
1230 		printf("pid %d: parity error\n", curproc->p_pid);
1231 		uprintf("sorry, pid %d killed due to memory parity error\n",
1232 			curproc->p_pid);
1233 		psignal(curproc, SIGKILL);
1234 #ifdef DEBUG
1235 	} else if (ignorekperr) {
1236 		printf("WARNING: kernel parity error ignored\n");
1237 #endif
1238 	} else {
1239 		regdump((struct trapframe *)fp, 128);
1240 		panic("kernel parity error");
1241 	}
1242 	return(1);
1243 }
1244 
1245 /*
1246  * Yuk!  There has got to be a better way to do this!
1247  * Searching all of memory with interrupts blocked can lead to disaster.
1248  */
1249 int
1250 parityerrorfind()
1251 {
1252 	static label_t parcatch;
1253 	static int looking = 0;
1254 	volatile int pg, o, s;
1255 	volatile int *ip;
1256 	int i;
1257 	int found;
1258 
1259 #ifdef lint
1260 	i = o = pg = 0; if (i) return(0);
1261 #endif
1262 	/*
1263 	 * If looking is true we are searching for a known parity error
1264 	 * and it has just occurred.  All we do is return to the higher
1265 	 * level invocation.
1266 	 */
1267 	if (looking)
1268 		longjmp(&parcatch);
1269 	s = splhigh();
1270 	/*
1271 	 * If setjmp returns true, the parity error we were searching
1272 	 * for has just occurred (longjmp above) at the current pg+o
1273 	 */
1274 	if (setjmp(&parcatch)) {
1275 		printf("Parity error at 0x%x\n", ctob(pg)|o);
1276 		found = 1;
1277 		goto done;
1278 	}
1279 	/*
1280 	 * If we get here, a parity error has occurred for the first time
1281 	 * and we need to find it.  We turn off any external caches and
1282 	 * loop thru memory, testing every longword til a fault occurs and
1283 	 * we regain control at setjmp above.  Note that because of the
1284 	 * setjmp, pg and o need to be volatile or their values will be lost.
1285 	 */
1286 	looking = 1;
1287 	ecacheoff();
1288 	for (pg = btoc(lowram); pg < btoc(lowram)+physmem; pg++) {
1289 		pmap_enter(pmap_kernel(), (vaddr_t)vmmap, ctob(pg),
1290 		    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
1291 		pmap_update(pmap_kernel());
1292 		ip = (int *)vmmap;
1293 		for (o = 0; o < NBPG; o += sizeof(int))
1294 			i = *ip++;
1295 	}
1296 	/*
1297 	 * Getting here implies no fault was found.  Should never happen.
1298 	 */
1299 	printf("Couldn't locate parity error\n");
1300 	found = 0;
1301 done:
1302 	looking = 0;
1303 	pmap_remove(pmap_kernel(), (vaddr_t)vmmap, (vaddr_t)&vmmap[NBPG]);
1304 	pmap_update(pmap_kernel());
1305 	ecacheon();
1306 	splx(s);
1307 	return(found);
1308 }
1309 
1310 /*
1311  * cpu_exec_aout_makecmds():
1312  *	cpu-dependent a.out format hook for execve().
1313  *
1314  * Determine whether the given exec package refers to something which we
1315  * understand and, if so, set up the vmcmds for it.
1316  *
1317  * XXX what are the special cases for the hp300?
1318  * XXX why is this COMPAT_NOMID?  was something generating
1319  *	hp300 binaries with an a_mid of 0?  i thought that was only
1320  *	done on little-endian machines...  -- cgd
1321  */
1322 int
1323 cpu_exec_aout_makecmds(p, epp)
1324 	struct proc *p;
1325 	struct exec_package *epp;
1326 {
1327 #if defined(COMPAT_NOMID) || defined(COMPAT_44)
1328 	u_long midmag, magic;
1329 	u_short mid;
1330 	int error;
1331 	struct exec *execp = epp->ep_hdr;
1332 
1333 	midmag = ntohl(execp->a_midmag);
1334 	mid = (midmag >> 16) & 0xffff;
1335 	magic = midmag & 0xffff;
1336 
1337 	midmag = mid << 16 | magic;
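	/*
	 * a_midmag carries the magic number in its low 16 bits with the
	 * machine ID above it; re-packing mid and magic here normalizes the
	 * value so the switch below can match the two old-style ZMAGIC
	 * variants (a_mid of 0, and MID_HP300) handled by
	 * exec_aout_prep_oldzmagic().
	 */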
1338 
1339 	switch (midmag) {
1340 #ifdef COMPAT_NOMID
1341 	case (MID_ZERO << 16) | ZMAGIC:
1342 		error = exec_aout_prep_oldzmagic(p, epp);
1343 		return(error);
1344 #endif
1345 #ifdef COMPAT_44
1346 	case (MID_HP300 << 16) | ZMAGIC:
1347 		error = exec_aout_prep_oldzmagic(p, epp);
1348 		return(error);
1349 #endif
1350 	}
1351 #endif /* defined(COMPAT_NOMID) || defined(COMPAT_44) */
1352 
1353 	return ENOEXEC;
1354 }
1355