xref: /netbsd/sys/arch/hppa/hppa/machdep.c (revision 27b72179)
1 /*	$NetBSD: machdep.c,v 1.18 2022/09/29 06:39:59 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matthew Fredette.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*	$OpenBSD: machdep.c,v 1.40 2001/09/19 20:50:56 mickey Exp $	*/
33 
34 /*
35  * Copyright (c) 1999-2003 Michael Shalayeff
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
51  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
52  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
53  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
56  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
57  * THE POSSIBILITY OF SUCH DAMAGE.
58  */
59 
60 #include <sys/cdefs.h>
61 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.18 2022/09/29 06:39:59 skrll Exp $");
62 
63 #include "opt_cputype.h"
64 #include "opt_ddb.h"
65 #include "opt_kgdb.h"
66 #include "opt_modular.h"
67 #include "opt_useleds.h"
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/signalvar.h>
72 #include <sys/kernel.h>
73 #include <sys/proc.h>
74 #include <sys/buf.h>
75 #include <sys/cpu.h>
76 #include <sys/reboot.h>
77 #include <sys/device.h>
78 #include <sys/conf.h>
79 #include <sys/file.h>
80 #include <sys/callout.h>
81 #include <sys/mbuf.h>
82 #include <sys/msgbuf.h>
83 #include <sys/ioctl.h>
84 #include <sys/tty.h>
85 #include <sys/exec.h>
86 #include <sys/exec_aout.h>		/* for MID_* */
87 #include <sys/sysctl.h>
88 #include <sys/core.h>
89 #include <sys/kcore.h>
90 #include <sys/module.h>
91 #include <sys/extent.h>
92 #include <sys/ksyms.h>
93 #include <sys/mount.h>
94 #include <sys/mutex.h>
95 #include <sys/syscallargs.h>
96 
97 #include <uvm/uvm_page.h>
98 #include <uvm/uvm.h>
99 
100 #include <dev/cons.h>
101 #include <dev/mm.h>
102 
103 #include <machine/pdc.h>
104 #include <machine/iomod.h>
105 #include <machine/psl.h>
106 #include <machine/reg.h>
107 #include <machine/cpufunc.h>
108 #include <machine/autoconf.h>
109 #include <machine/bootinfo.h>
110 #include <machine/kcore.h>
111 #include <machine/pcb.h>
112 
113 #ifdef	KGDB
114 #include "com.h"
115 #endif
116 
117 #ifdef DDB
118 #include <machine/db_machdep.h>
119 #include <ddb/db_access.h>
120 #include <ddb/db_sym.h>
121 #include <ddb/db_extern.h>
122 #endif
123 
124 #include <hppa/hppa/machdep.h>
125 #include <hppa/hppa/pim.h>
126 #include <hppa/dev/cpudevs.h>
127 
128 #include "ksyms.h"
129 #include "lcd.h"
130 
131 #ifdef MACHDEPDEBUG
132 
133 #define	DPRINTF(s)	do {		\
134 	if (machdepdebug)		\
135 		printf s;		\
136 } while(0)
137 
138 #define	DPRINTFN(l,s)	do {		\
139 	if (machdepdebug >= (l))	\
140 		printf s;		\
141 } while(0)
142 
143 int machdepdebug = 1;
144 #else
145 #define	DPRINTF(s)	/* */
146 #define	DPRINTFN(l,s)	/* */
147 #endif
148 
149 /*
150  * Different kinds of flags used throughout the kernel.
151  */
152 void *msgbufaddr;
153 
154 /* The primary (aka monarch) cpu HPA */
155 hppa_hpa_t hppa_mcpuhpa;
156 
157 /*
158  * cache configuration, for most machines is the same
159  * numbers, so it makes sense to do defines w/ numbers depending
160  * on configured CPU types in the kernel
161  */
162 int icache_stride, icache_line_mask;
163 int dcache_stride, dcache_line_mask;
164 
165 /*
166  * things to not kill
167  */
168 volatile uint8_t *machine_ledaddr;
169 int machine_ledword, machine_leds;
170 
171 /*
172  * This flag is nonzero iff page zero is mapped.
173  * It is initialized to 1, because before we go
174  * virtual, page zero *is* available.  It is set
175  * to zero right before we go virtual.
176  */
177 static int pagezero_mapped = 1;
178 
179 /*
180  * CPU params (should be the same for all cpus in the system)
181  */
182 struct pdc_cache pdc_cache;
183 struct pdc_btlb pdc_btlb;
184 struct pdc_model pdc_model;
185 
186 int usebtlb;
187 
188 /*
189  * The BTLB slots.
190  */
191 static struct btlb_slot {
192 
193 	/* The number associated with this slot. */
194 	int btlb_slot_number;
195 
196 	/* The flags associated with this slot. */
197 	int btlb_slot_flags;
198 #define	BTLB_SLOT_IBTLB			(1 << 0)
199 #define	BTLB_SLOT_DBTLB			(1 << 1)
200 #define	BTLB_SLOT_CBTLB			(BTLB_SLOT_IBTLB | BTLB_SLOT_DBTLB)
201 #define	BTLB_SLOT_VARIABLE_RANGE	(1 << 2)
202 
203 	/*
204 	 * The mapping information.  A mapping is free
205 	 * if its btlb_slot_frames member is zero.
206 	 */
207 	pa_space_t btlb_slot_va_space;
208 	vaddr_t	btlb_slot_va_frame;
209 	paddr_t btlb_slot_pa_frame;
210 	vsize_t btlb_slot_frames;
211 	u_int btlb_slot_tlbprot;
212 } *btlb_slots;
213 int	btlb_slots_count;
214 
215 /* w/ a little deviation should be the same for all installed cpus */
216 u_int	cpu_ticksnum, cpu_ticksdenom, cpu_hzticks;
217 
218 /* exported info */
219 char	machine[] = MACHINE;
220 const struct hppa_cpu_info *hppa_cpu_info;
221 enum hppa_cpu_type cpu_type;
222 int	cpu_modelno;
223 int	cpu_revision;
224 
225 #if NLCD > 0
226 bool	lcd_blink_p;
227 #endif
228 
229 /*
230  * exported methods for cpus
231  */
232 int (*cpu_desidhash)(void);
233 int (*cpu_hpt_init)(vaddr_t, vsize_t);
234 int (*cpu_ibtlb_ins)(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int);
235 int (*cpu_dbtlb_ins)(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int);
236 
237 dev_t	bootdev;
238 int	totalphysmem;		/* # pages in system */
239 int	availphysmem;		/* # pages available to kernel */
240 int	esym;
241 paddr_t	avail_end;
242 
243 /*
244  * Our copy of the bootinfo struct passed to us by the boot loader.
245  */
246 struct bootinfo bootinfo;
247 
248 /*
249  * XXX note that 0x12000 is the old kernel text start
250  * address.  Memory below this is assumed to belong
251  * to the firmware.  This value is converted into pages
252  * by hppa_init and used as pages in pmap_bootstrap().
253  */
254 int	resvmem = 0x12000;
255 int	resvphysmem;
256 
257 /*
258  * BTLB parameters, broken out for the MI hppa code.
259  */
260 u_int hppa_btlb_size_min, hppa_btlb_size_max;
261 
262 /*
263  * Things for MI glue to stick on.
264  */
265 struct extent *hppa_io_extent;
266 static long hppa_io_extent_store[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)];
267 
268 struct pool hppa_fppl;
269 struct fpreg lwp0_fpregs;
270 
271 /* Our exported CPU info */
272 struct cpu_info cpus[HPPA_MAXCPUS] = {
273 #ifdef MULTIPROCESSOR
274 	{
275 		.ci_curlwp = &lwp0,
276 	},
277 #endif
278 };
279 
280 struct vm_map *phys_map = NULL;
281 
282 void delay_init(void);
283 static inline void fall(int, int, int, int, int);
284 void dumpsys(void);
285 void cpuid(void);
286 enum hppa_cpu_type cpu_model_cpuid(int);
287 #if NLCD > 0
288 void blink_lcd_timeout(void *);
289 #endif
290 
291 /*
292  * widely used hardware params
293  */
294 struct pdc_hwtlb pdc_hwtlb;
295 struct pdc_coproc pdc_coproc;
296 struct pdc_coherence pdc_coherence;
297 struct pdc_spidb pdc_spidbits;
298 struct pdc_pim pdc_pim;
299 struct pdc_model pdc_model;
300 
301 /*
302  * Debugger info.
303  */
304 int hppa_kgdb_attached;
305 
306 /*
307  * Whatever CPU types we support
308  */
309 extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
310 extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
311 extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
312 extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
313 extern const u_int itlb_u[], itlbna_u[], dtlb_u[], dtlbna_u[], tlbd_u[];
314 
315 int iibtlb_s(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int);
316 int idbtlb_s(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int);
317 int ibtlb_t(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int);
318 int ibtlb_l(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int);
319 int ibtlb_u(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int);
320 int ibtlb_g(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int);
321 int pbtlb_g(int);
322 int pbtlb_u(int);
323 int hpti_l(vaddr_t, vsize_t);
324 int hpti_u(vaddr_t, vsize_t);
325 int hpti_g(vaddr_t, vsize_t);
326 int desidhash_x(void);
327 int desidhash_s(void);
328 int desidhash_t(void);
329 int desidhash_l(void);
330 int desidhash_u(void);
331 
332 const struct hppa_cpu_info cpu_types[] = {
333 #ifdef HP7000_CPU
334 	{ "PA7000", NULL, "PCX",
335 	  hpcx,  0,
336 	  0, "1.0",
337 	  desidhash_x, itlb_x, dtlb_x, itlbna_x, dtlbna_x, tlbd_x,
338 	  ibtlb_g, NULL, pbtlb_g, NULL }, /* XXXNH check */
339 #endif
340 #ifdef HP7000_CPU
341 	{ "PA7000", NULL, "PCXS",
342 	  hpcxs,  0,
343 	  0, "1.1a",
344 	  desidhash_s, itlb_s, dtlb_s, itlbna_s, dtlbna_s, tlbd_s,
345 	  ibtlb_g, NULL, pbtlb_g, NULL },
346 #endif
347 #ifdef HP7100_CPU
348 	{ "PA7100", "T-Bird", "PCXT",
349 	  hpcxt, 0,
350 	  HPPA_FTRS_BTLBU, "1.1b",
351 	  desidhash_t, itlb_t, dtlb_t, itlbna_t, dtlbna_t, tlbd_t,
352 	  ibtlb_g, NULL, pbtlb_g, NULL },
353 #endif
354 #ifdef HP7100LC_CPU
355 	{ "PA7100LC", "Hummingbird", "PCXL",
356 	  hpcxl, HPPA_CPU_PCXL,
357 	  HPPA_FTRS_TLBU | HPPA_FTRS_BTLBU | HPPA_FTRS_HVT, "1.1c",
358 	  desidhash_l, itlb_l, dtlb_l, itlbna_l, dtlbna_l, tlbd_l,
359 	  ibtlb_g, NULL, pbtlb_g, hpti_g },
360 #endif
361 #ifdef HP7200_CPU
362 	{ "PA7200", "T-Bird", "PCXT'",
363 	  hpcxtp, HPPA_CPU_PCXT2,
364 	  HPPA_FTRS_BTLBU, "1.1d",
365 	  desidhash_t, itlb_t, dtlb_t, itlbna_t, dtlbna_t, tlbd_t,
366 	  ibtlb_g, NULL, pbtlb_g, NULL },
367 #endif
368 #ifdef HP7300LC_CPU
369 	{ "PA7300LC", "Velociraptor", "PCXL2",
370 	  hpcxl2, HPPA_CPU_PCXL2,
371 	  HPPA_FTRS_TLBU | HPPA_FTRS_BTLBU | HPPA_FTRS_HVT, "1.1e",
372 	  NULL, itlb_l, dtlb_l, itlbna_l, dtlbna_l, tlbd_l,
373 	  ibtlb_g, NULL, pbtlb_g, hpti_g },
374 #endif
375 #ifdef HP8000_CPU
376 	{ "PA8000", "Onyx", "PCXU",
377 	  hpcxu, HPPA_CPU_PCXU,
378 	  HPPA_FTRS_W32B, "2.0",
379 	  desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u,
380  	  ibtlb_u, NULL, pbtlb_u, NULL },
381 #endif
382 #ifdef HP8200_CPU
383 	{ "PA8200", "Vulcan", "PCXU+",
384 	  hpcxup, HPPA_CPU_PCXUP,
385 	  HPPA_FTRS_W32B, "2.0",
386 	  desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u,
387  	  ibtlb_u, NULL, pbtlb_u, NULL },
388 #endif
389 #ifdef HP8500_CPU
390 	{ "PA8500", "Barra'Cuda", "PCXW",
391 	  hpcxw, HPPA_CPU_PCXW,
392 	  HPPA_FTRS_W32B, "2.0",
393 	  desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u,
394  	  ibtlb_u, NULL, pbtlb_u, NULL },
395 #endif
396 #ifdef HP8600_CPU
397 	{ "PA8600", "Landshark", "PCXW+",
398 	  hpcxwp, HPPA_CPU_PCXWP,
399 	  HPPA_FTRS_W32B, "2.0",
400 	  desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u,
401  	  ibtlb_u, NULL, pbtlb_u, NULL },
402 #endif
403 #ifdef HP8700_CPU
404 	{ "PA8700", "Piranha", "PCXW2",
405 	  hpcxw2, HPPA_CPU_PCXW2,
406 	  HPPA_FTRS_W32B, "2.0",
407 	  desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u,
408  	  ibtlb_u, NULL, pbtlb_u, NULL },
409 #endif
410 #ifdef HP8800_CPU
411 	{ "PA8800", "Mako", "Make",
412 	  mako, HPPA_CPU_PCXW2,
413 	  HPPA_FTRS_W32B, "2.0",
414 	  desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u,
415  	  ibtlb_u, NULL, pbtlb_u, NULL },
416 #endif
417 #ifdef HP8900_CPU
418 	{ "PA8900", "Shortfin", "Shortfin",
419 	  mako, HPPA_CPU_PCXW2,
420 	  HPPA_FTRS_W32B, "2.0",
421 	  desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u,
422  	  ibtlb_u, NULL, pbtlb_u, NULL },
423 #endif
424 };
425 
426 void
427 hppa_init(paddr_t start, void *bi)
428 {
429 	vaddr_t vstart;
430 	vaddr_t v;
431 	int error;
432 	u_int *p, *q;
433 	struct btlb_slot *btlb_slot;
434 	int btlb_slot_i;
435 	struct btinfo_symtab *bi_sym;
436 	struct pcb *pcb0;
437 	struct cpu_info *ci;
438 
439 #ifdef KGDB
440 	boothowto |= RB_KDB;	/* go to kgdb early if compiled in. */
441 #endif
442 	/* Setup curlwp/curcpu early for LOCKDEBUG and spl* */
443 #ifdef MULTIPROCESSOR
444 	mtctl(&cpus[0], CR_CURCPU);
445 #else
446 	mtctl(&lwp0, CR_CURLWP);
447 #endif
448 	lwp0.l_cpu = &cpus[0];
449 
450 	/* curcpu() is now valid */
451 	ci = curcpu();
452 
453 	ci->ci_psw =
454 		PSW_Q |         /* Interrupt State Collection Enable */
455 		PSW_P |         /* Protection Identifier Validation Enable */
456 		PSW_C |         /* Instruction Address Translation Enable */
457 		PSW_D;          /* Data Address Translation Enable */
458 
459 	/* Copy bootinfo */
460 	if (bi != NULL)
461 		memcpy(&bootinfo, bi, sizeof(struct bootinfo));
462 
463 	/* init PDC iface, so we can call em easy */
464 	pdc_init();
465 
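	/*
	 * PAGE0->mem_10msec is the number of Interval Timer ticks in
	 * 10ms, so (mem_10msec * 100) is ticks per second; dividing by
	 * hz gives the number of ticks per clock interrupt.
	 */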
466 	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
467 
468 	/* calculate CPU clock ratio */
469 	delay_init();
470 
471 	/* fetch the monarch/"default" cpu hpa */
472 	error = pdcproc_hpa_processor(&hppa_mcpuhpa);
473 	if (error < 0)
474 		panic("%s: PDC_HPA failed", __func__);
475 
476 	/* cache parameters */
477 	error = pdcproc_cache(&pdc_cache);
478 	if (error < 0) {
479 		DPRINTF(("WARNING: PDC_CACHE error %d\n", error));
480 	}
481 
482 	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
483 	dcache_stride = pdc_cache.dc_stride;
484 	icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1;
485 	icache_stride = pdc_cache.ic_stride;
486 
487 	error = pdcproc_cache_spidbits(&pdc_spidbits);
488 	DPRINTF(("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error));
489 
490 	/* Calculate the OS_HPMC handler checksums. */
491 	p = os_hpmc;
492 	if (pdcproc_instr(p))
493 		*p = 0x08000240;
494 	p[7] = ((char *) &os_hpmc_cont_end) - ((char *) &os_hpmc_cont);
495 	p[6] = (u_int) &os_hpmc_cont;
496 	p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);
497 	p = &os_hpmc_cont;
498 	q = os_hpmc_checksum;
499 	for (*q = 0; p < q; *q -= *(p++));
500 
501 	/* Calculate the OS_TOC handler checksum. */
502 	p = (u_int *) &os_toc;
503 	q = os_toc_checksum;
504 	for (*q = 0; p < q; *q -= *(p++));
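	/*
	 * Both checksum loops above store the negated sum of the handler
	 * words, so the installed handler (checksum word included) sums
	 * to zero, which is what firmware expects when it validates the
	 * vectors.
	 */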
505 
506 	/* Install the OS_TOC handler. */
507 	PAGE0->ivec_toc = os_toc;
508 	PAGE0->ivec_toclen = ((char *) &os_toc_end) - ((char *) &os_toc);
509 
510 	cpuid();
511 	ptlball();
512 	fcacheall();
513 
514 	avail_end = trunc_page(PAGE0->imm_max_mem);
515 	totalphysmem = atop(avail_end);
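	/*
	 * The kernel maps physical memory VA == PA, and the gateway page
	 * lives at SYSCALLGATE in every space, so usable physical memory
	 * is clamped below that address.
	 */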
516 	if (avail_end > SYSCALLGATE)
517 		avail_end = SYSCALLGATE;
518 	physmem = atop(avail_end);
519 	resvmem = atop(resvmem);	/* XXXNH */
520 
521 	/* we hope this won't fail */
522 	hppa_io_extent = extent_create("io",
523 	    HPPA_IOSPACE, 0xffffffff,
524 	    (void *)hppa_io_extent_store, sizeof(hppa_io_extent_store),
525 	    EX_NOCOALESCE|EX_NOWAIT);
526 
527 	vstart = round_page(start);
528 
529 	/*
530 	 * Now allocate kernel dynamic variables
531 	 */
532 
533 	/* Allocate the msgbuf. */
534 	msgbufaddr = (void *) vstart;
535 	vstart += MSGBUFSIZE;
536 	vstart = round_page(vstart);
537 
538 	if (usebtlb) {
539 		/* Allocate and initialize the BTLB slots array. */
540 		btlb_slots = (struct btlb_slot *) ALIGN(vstart);
541 		btlb_slot = btlb_slots;
542 #define BTLB_SLOTS(count, flags)					\
543 do {									\
544 	for (btlb_slot_i = 0;						\
545 	     btlb_slot_i < pdc_btlb.count;				\
546 	     btlb_slot_i++) {						\
547 		btlb_slot->btlb_slot_number = (btlb_slot - btlb_slots);	\
548 		btlb_slot->btlb_slot_flags = flags;			\
549 		btlb_slot->btlb_slot_frames = 0;			\
550 		btlb_slot++;						\
551 	}								\
552 } while (/* CONSTCOND */ 0)
553 
554 		BTLB_SLOTS(finfo.num_i, BTLB_SLOT_IBTLB);
555 		BTLB_SLOTS(finfo.num_d, BTLB_SLOT_DBTLB);
556 		BTLB_SLOTS(finfo.num_c, BTLB_SLOT_CBTLB);
557 		BTLB_SLOTS(vinfo.num_i, BTLB_SLOT_IBTLB | BTLB_SLOT_VARIABLE_RANGE);
558 		BTLB_SLOTS(vinfo.num_d, BTLB_SLOT_DBTLB | BTLB_SLOT_VARIABLE_RANGE);
559 		BTLB_SLOTS(vinfo.num_c, BTLB_SLOT_CBTLB | BTLB_SLOT_VARIABLE_RANGE);
560 #undef BTLB_SLOTS
561 
562 		btlb_slots_count = (btlb_slot - btlb_slots);
563 		vstart = round_page((vaddr_t) btlb_slot);
564 	}
565 
566 	v = vstart;
567 
568 	/* sets resvphysmem */
569 	pmap_bootstrap(v);
570 
571 	/*
572 	 * BELOW THIS LINE REFERENCING PAGE0 AND OTHER LOW MEMORY
573 	 * LOCATIONS, AND WRITING THE KERNEL TEXT ARE PROHIBITED
574 	 * WITHOUT TAKING SPECIAL MEASURES.
575 	 */
576 
577 	DPRINTF(("%s: PDC_CHASSIS\n", __func__));
578 
579 	/* they say PDC_COPROC might turn fault light on */
580 	pdcproc_chassis_display(PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0);
581 
582 	DPRINTF(("%s: intr bootstrap\n", __func__));
583 	/* Bootstrap interrupt masking and dispatching. */
584 	hppa_intr_initialise(ci);
585 
586 	/*
587 	 * Initialize any debugger.
588 	 */
589 #ifdef KGDB
590 	/*
591 	 * XXX note that we're not virtual yet, yet these
592 	 * KGDB attach functions will be using bus_space(9)
593 	 * to map and manipulate their devices.  This only
594 	 * works because, currently, the mainbus.c bus_space
595 	 * implementation directly-maps things in I/O space.
596 	 */
597 	hppa_kgdb_attached = false;
598 #if NCOM > 0
599 	if (!strcmp(KGDB_DEVNAME, "com")) {
600 		int com_gsc_kgdb_attach(void);
601 		if (com_gsc_kgdb_attach() == 0)
602 			hppa_kgdb_attached = true;
603 	}
604 #endif /* NCOM > 0 */
605 #endif /* KGDB */
606 
607 #if NKSYMS || defined(DDB) || defined(MODULAR)
608 	if ((bi_sym = lookup_bootinfo(BTINFO_SYMTAB)) != NULL)
609 		ksyms_addsyms_elf(bi_sym->nsym, (int *)bi_sym->ssym,
610 		    (int *)bi_sym->esym);
611 	else {
612 		extern int end;
613 
614 		ksyms_addsyms_elf(esym - (int)&end, &end, (int*)esym);
615 	}
616 #endif
617 
618 	/* We will shortly go virtual. */
619 	pagezero_mapped = 0;
620 	fcacheall();
621 
622 	pcb0 = lwp_getpcb(&lwp0);
623 	pcb0->pcb_fpregs = &lwp0_fpregs;
624 	memset(&lwp0_fpregs, 0, sizeof(struct fpreg));
625 
626 	pool_init(&hppa_fppl, sizeof(struct fpreg), 16, 0, 0, "fppl", NULL,
627 	    IPL_NONE);
628 }
629 
630 void
631 cpuid(void)
632 {
633 	/*
634 	 * XXX fredette - much of this TLB trap handler setup should
635 	 * probably be moved here to hppa/hppa/hppa_machdep.c, seeing
636 	 * that there's related code already in hppa/hppa/trap.S.
637 	 */
638 
639 	/*
640 	 * Ptrs to various tlb handlers, to be filled
641 	 * based on CPU features.
642 	 * from locore.S
643 	 */
644 	extern u_int trap_ep_T_TLB_DIRTY[];
645 	extern u_int trap_ep_T_DTLBMISS[];
646 	extern u_int trap_ep_T_DTLBMISSNA[];
647 	extern u_int trap_ep_T_ITLBMISS[];
648 	extern u_int trap_ep_T_ITLBMISSNA[];
649 
650 	struct pdc_cpuid pdc_cpuid;
651 	const struct hppa_cpu_info *p = NULL;
652 	const char *model;
653 	u_int cpu_version, cpu_features;
654 	int error, i;
655 
656 	/* may the scientific guessing begin */
657 	cpu_type = hpc_unknown;
658 	cpu_features = 0;
659 	cpu_version = 0;
660 
661 	/* identify system type */
662 	error = pdcproc_model_info(&pdc_model);
663 	if (error < 0) {
664 		DPRINTF(("WARNING: PDC_MODEL_INFO error %d\n", error));
665 
666 		pdc_model.hwmodel = 0;
667 		pdc_model.hv = 0;
668 	} else {
669 		DPRINTF(("pdc_model.hwmodel/hv %x/%x\n", pdc_model.hwmodel,
670 		    pdc_model.hv));
671 	}
672 	cpu_modelno = pdc_model.hwmodel;
673 	model = hppa_mod_info(HPPA_TYPE_BOARD, cpu_modelno);
674 
675 	DPRINTF(("%s: model %s\n", __func__, model));
676 
677 	pdc_settype(cpu_modelno);
678 
679 	memset(&pdc_cpuid, 0, sizeof(pdc_cpuid));
680 	error = pdcproc_model_cpuid(&pdc_cpuid);
681 	if (error < 0) {
682 		DPRINTF(("WARNING: PDC_MODEL_CPUID error %d. "
683 		    "Using cpu_modelno (%#x) based cpu_type.\n", error, cpu_modelno));
684 
685 		cpu_type = cpu_model_cpuid(cpu_modelno);
686 		if (cpu_type == hpc_unknown) {
687 			printf("WARNING: Unknown cpu_type for cpu_modelno %x\n",
688 			   cpu_modelno);
689 		}
690 	} else {
691 		DPRINTF(("%s: cpuid.version  = %x\n", __func__,
692 		    pdc_cpuid.version));
693 		DPRINTF(("%s: cpuid.revision = %x\n", __func__,
694 		    pdc_cpuid.revision));
695 
696 		cpu_version = pdc_cpuid.version;
697 
698 		/* XXXNH why? */
699 		/* patch for old 8200 */
700 		if (pdc_cpuid.version == HPPA_CPU_PCXU &&
701 		    pdc_cpuid.revision > 0x0d)
702 			cpu_version = HPPA_CPU_PCXUP;
703 	}
704 
705 	/* locate coprocessors and SFUs */
706 	memset(&pdc_coproc, 0, sizeof(pdc_coproc));
707 	error = pdcproc_coproc(&pdc_coproc);
708 	if (error < 0) {
709 		DPRINTF(("WARNING: PDC_COPROC error %d\n", error));
710 		pdc_coproc.ccr_enable = 0;
711 	} else {
712 		DPRINTF(("pdc_coproc: 0x%x, 0x%x; model %x rev %x\n",
713 		    pdc_coproc.ccr_enable, pdc_coproc.ccr_present,
714 		    pdc_coproc.fpu_model, pdc_coproc.fpu_revision));
715 
716 		/* a kludge to detect PCXW */
717 		if (pdc_coproc.fpu_model == HPPA_FPU_PCXW)
718 			cpu_version = HPPA_CPU_PCXW;
719 	}
720 	mtctl(pdc_coproc.ccr_enable & CCR_MASK, CR_CCR);
721 	DPRINTF(("%s: bootstrap fpu\n", __func__));
722 
723 	usebtlb = 0;
724 	if (cpu_version == HPPA_CPU_PCXW || cpu_version > HPPA_CPU_PCXL2) {
725 		DPRINTF(("WARNING: BTLB no supported on cpu %d\n", cpu_version));
726 	} else {
727 
728 		/* BTLB params */
729 		error = pdcproc_block_tlb(&pdc_btlb);
730 		if (error < 0) {
731 			DPRINTF(("WARNING: PDC_BTLB error %d\n", error));
732 		} else {
733 			DPRINTFN(10, ("btlb info: minsz=%d, maxsz=%d\n",
734 			    pdc_btlb.min_size, pdc_btlb.max_size));
735 			DPRINTFN(10, ("btlb fixed: i=%d, d=%d, c=%d\n",
736 			    pdc_btlb.finfo.num_i,
737 			    pdc_btlb.finfo.num_d,
738 			    pdc_btlb.finfo.num_c));
739 			DPRINTFN(10, ("btlb varbl: i=%d, d=%d, c=%d\n",
740 			    pdc_btlb.vinfo.num_i,
741 			    pdc_btlb.vinfo.num_d,
742 			    pdc_btlb.vinfo.num_c));
743 
744 			/* purge TLBs and caches */
745 			if (pdcproc_btlb_purgeall() < 0)
746 				DPRINTFN(10, ("WARNING: BTLB purge failed\n"));
747 
748 			hppa_btlb_size_min = pdc_btlb.min_size;
749 			hppa_btlb_size_max = pdc_btlb.max_size;
750 
751 			DPRINTF(("hppa_btlb_size_min 0x%x\n", hppa_btlb_size_min));
752 			DPRINTF(("hppa_btlb_size_max 0x%x\n", hppa_btlb_size_max));
753 
754 			if (pdc_btlb.finfo.num_c)
755 				cpu_features |= HPPA_FTRS_BTLBU;
756 			usebtlb = 1;
757 		}
758 	}
759 	usebtlb = 0;
760 
761 	error = pdcproc_tlb_info(&pdc_hwtlb);
762 	if (error == 0 && pdc_hwtlb.min_size != 0 && pdc_hwtlb.max_size != 0) {
763 		cpu_features |= HPPA_FTRS_HVT;
764 		if (pmap_hptsize > pdc_hwtlb.max_size)
765 			pmap_hptsize = pdc_hwtlb.max_size;
766 		else if (pmap_hptsize && pmap_hptsize < pdc_hwtlb.min_size)
767 			pmap_hptsize = pdc_hwtlb.min_size;
768 
769 		DPRINTF(("%s: pmap_hptsize 0x%x\n", __func__, pmap_hptsize));
770 	} else {
771 		DPRINTF(("WARNING: no HPT support, fine!\n"));
772 
773 		pmap_hptsize = 0;
774 	}
775 
776 	bool cpu_found = false;
777 	if (cpu_version) {
778 		DPRINTF(("%s: looking for cpu_version %x\n", __func__,
779 		    cpu_version));
780 		for (i = 0, p = cpu_types; i < __arraycount(cpu_types);
781 		     i++, p++) {
782 			if (p->hci_cpuversion == cpu_version) {
783 				cpu_found = true;
784 				break;
785 			}
786 		}
787 	} else if (cpu_type != hpc_unknown) {
788 		DPRINTF(("%s: looking for cpu_type %d\n", __func__,
789 		    cpu_type));
790 		for (i = 0, p = cpu_types; i < __arraycount(cpu_types);
791 		     i++, p++) {
792 			if (p->hci_cputype == cpu_type) {
793 				cpu_found = true;
794 				break;
795 			}
796 		}
797 	}
798 
799 	if (!cpu_found) {
800 		panic("CPU detection failed. Please report the problem.");
801 	}
802 
803 	hppa_cpu_info = p;
804 
805 	if (hppa_cpu_info->hci_chip_name == NULL)
806 		panic("bad model string for 0x%x", pdc_model.hwmodel);
807 
808 	/*
809 	 * TODO: HPT on 7200 is not currently supported
810 	 */
811 	if (pmap_hptsize && p->hci_cputype != hpcxl && p->hci_cputype != hpcxl2)
812 		pmap_hptsize = 0;
813 
814 	cpu_type = hppa_cpu_info->hci_cputype;
815 	cpu_ibtlb_ins = hppa_cpu_info->ibtlbins;
816 	cpu_dbtlb_ins = hppa_cpu_info->dbtlbins;
817 	cpu_hpt_init = hppa_cpu_info->hptinit;
818 	cpu_desidhash = hppa_cpu_info->desidhash;
819 
820 	if (cpu_desidhash)
821 		cpu_revision = (*cpu_desidhash)();
822 	else
823 		cpu_revision = 0;
824 
825 	/* force strong ordering for now */
826 	if (hppa_cpu_ispa20_p())
827 		curcpu()->ci_psw |= PSW_O;
828 
829 	cpu_setmodel("HP9000/%s", model);
830 
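	/*
	 * Patch the two-word TLB trap entry points (from locore.S) with
	 * the per-CPU handler stubs selected above.
	 */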
831 #define	LDILDO(t,f) ((t)[0] = (f)[0], (t)[1] = (f)[1]);
832 	LDILDO(trap_ep_T_TLB_DIRTY , hppa_cpu_info->tlbdh);
833 	LDILDO(trap_ep_T_DTLBMISS  , hppa_cpu_info->dtlbh);
834 	LDILDO(trap_ep_T_DTLBMISSNA, hppa_cpu_info->dtlbnah);
835 	LDILDO(trap_ep_T_ITLBMISS  , hppa_cpu_info->itlbh);
836 	LDILDO(trap_ep_T_ITLBMISSNA, hppa_cpu_info->itlbnah);
837 #undef LDILDO
838 
839 	/* Bootstrap any FPU. */
840 	hppa_fpu_bootstrap(pdc_coproc.ccr_enable);
841 }
842 
843 enum hppa_cpu_type
844 cpu_model_cpuid(int modelno)
845 {
846 	switch (modelno) {
847 	/* no supported HP8xx/9xx models with pcx */
848 	case HPPA_BOARD_HP720:
849 	case HPPA_BOARD_HP750_66:
850 	case HPPA_BOARD_HP730_66:
851 	case HPPA_BOARD_HP710:
852 	case HPPA_BOARD_HP705:
853 		return hpcxs;
854 
855 	case HPPA_BOARD_HPE23:
856 	case HPPA_BOARD_HPE25:
857 	case HPPA_BOARD_HPE35:
858 	case HPPA_BOARD_HPE45:
859 	case HPPA_BOARD_HP712_60:
860 	case HPPA_BOARD_HP712_80:
861 	case HPPA_BOARD_HP712_100:
862 	case HPPA_BOARD_HP715_80:
863 	case HPPA_BOARD_HP715_64:
864 	case HPPA_BOARD_HP715_100:
865 	case HPPA_BOARD_HP715_100XC:
866 	case HPPA_BOARD_HP715_100L:
867 	case HPPA_BOARD_HP715_120L:
868 	case HPPA_BOARD_HP715_80M:
869 		return hpcxl;
870 
871 	case HPPA_BOARD_HP735_99:
872 	case HPPA_BOARD_HP755_99:
873 	case HPPA_BOARD_HP755_125:
874 	case HPPA_BOARD_HP735_130:
875 	case HPPA_BOARD_HP715_50:
876 	case HPPA_BOARD_HP715_33:
877 	case HPPA_BOARD_HP715S_50:
878 	case HPPA_BOARD_HP715S_33:
879 	case HPPA_BOARD_HP715T_50:
880 	case HPPA_BOARD_HP715T_33:
881 	case HPPA_BOARD_HP715_75:
882 	case HPPA_BOARD_HP715_99:
883 	case HPPA_BOARD_HP725_50:
884 	case HPPA_BOARD_HP725_75:
885 	case HPPA_BOARD_HP725_99:
886 		return hpcxt;
887 	}
888 	return hpc_unknown;
889 }
890 
891 void
892 cpu_startup(void)
893 {
894 	vaddr_t minaddr, maxaddr;
895 	char pbuf[3][9];
896 
897 	/* Initialize the message buffer. */
898 	initmsgbuf(msgbufaddr, MSGBUFSIZE);
899 
900 	/*
901 	 * i won't understand a friend of mine,
902 	 * who sat in a room full of artificial ice,
903 	 * fogging the air w/ humid cries --
904 	 *	WELCOME TO SUMMER!
905 	 */
906 	printf("%s%s", copyright, version);
907 
908 	/* identify system type */
909 	printf("%s\n", cpu_getmodel());
910 
911 	/* Display some memory usage information. */
912 	format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(physmem));
913 	format_bytes(pbuf[1], sizeof(pbuf[1]), ptoa(resvmem));
914 	format_bytes(pbuf[2], sizeof(pbuf[2]), ptoa(availphysmem));
915 	printf("real mem = %s (%s reserved for PROM, %s used by NetBSD)\n",
916 	    pbuf[0], pbuf[1], pbuf[2]);
917 
918 #ifdef DEBUG
919 	if (totalphysmem > physmem) {
920 		format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(totalphysmem - physmem));
921 		DPRINTF(("lost mem = %s\n", pbuf[0]));
922 	}
923 #endif
924 
925 	minaddr = 0;
926 
927 	/*
928 	 * Allocate a submap for physio
929 	 */
930 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
931 	    VM_PHYS_SIZE, 0, false, NULL);
932 
933 	format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(uvm_availmem(false)));
934 	printf("avail mem = %s\n", pbuf[0]);
935 }
936 
937 /*
938  * compute CPU clock ratio such that:
939  *	cpu_ticksnum / cpu_ticksdenom = t + delta
940  *	delta -> 0
941  */
942 void
943 delay_init(void)
944 {
945 	u_int num, denom, delta, mdelta;
946 
947 	mdelta = UINT_MAX;
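	/*
	 * Search for the num/denom pair (denom < 1000) that best
	 * approximates the Interval Timer rate in ticks per
	 * microsecond, i.e. PAGE0->mem_10msec / 10000.
	 */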
948 	for (denom = 1; denom < 1000; denom++) {
949 		num = (PAGE0->mem_10msec * denom) / 10000;
950 		delta = num * 10000 / denom - PAGE0->mem_10msec;
951 		if (!delta) {
952 			cpu_ticksdenom = denom;
953 			cpu_ticksnum = num;
954 			break;
955 		} else if (delta < mdelta) {
956 			cpu_ticksdenom = denom;
957 			cpu_ticksnum = num;
958 			mdelta = delta;
959 		}
960 	}
961 }
962 
963 void
964 delay(u_int us)
965 {
966 	u_int start, end, n;
967 
968 	mfctl(CR_ITMR, start);
969 	while (us) {
970 		n = uimin(1000, us);
971 		end = start + n * cpu_ticksnum / cpu_ticksdenom;
972 
973 		/* N.B. Interval Timer may wrap around */
974 		if (end < start) {
975 			do {
976 				mfctl(CR_ITMR, start);
977 			} while (start > end);
978 		}
979 
980 		do
981 			mfctl(CR_ITMR, start);
982 		while (start < end);
983 
984 		us -= n;
985 		mfctl(CR_ITMR, start);
986 	}
987 }
988 
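/*
 * Walk a cache as described by its PDC geometry (base, count, loop,
 * stride), flushing each entry: the data cache if "data" is nonzero,
 * the instruction cache otherwise.
 */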
989 static inline void
990 fall(int c_base, int c_count, int c_loop, int c_stride, int data)
991 {
992 	int loop;
993 
994 	for (; c_count--; c_base += c_stride)
995 		for (loop = c_loop; loop--; )
996 			if (data)
997 				fdce(0, c_base);
998 			else
999 				fice(0, c_base);
1000 }
1001 
1002 void
1003 fcacheall(void)
1004 {
1005 	/*
1006 	 * Flush the instruction, then data cache.
1007 	 */
1008 	fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop,
1009 	    pdc_cache.ic_stride, 0);
1010 	sync_caches();
1011 	fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop,
1012 	    pdc_cache.dc_stride, 1);
1013 	sync_caches();
1014 }
1015 
1016 void
1017 ptlball(void)
1018 {
1019 	pa_space_t sp;
1020 	int i, j, k;
1021 
1022 	/* instruction TLB */
1023 	sp = pdc_cache.it_sp_base;
1024 	for (i = 0; i < pdc_cache.it_sp_count; i++) {
1025 		vaddr_t off = pdc_cache.it_off_base;
1026 		for (j = 0; j < pdc_cache.it_off_count; j++) {
1027 			for (k = 0; k < pdc_cache.it_loop; k++)
1028 				pitlbe(sp, off);
1029 			off += pdc_cache.it_off_stride;
1030 		}
1031 		sp += pdc_cache.it_sp_stride;
1032 	}
1033 
1034 	/* data TLB */
1035 	sp = pdc_cache.dt_sp_base;
1036 	for (i = 0; i < pdc_cache.dt_sp_count; i++) {
1037 		vaddr_t off = pdc_cache.dt_off_base;
1038 		for (j = 0; j < pdc_cache.dt_off_count; j++) {
1039 			for (k = 0; k < pdc_cache.dt_loop; k++)
1040 				pdtlbe(sp, off);
1041 			off += pdc_cache.dt_off_stride;
1042 		}
1043 		sp += pdc_cache.dt_sp_stride;
1044 	}
1045 }
1046 
1047 int
1048 hpti_g(vaddr_t hpt, vsize_t hptsize)
1049 {
1050 
1051 	return pdcproc_tlb_config(&pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE);
1052 }
1053 
1054 int
1055 pbtlb_g(int i)
1056 {
1057 	return -1;
1058 }
1059 
1060 int
1061 ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa, vsize_t sz, u_int prot)
1062 {
1063 	int error;
1064 
1065 	error = pdcproc_btlb_insert(sp, va, pa, sz, prot, i);
1066 	if (error < 0) {
1067 #ifdef BTLBDEBUG
1068 		DPRINTF(("WARNING: BTLB insert failed (%d)\n", error));
1069 #endif
1070 	}
1071 	return error;
1072 }
1073 
1074 
1075 /*
1076  * This inserts a recorded BTLB slot.
1077  */
1078 static int _hppa_btlb_insert(struct btlb_slot *);
1079 static int
1080 _hppa_btlb_insert(struct btlb_slot *btlb_slot)
1081 {
1082 	int error;
1083 #ifdef MACHDEPDEBUG
1084 	const char *prot;
1085 
1086 	/* Display the protection like a file protection. */
1087 	switch (btlb_slot->btlb_slot_tlbprot & TLB_AR_MASK) {
1088 	case TLB_AR_NA:			prot = "------"; break;
1089 	case TLB_AR_R:			prot = "r-----"; break;
1090 	case TLB_AR_RW:			prot = "rw----"; break;
1091 	case TLB_AR_RX:			prot = "r-x---"; break;
1092 	case TLB_AR_RWX:		prot = "rwx---"; break;
1093 	case TLB_AR_R | TLB_USER:	prot = "r--r--"; break;
1094 	case TLB_AR_RW | TLB_USER:	prot = "rw-rw-"; break;
1095 	case TLB_AR_RX | TLB_USER:	prot = "r--r-x"; break;
1096 	case TLB_AR_RWX | TLB_USER:	prot = "rw-rwx"; break;
1097 	default:		prot = "??????"; break;
1098 	}
1099 
1100 	DPRINTFN(10, (
1101 	    "  [ BTLB %d: %s 0x%08x @ 0x%x:0x%08x len 0x%08x prot 0x%08x]  ",
1102 	    btlb_slot->btlb_slot_number,
1103 	    prot,
1104 	    (u_int)btlb_slot->btlb_slot_pa_frame << PGSHIFT,
1105 	    btlb_slot->btlb_slot_va_space,
1106 	    (u_int)btlb_slot->btlb_slot_va_frame << PGSHIFT,
1107 	    (u_int)btlb_slot->btlb_slot_frames << PGSHIFT,
1108 	    btlb_slot->btlb_slot_tlbprot));
1109 
1110 	/*
1111 	 * Non-I/O space mappings are entered by the pmap,
1112 	 * so we do print a newline to make things look better.
1113 	 */
1114 	if (btlb_slot->btlb_slot_pa_frame < (HPPA_IOSPACE >> PGSHIFT))
1115 		DPRINTFN(10, ("\n"));
1116 #endif
1117 
1118 	/* Insert this mapping. */
1119 	error = pdcproc_btlb_insert(
1120 		btlb_slot->btlb_slot_va_space,
1121 		btlb_slot->btlb_slot_va_frame,
1122 		btlb_slot->btlb_slot_pa_frame,
1123 		btlb_slot->btlb_slot_frames,
1124 		btlb_slot->btlb_slot_tlbprot,
1125 		btlb_slot->btlb_slot_number);
1126 	if (error < 0) {
1127 #ifdef BTLBDEBUG
1128 		DPRINTF(("WARNING: BTLB insert failed (%d)\n", error);
1129 #endif
1130 	}
1131 	return (error ? EINVAL : 0);
1132 }
1133 
1134 /*
1135  * This records and inserts a new BTLB entry.
1136  */
1137 int
1138 hppa_btlb_insert(pa_space_t space, vaddr_t va, paddr_t pa, vsize_t *sizep,
1139     u_int tlbprot)
1140 {
1141 	struct btlb_slot *btlb_slot, *btlb_slot_best, *btlb_slot_end;
1142 	vsize_t frames;
1143 	int error;
1144 	int need_dbtlb, need_ibtlb, need_variable_range;
1145 	int btlb_slot_score, btlb_slot_best_score;
1146 	vsize_t slot_mapped_frames, total_mapped_frames;
1147 
1148 	/*
1149 	 * All entries need data translation.  Those that
1150 	 * allow execution also need instruction translation.
1151 	 */
1152 	switch (tlbprot & TLB_AR_MASK) {
1153 	case TLB_AR_R:
1154 	case TLB_AR_RW:
1155 	case TLB_AR_R | TLB_USER:
1156 	case TLB_AR_RW | TLB_USER:
1157 		need_dbtlb = true;
1158 		need_ibtlb = false;
1159 		break;
1160 	case TLB_AR_RX:
1161 	case TLB_AR_RWX:
1162 	case TLB_AR_RX | TLB_USER:
1163 	case TLB_AR_RWX | TLB_USER:
1164 		need_dbtlb = true;
1165 		need_ibtlb = true;
1166 		break;
1167 	default:
1168 		panic("btlb_insert: bad tlbprot");
1169 	}
1170 
1171 	/*
1172 	 * If this entry isn't aligned to the size required
1173 	 * for a fixed-range slot, it requires a variable-range
1174 	 * slot.  This also converts pa and va to page frame
1175 	 * numbers.
1176 	 */
1177 	frames = pdc_btlb.min_size << PGSHIFT;
1178 	while (frames < *sizep)
1179 		frames <<= 1;
1180 	frames >>= PGSHIFT;
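	/*
	 * frames is now the request rounded up to the smallest
	 * power-of-two multiple of the minimum BTLB entry size, in
	 * page frames.
	 */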
1181 	if (frames > pdc_btlb.max_size) {
1182 #ifdef BTLBDEBUG
1183 		DPRINTF(("btlb_insert: too big (%u < %u < %u)\n",
1184 		    pdc_btlb.min_size, (u_int) frames, pdc_btlb.max_size));
1185 #endif
1186 		return -(ENOMEM);
1187 	}
1188 	pa >>= PGSHIFT;
1189 	va >>= PGSHIFT;
1190 	need_variable_range =
1191 		((pa & (frames - 1)) != 0 || (va & (frames - 1)) != 0);
1192 
1193 	/* I/O space must be mapped uncached. */
1194 	if (pa >= HPPA_IOBEGIN)
1195 		tlbprot |= TLB_UNCACHEABLE;
1196 
1197 	/*
1198 	 * Loop while we still need slots.
1199 	 */
1200 	btlb_slot_end = btlb_slots + btlb_slots_count;
1201 	total_mapped_frames = 0;
1202 	btlb_slot_best_score = 0;
1203 	while (need_dbtlb || need_ibtlb) {
1204 
1205 		/*
1206 		 * Find an applicable slot.
1207 		 */
1208 		btlb_slot_best = NULL;
1209 		for (btlb_slot = btlb_slots;
1210 		     btlb_slot < btlb_slot_end;
1211 		     btlb_slot++) {
1212 
1213 			/*
1214 			 * Skip this slot if it's in use, or if we need a
1215 			 * variable-range slot and this isn't one.
1216 			 */
1217 			if (btlb_slot->btlb_slot_frames != 0 ||
1218 			    (need_variable_range &&
1219 			     !(btlb_slot->btlb_slot_flags &
1220 			       BTLB_SLOT_VARIABLE_RANGE)))
1221 				continue;
1222 
1223 			/*
1224 			 * Score this slot.
1225 			 */
1226 			btlb_slot_score = 0;
1227 			if (need_dbtlb &&
1228 			    (btlb_slot->btlb_slot_flags & BTLB_SLOT_DBTLB))
1229 				btlb_slot_score++;
1230 			if (need_ibtlb &&
1231 			    (btlb_slot->btlb_slot_flags & BTLB_SLOT_IBTLB))
1232 				btlb_slot_score++;
1233 
1234 			/*
1235 			 * Update the best slot.
1236 			 */
1237 			if (btlb_slot_score > 0 &&
1238 			    (btlb_slot_best == NULL ||
1239 			     btlb_slot_score > btlb_slot_best_score)) {
1240 				btlb_slot_best = btlb_slot;
1241 				btlb_slot_best_score = btlb_slot_score;
1242 			}
1243 		}
1244 
1245 		/*
1246 		 * If there were no applicable slots.
1247 		 */
1248 		if (btlb_slot_best == NULL) {
1249 			DPRINTFN(10, ("BTLB full\n"));
1250 			return -(ENOMEM);
1251 		}
1252 
1253 		/*
1254 		 * Now fill this BTLB slot record and insert the entry.
1255 		 */
1256 		btlb_slot = btlb_slot_best;
1257 		if (btlb_slot->btlb_slot_flags & BTLB_SLOT_VARIABLE_RANGE)
1258 			slot_mapped_frames = ((*sizep + PGOFSET) >> PGSHIFT);
1259 		else
1260 			slot_mapped_frames = frames;
1261 		if (slot_mapped_frames > total_mapped_frames)
1262 			total_mapped_frames = slot_mapped_frames;
1263 		btlb_slot->btlb_slot_va_space = space;
1264 		btlb_slot->btlb_slot_va_frame = va;
1265 		btlb_slot->btlb_slot_pa_frame = pa;
1266 		btlb_slot->btlb_slot_tlbprot = tlbprot;
1267 		btlb_slot->btlb_slot_frames = slot_mapped_frames;
1268 		error = _hppa_btlb_insert(btlb_slot);
1269 		if (error)
1270 			return -error;
1271 		/*
1272 		 * Note what slots we no longer need.
1273 		 */
1274 		if (btlb_slot->btlb_slot_flags & BTLB_SLOT_DBTLB)
1275 			need_dbtlb = false;
1276 		if (btlb_slot->btlb_slot_flags & BTLB_SLOT_IBTLB)
1277 			need_ibtlb = false;
1278 	}
1279 
1280 	/* Success. */
1281 	*sizep = (total_mapped_frames << PGSHIFT);
1282 	return 0;
1283 }
1284 
1285 /*
1286  * This reloads the BTLB in the event that it becomes invalidated.
1287  */
1288 int
1289 hppa_btlb_reload(void)
1290 {
1291 	struct btlb_slot *btlb_slot, *btlb_slot_end;
1292 	int error;
1293 
1294 	/* Insert all recorded BTLB entries. */
1295 	btlb_slot = btlb_slots;
1296 	btlb_slot_end = btlb_slots + btlb_slots_count;
1297 	error = 0;
1298 	while (error == 0 && btlb_slot < btlb_slot_end) {
1299 		if (btlb_slot->btlb_slot_frames != 0)
1300 			error = _hppa_btlb_insert(btlb_slot);
1301 		btlb_slot++;
1302 	}
1303 	DPRINTF(("\n"));
1304 	return (error);
1305 }
1306 
1307 /*
1308  * This purges a BTLB entry.
1309  */
1310 int
1311 hppa_btlb_purge(pa_space_t space, vaddr_t va, vsize_t *sizep)
1312 {
1313 	struct btlb_slot *btlb_slot, *btlb_slot_end;
1314 	int error;
1315 
1316 	/*
1317 	 * Purge all slots that map this virtual address.
1318 	 */
1319 	error = ENOENT;
1320 	va >>= PGSHIFT;
1321 	btlb_slot_end = btlb_slots + btlb_slots_count;
1322 	for (btlb_slot = btlb_slots;
1323 	     btlb_slot < btlb_slot_end;
1324 	     btlb_slot++) {
1325 		if (btlb_slot->btlb_slot_frames != 0 &&
1326 		    btlb_slot->btlb_slot_va_space == space &&
1327 		    btlb_slot->btlb_slot_va_frame == va) {
1328 			error = pdcproc_btlb_purge(
1329 				btlb_slot->btlb_slot_va_space,
1330 				btlb_slot->btlb_slot_va_frame,
1331 				btlb_slot->btlb_slot_number,
1332 				btlb_slot->btlb_slot_frames);
1333 			if (error < 0) {
1334 				DPRINTFN(10, ("WARNING: BTLB purge failed (%d)\n",
1335 					error));
1336 
1337 				return (error);
1338 			}
1339 
1340 			/*
1341 			 * Tell our caller how many bytes were mapped
1342 			 * by this slot, then free the slot.
1343 			 */
1344 			*sizep = (btlb_slot->btlb_slot_frames << PGSHIFT);
1345 			btlb_slot->btlb_slot_frames = 0;
1346 		}
1347 	}
1348 	return (error);
1349 }
1350 
1351 /*
1352  * This maps page zero if it isn't already mapped, and
1353  * returns a cookie for hppa_pagezero_unmap.
1354  */
1355 int
1356 hppa_pagezero_map(void)
1357 {
1358 	int was_mapped_before;
1359 	int s;
1360 
1361 	was_mapped_before = pagezero_mapped;
1362 	if (!was_mapped_before) {
1363 		s = splhigh();
1364 		pmap_kenter_pa(0, 0, VM_PROT_ALL, 0);
1365 		pagezero_mapped = 1;
1366 		splx(s);
1367 	}
1368 	return (was_mapped_before);
1369 }
1370 
1371 /*
1372  * This unmaps page zero, given a cookie previously returned
1373  * by hppa_pagezero_map.
1374  */
1375 void
1376 hppa_pagezero_unmap(int was_mapped_before)
1377 {
1378 	int s;
1379 
1380 	if (!was_mapped_before) {
1381 		s = splhigh();
1382 		pmap_kremove(0, PAGE_SIZE);
1383 		pagezero_mapped = 0;
1384 		splx(s);
1385 	}
1386 }
1387 
1388 int waittime = -1;
1389 
1390 __dead void
1391 cpu_reboot(int howto, char *user_boot_string)
1392 {
1393 	boothowto = howto | (boothowto & RB_HALT);
1394 
1395 	if (!(howto & RB_NOSYNC) && waittime < 0) {
1396 		waittime = 0;
1397 		vfs_shutdown();
1398 
1399 		/*
1400 		 * If we've been adjusting the clock, the todr
1401 		 * will be out of synch; adjust it now.
1402 		 */
1403 		resettodr();
1404 	}
1405 
1406 	/* XXX probably save howto into stable storage */
1407 
1408 	/* Disable interrupts. */
1409 	splhigh();
1410 
1411 	/* Make a crash dump. */
1412 	if (howto & RB_DUMP)
1413 		dumpsys();
1414 
1415 	/* Run any shutdown hooks. */
1416 	doshutdownhooks();
1417 
1418 	pmf_system_shutdown(boothowto);
1419 
1420 	/* in case we came on powerfail interrupt */
1421 	if (cold_hook)
1422 		(*cold_hook)(HPPA_COLD_COLD);
1423 
1424 	if (howto & RB_HALT) {
1425 		if ((howto & RB_POWERDOWN) == RB_POWERDOWN && cold_hook) {
1426 			printf("Powering off...");
1427 			DELAY(1000000);
1428 			(*cold_hook)(HPPA_COLD_OFF);
1429 			DELAY(1000000);
1430 		}
1431 
1432 		printf("System halted!\n");
1433 		DELAY(1000000);
1434 		__asm volatile("stwas %0, 0(%1)"
1435 		    :: "r" (CMD_STOP), "r" (LBCAST_ADDR + iomod_command));
1436 	} else {
1437 		printf("rebooting...");
1438 		DELAY(1000000);
1439 		__asm volatile("stwas %0, 0(%1)"
1440 		    :: "r" (CMD_RESET), "r" (LBCAST_ADDR + iomod_command));
1441 
1442 		/* ask firmware to reset */
1443 		pdcproc_doreset();
1444 		/* forcibly reset module if that fails */
1445 		__asm __volatile("stwas %0, 0(%1)"
1446 		    :: "r" (CMD_RESET), "r" (HPPA_LBCAST + iomod_command));
1447 	}
1448 
1449 	for (;;) {
1450 		/*
1451 		 * loop while bus reset is coming up.  This NOP instruction
1452 		 * is used by qemu to detect the 'death loop'.
1453 		 */
1454 		__asm volatile("or %%r31, %%r31, %%r31" ::: "memory");
1455 	}
1456 	/* NOTREACHED */
1457 }
1458 
1459 uint32_t dumpmag = 0x8fca0101;	/* magic number */
1460 int	dumpsize = 0;		/* pages */
1461 long	dumplo = 0;		/* blocks */
1462 
1463 /*
1464  * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1465  */
1466 int
1467 cpu_dumpsize(void)
1468 {
1469 	int size;
1470 
1471 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
1472 	if (roundup(size, dbtob(1)) != dbtob(1))
1473 		return -1;
1474 
1475 	return 1;
1476 }
1477 
1478 /*
1479  * This handles a machine check.  This can be either an HPMC,
1480  * an LPMC, or a TOC.  The check type is passed in as a trap
1481  * type, one of T_HPMC, T_LPMC, or T_INTERRUPT (for TOC).
1482  */
1483 static char in_check = 0;
1484 
1485 #define	PIM_WORD(name, word, bits)			\
1486 do {							\
1487 	snprintb(bitmask_buffer, sizeof(bitmask_buffer),\
1488 	    bits, word);				\
1489 	printf("%s %s", name, bitmask_buffer);		\
1490 } while (/* CONSTCOND */ 0)
1491 
1492 
1493 static inline void
1494 hppa_pim_dump(int check_type, void *data, size_t size)
1495 {
1496 	struct hppa_pim_hpmc *hpmc;
1497 	struct hppa_pim_lpmc *lpmc;
1498 	struct hppa_pim_toc *toc;
1499 	struct hppa_pim_regs *regs;
1500 	struct hppa_pim_checks *checks;
1501 	u_int *regarray;
1502 	int reg_i, reg_j, reg_k;
1503 	char bitmask_buffer[64];
1504 	const char *name;
1505 
1506 	regs = NULL;
1507 	checks = NULL;
1508 	switch (check_type) {
1509 	case T_HPMC:
1510 		hpmc = (struct hppa_pim_hpmc *) data;
1511 		regs = &hpmc->pim_hpmc_regs;
1512 		checks = &hpmc->pim_hpmc_checks;
1513 		break;
1514 	case T_LPMC:
1515 		lpmc = (struct hppa_pim_lpmc *) data;
1516 		checks = &lpmc->pim_lpmc_checks;
1517 		break;
1518 	case T_INTERRUPT:
1519 		toc = (struct hppa_pim_toc *) data;
1520 		regs = &toc->pim_toc_regs;
1521 		break;
1522 	default:
1523 		panic("unknown machine check type");
1524 		/* NOTREACHED */
1525 	}
1526 
1527 	/* If we have register arrays, display them. */
1528 	if (regs != NULL) {
1529 		for (reg_i = 0; reg_i < 3; reg_i++) {
1530 			if (reg_i == 0) {
1531 				name = "General";
1532 				regarray = &regs->pim_regs_r0;
1533 				reg_j = 32;
1534 			} else if (reg_i == 1) {
1535 				name = "Control";
1536 				regarray = &regs->pim_regs_cr0;
1537 				reg_j = 32;
1538 			} else {
1539 				name = "Space";
1540 				regarray = &regs->pim_regs_sr0;
1541 				reg_j = 8;
1542 			}
1543 			printf("\n\n\t%s Registers:", name);
1544 			for (reg_k = 0; reg_k < reg_j; reg_k++)
1545 				printf("%s0x%08x",
1546 				    (reg_k & 3) ? " " : "\n",
1547 				    regarray[reg_k]);
1548 		}
1549 
1550 		/* Print out some interesting registers. */
1551 		printf("\n\n\tIIA head 0x%x:0x%08x\n"
1552 			"\tIIA tail 0x%x:0x%08x",
1553 			regs->pim_regs_cr17, regs->pim_regs_cr18,
1554 			regs->pim_regs_iisq_tail, regs->pim_regs_iioq_tail);
1555 		PIM_WORD("\n\tIPSW", regs->pim_regs_cr22, PSW_BITS);
1556 		printf("\n\tSP 0x%x:0x%08x FP 0x%x:0x%08x",
1557 			regs->pim_regs_sr0, regs->pim_regs_r30,
1558 			regs->pim_regs_sr0, regs->pim_regs_r3);
1559 	}
1560 
1561 	/* If we have check words, display them. */
1562 	if (checks != NULL) {
1563 		PIM_WORD("\n\n\tCheck Type", checks->pim_check_type,
1564 			PIM_CHECK_BITS);
1565 		PIM_WORD("\n\tCPU State", checks->pim_check_cpu_state,
1566 			PIM_CPU_BITS PIM_CPU_HPMC_BITS);
1567 		PIM_WORD("\n\tCache Check", checks->pim_check_cache,
1568 			PIM_CACHE_BITS);
1569 		PIM_WORD("\n\tTLB Check", checks->pim_check_tlb,
1570 			PIM_TLB_BITS);
1571 		PIM_WORD("\n\tBus Check", checks->pim_check_bus,
1572 			PIM_BUS_BITS);
1573 		PIM_WORD("\n\tAssist Check", checks->pim_check_assist,
1574 			PIM_ASSIST_BITS);
1575 		printf("\tAssist State %u", checks->pim_check_assist_state);
1576 		printf("\n\tSystem Responder 0x%08x",
1577 			checks->pim_check_responder);
1578 		printf("\n\tSystem Requestor 0x%08x",
1579 			checks->pim_check_requestor);
1580 		printf("\n\tPath Info 0x%08x",
1581 			checks->pim_check_path_info);
1582 	}
1583 }
1584 
1585 static inline void
1586 hppa_pim64_dump(int check_type, void *data, size_t size)
1587 {
1588 	struct hppa_pim64_hpmc *hpmc;
1589 	struct hppa_pim64_lpmc *lpmc;
1590 	struct hppa_pim64_toc *toc;
1591 	struct hppa_pim64_regs *regs;
1592 	struct hppa_pim64_checks *checks;
1593 	int reg_i, reg_j, reg_k;
1594 	uint64_t *regarray;
1595 	char bitmask_buffer[64];
1596 	const char *name;
1597 
1598 	regs = NULL;
1599 	checks = NULL;
1600 	switch (check_type) {
1601 	case T_HPMC:
1602 		hpmc = (struct hppa_pim64_hpmc *) data;
1603 		regs = &hpmc->pim_hpmc_regs;
1604 		checks = &hpmc->pim_hpmc_checks;
1605 		break;
1606 	case T_LPMC:
1607 		lpmc = (struct hppa_pim64_lpmc *) data;
1608 		checks = &lpmc->pim_lpmc_checks;
1609 		break;
1610 	case T_INTERRUPT:
1611 		toc = (struct hppa_pim64_toc *) data;
1612 		regs = &toc->pim_toc_regs;
1613 		break;
1614 	default:
1615 		panic("unknown machine check type");
1616 		/* NOTREACHED */
1617 	}
1618 
1619 	/* If we have register arrays, display them. */
1620 	if (regs != NULL) {
1621 		for (reg_i = 0; reg_i < 3; reg_i++) {
1622 			if (reg_i == 0) {
1623 				name = "General";
1624 				regarray = &regs->pim_regs_r0;
1625 				reg_j = 32;
1626 			} else if (reg_i == 1) {
1627 				name = "Control";
1628 				regarray = &regs->pim_regs_cr0;
1629 				reg_j = 32;
1630 			} else {
1631 				name = "Space";
1632 				regarray = &regs->pim_regs_sr0;
1633 				reg_j = 8;
1634 			}
1635 			printf("\n\n%s Registers:", name);
1636 			for (reg_k = 0; reg_k < reg_j; reg_k++)
1637 				printf("%s0x%016lx",
1638 				   (reg_k & 3) ? " " : "\n",
1639 				   (unsigned long)regarray[reg_k]);
1640 		}
1641 
1642 		/* Print out some interesting registers. */
1643 		printf("\n\nIIA head 0x%lx:0x%016lx\n"
1644 	            "IIA tail 0x%lx:0x%016lx",
1645 		    (unsigned long)regs->pim_regs_cr17,
1646 		    (unsigned long)regs->pim_regs_cr18,
1647 		    (unsigned long)regs->pim_regs_iisq_tail,
1648 		    (unsigned long)regs->pim_regs_iioq_tail);
1649 		PIM_WORD("\nIPSW", regs->pim_regs_cr22, PSW_BITS);
1650 		printf("\nSP 0x%lx:0x%016lx\nFP 0x%lx:0x%016lx",
1651        		    (unsigned long)regs->pim_regs_sr0,
1652 		    (unsigned long)regs->pim_regs_r30,
1653 		    (unsigned long)regs->pim_regs_sr0,
1654 		    (unsigned long)regs->pim_regs_r3);
1655 	}
1656 
1657 	/* If we have check words, display them. */
1658 	if (checks != NULL) {
1659 		PIM_WORD("\n\nCheck Type", checks->pim_check_type,
1660 			PIM_CHECK_BITS);
1661 		PIM_WORD("\nCPU State", checks->pim_check_cpu_state,
1662 			PIM_CPU_BITS PIM_CPU_HPMC_BITS);
1663 		PIM_WORD("\nCache Check", checks->pim_check_cache,
1664 			PIM_CACHE_BITS);
1665 		PIM_WORD("\nTLB Check", checks->pim_check_tlb,
1666 			PIM_TLB_BITS);
1667 		PIM_WORD("\nBus Check", checks->pim_check_bus,
1668 			PIM_BUS_BITS);
1669 		PIM_WORD("\nAssist Check", checks->pim_check_assist,
1670 			PIM_ASSIST_BITS);
1671 		printf("\nAssist State %u", checks->pim_check_assist_state);
1672 		printf("\nSystem Responder 0x%016lx",
1673 		        (unsigned long)checks->pim_check_responder);
1674 		printf("\nSystem Requestor 0x%016lx",
1675 		        (unsigned long)checks->pim_check_requestor);
1676 		printf("\nPath Info 0x%08x",
1677 		        checks->pim_check_path_info);
1678 	}
1679 }
1680 
1681 void
1682 hppa_machine_check(int check_type)
1683 {
1684 	int pdc_pim_type;
1685 	const char *name;
1686 	int pimerror, error;
1687 	void *data;
1688 	size_t size;
1689 
1690 	/* Do an fcacheall(). */
1691 	fcacheall();
1692 
1693 	/* Dispatch on the check type. */
1694 	switch (check_type) {
1695 	case T_HPMC:
1696 		name = "HPMC";
1697 		pdc_pim_type = PDC_PIM_HPMC;
1698 		break;
1699 	case T_LPMC:
1700 		name = "LPMC";
1701 		pdc_pim_type = PDC_PIM_LPMC;
1702 		break;
1703 	case T_INTERRUPT:
1704 		name = "TOC";
1705 		pdc_pim_type = PDC_PIM_TOC;
1706 		break;
1707 	default:
1708 		panic("unknown machine check type");
1709 		/* NOTREACHED */
1710 	}
1711 
1712 	pimerror = pdcproc_pim(pdc_pim_type, &pdc_pim, &data, &size);
1713 
1714 	KASSERT(pdc_pim.count <= size);
1715 
1716 	/*
1717 	 * Reset IO and log errors.
1718 	 *
1719 	 * This seems to be needed in order to output to the console
1720 	 * if we take an HPMC interrupt. This PDC procedure may not be
1721 	 * implemented by some machines.
1722 	 */
1723 	error = pdcproc_ioclrerrors();
1724 	if (error != PDC_ERR_OK && error != PDC_ERR_NOPROC)
1725 		/* This seems futile if we can't print to the console. */
1726 		panic("PDC_IO failed");
1727 
1728 	printf("\nmachine check: %s", name);
1729 
1730 	if (pimerror < 0) {
1731 		printf(" - WARNING: could not transfer PIM info (%d)", pimerror);
1732 	} else {
1733 		if (hppa_cpu_ispa20_p())
1734 			hppa_pim64_dump(check_type, data, size);
1735 		else
1736 			hppa_pim_dump(check_type, data, size);
1737 	}
1738 
1739 	printf("\n");
1740 
1741 	/* If this is our first check, panic. */
1742 	if (in_check == 0) {
1743 		in_check = 1;
1744 		DELAY(250000);
1745 		panic("machine check");
1746 	}
1747 
1748 	/* Reboot the machine. */
1749 	printf("Rebooting...\n");
1750 	cpu_die();
1751 }
1752 
1753 int
1754 cpu_dump(void)
1755 {
1756 	long buf[dbtob(1) / sizeof (long)];
1757 	kcore_seg_t	*segp;
1758 	cpu_kcore_hdr_t	*cpuhdrp __unused;
1759 	const struct bdevsw *bdev;
1760 
1761 	segp = (kcore_seg_t *)buf;
1762 	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];
1763 
1764 	/*
1765 	 * Generate a segment header.
1766 	 */
1767 	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1768 	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
1769 
1770 	/*
1771 	 * Add the machine-dependent header info
1772 	 */
1773 	/* nothing for now */
1774 
1775 	bdev = bdevsw_lookup(dumpdev);
1776 	if (bdev == NULL)
1777 		return (-1);
1778 
1779 	return (*bdev->d_dump)(dumpdev, dumplo, (void *)buf, dbtob(1));
1780 }
1781 
1782 /*
1783  * Dump the kernel's image to the swap partition.
1784  */
1785 #define	BYTES_PER_DUMP	PAGE_SIZE
1786 
1787 void
1788 dumpsys(void)
1789 {
1790 	const struct bdevsw *bdev;
1791 	int psize, bytes, i, n;
1792 	char *maddr;
1793 	daddr_t blkno;
1794 	int (*dump)(dev_t, daddr_t, void *, size_t);
1795 	int error;
1796 
1797 	if (dumpdev == NODEV)
1798 		return;
1799 	bdev = bdevsw_lookup(dumpdev);
1800 	if (bdev == NULL)
1801 		return;
1802 
1803 	/* Save registers
1804 	savectx(&dumppcb); */
1805 
1806 	if (dumpsize == 0)
1807 		cpu_dumpconf();
1808 	if (dumplo <= 0) {
1809 		printf("\ndump to dev %u,%u not possible\n",
1810 		    major(dumpdev), minor(dumpdev));
1811 		return;
1812 	}
1813 	printf("\ndumping to dev %u,%u offset %ld\n",
1814 	    major(dumpdev), minor(dumpdev), dumplo);
1815 
1816 	psize = bdev_size(dumpdev);
1817 	printf("dump ");
1818 	if (psize == -1) {
1819 		printf("area unavailable\n");
1820 		return;
1821 	}
1822 
1823 	if (!(error = cpu_dump())) {
1824 
1825 		/* XXX fredette - this is way broken: */
1826 		bytes = ctob(physmem);
1827 		maddr = NULL;
1828 		blkno = dumplo + cpu_dumpsize();
1829 		dump = bdev->d_dump;
1830 		/* TODO block map the whole physical memory */
1831 		for (i = 0; i < bytes; i += n) {
1832 
1833 			/* Print out how many MBs we are to go. */
1834 			n = bytes - i;
1835 			if (n && (n % (1024*1024)) == 0)
1836 				printf_nolog("%d ", n / (1024 * 1024));
1837 
1838 			/* Limit size for next transfer. */
1839 
1840 			if (n > BYTES_PER_DUMP)
1841 				n = BYTES_PER_DUMP;
1842 
1843 			if ((error = (*dump)(dumpdev, blkno, maddr, n)))
1844 				break;
1845 			maddr += n;
1846 			blkno += btodb(n);
1847 		}
1848 	}
1849 
1850 	switch (error) {
1851 	case ENXIO:	printf("device bad\n");			break;
1852 	case EFAULT:	printf("device not ready\n");		break;
1853 	case EINVAL:	printf("area improper\n");		break;
1854 	case EIO:	printf("i/o error\n");			break;
1855 	case EINTR:	printf("aborted from console\n");	break;
1856 	case 0:		printf("succeeded\n");			break;
1857 	default:	printf("error %d\n", error);		break;
1858 	}
1859 }
1860 
1861 void
1862 hppa_setvmspace(struct lwp *l)
1863 {
1864 	struct proc *p = l->l_proc;
1865 	struct trapframe *tf = l->l_md.md_regs;
1866 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
1867 	pa_space_t space = pmap->pm_space;
1868 
1869     	if (p->p_md.md_flags & MDP_OLDSPACE) {
1870 		tf->tf_sr7 = HPPA_SID_KERNEL;
1871 	} else {
1872 		tf->tf_sr7 = space;
1873 	}
1874 
1875 	tf->tf_sr2 = HPPA_SID_KERNEL;
1876 
1877 	/* Load all of the user's space registers. */
1878 	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr3 =
1879 	tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 =
1880 	tf->tf_iisq_head = tf->tf_iisq_tail = space;
1881 
1882 	/* Load the protection registers. */
1883 	tf->tf_pidr1 = tf->tf_pidr2 = pmap->pm_pid;
1884 }
1885 
1886 /*
1887  * Set registers on exec.
1888  */
1889 void
1890 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
1891 {
1892 	struct proc *p = l->l_proc;
1893 	struct trapframe *tf = l->l_md.md_regs;
1894 	struct pcb *pcb = lwp_getpcb(l);
1895 
1896 	tf->tf_flags = TFF_SYS|TFF_LAST;
1897 	tf->tf_iioq_tail = 4 +
1898 	    (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER);
1899 	tf->tf_rp = 0;
1900 	tf->tf_arg0 = p->p_psstrp;
1901 	tf->tf_arg1 = tf->tf_arg2 = 0; /* XXX dynload stuff */
1902 
1903 	if (pack->ep_osversion < 699003600) {
1904 		p->p_md.md_flags |= MDP_OLDSPACE;
1905 	} else {
1906 		p->p_md.md_flags = 0;
1907 	}
1908 
1909 	hppa_setvmspace(l);
1910 
1911 	/* reset any of the pending FPU exceptions */
1912 	hppa_fpu_flush(l);
1913 	pcb->pcb_fpregs->fpr_regs[0] = ((uint64_t)HPPA_FPU_INIT) << 32;
1914 	pcb->pcb_fpregs->fpr_regs[1] = 0;
1915 	pcb->pcb_fpregs->fpr_regs[2] = 0;
1916 	pcb->pcb_fpregs->fpr_regs[3] = 0;
1917 
1918 	l->l_md.md_bpva = 0;
1919 
1920 	/* setup terminal stack frame */
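	/*
	 * The hppa stack grows upward; zero the first word of the frame
	 * and the previous-SP slot so that stack traces terminate here.
	 */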
1921 	stack = (u_long)STACK_ALIGN(stack, 63);
1922 	tf->tf_r3 = stack;
1923 	ustore_long((void *)(stack), 0);
1924 	stack += HPPA_FRAME_SIZE;
1925 	ustore_long((void *)(stack + HPPA_FRAME_PSP), 0);
1926 	tf->tf_sp = stack;
1927 }
1928 
1929 /*
1930  * machine dependent system variables.
1931  */
1932 static int
1933 sysctl_machdep_boot(SYSCTLFN_ARGS)
1934 {
1935 	struct sysctlnode node = *rnode;
1936 	struct btinfo_kernelfile *bi_file;
1937 	const char *cp = NULL;
1938 
1939 	switch (node.sysctl_num) {
1940 	case CPU_BOOTED_KERNEL:
1941 		if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL)
1942 			cp = bi_file->name;
1943 		if (cp != NULL && cp[0] == '\0')
1944 			cp = "netbsd";
1945 		break;
1946 	default:
1947 		return (EINVAL);
1948 	}
1949 
1950 	if (cp == NULL || cp[0] == '\0')
1951 		return (ENOENT);
1952 
1953 	node.sysctl_data = __UNCONST(cp);
1954 	node.sysctl_size = strlen(cp) + 1;
1955 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
1956 }
1957 
1958 #if NLCD > 0
1959 static int
1960 sysctl_machdep_heartbeat(SYSCTLFN_ARGS)
1961 {
1962 	int error;
1963 	bool oldval;
1964 	struct sysctlnode node = *rnode;
1965 
1966 	oldval = lcd_blink_p;
1967 	/*
1968 	 * If we were false and are now true, start the timer.
1969 	 */
1970 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1971 
1972 	if (error || newp == NULL)
1973 		return (error);
1974 
1975 	if (!oldval && lcd_blink_p)
1976 		blink_lcd_timeout(NULL);
1977 
1978 	return 0;
1979 }
1980 #endif
1981 
1982 /*
1983  * machine dependent system variables.
1984  */
1985 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
1986 {
1987 
1988 	sysctl_createv(clog, 0, NULL, NULL,
1989 		       CTLFLAG_PERMANENT,
1990 		       CTLTYPE_NODE, "machdep", NULL,
1991 		       NULL, 0, NULL, 0,
1992 		       CTL_MACHDEP, CTL_EOL);
1993 
1994 	sysctl_createv(clog, 0, NULL, NULL,
1995 		       CTLFLAG_PERMANENT,
1996 		       CTLTYPE_STRUCT, "console_device", NULL,
1997 		       sysctl_consdev, 0, NULL, sizeof(dev_t),
1998 		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
1999 
2000 	sysctl_createv(clog, 0, NULL, NULL,
2001 		       CTLFLAG_PERMANENT,
2002 		       CTLTYPE_STRING, "booted_kernel", NULL,
2003 		       sysctl_machdep_boot, 0, NULL, 0,
2004 		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
2005 #if NLCD > 0
2006 	sysctl_createv(clog, 0, NULL, NULL,
2007 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
2008 		       CTLTYPE_BOOL, "lcd_blink", "Display heartbeat on the LCD display",
2009 		       sysctl_machdep_heartbeat, 0, &lcd_blink_p, 0,
2010 		       CTL_MACHDEP, CPU_LCD_BLINK, CTL_EOL);
2011 #endif
2012 }
2013 
2014 /*
2015  * Given the type of a bootinfo entry, looks for a matching item inside
2016  * the bootinfo structure.  If found, returns a pointer to it (which must
2017  * then be cast to the appropriate bootinfo_* type); otherwise, returns
2018  * NULL.
2019  */
2020 void *
2021 lookup_bootinfo(int type)
2022 {
2023 	struct btinfo_common *bic;
2024 	int i;
2025 
2026 	bic = (struct btinfo_common *)(&bootinfo.bi_data[0]);
2027 	for (i = 0; i < bootinfo.bi_nentries; i++)
2028 		if (bic->type == type)
2029 			return bic;
2030 		else
2031 			bic = (struct btinfo_common *)
2032 			    ((uint8_t *)bic + bic->len);
2033 
2034 	return NULL;
2035 }
2036 
2037 /*
2038  * consinit:
2039  * initialize the system console.
2040  */
2041 void
2042 consinit(void)
2043 {
2044 	static int initted = 0;
2045 
2046 	if (!initted) {
2047 		initted++;
2048 		cninit();
2049 	}
2050 }
2051 
2052 #if NLCD > 0
2053 struct blink_lcd_softc {
2054 	SLIST_HEAD(, blink_lcd) bls_head;
2055 	int bls_on;
2056 	struct callout bls_to;
2057 } blink_sc = {
2058 	.bls_head = SLIST_HEAD_INITIALIZER(bls_head)
2059 };
2060 
2061 void
2062 blink_lcd_register(struct blink_lcd *l)
2063 {
2064 	if (SLIST_EMPTY(&blink_sc.bls_head)) {
2065 		callout_init(&blink_sc.bls_to, 0);
2066 		callout_setfunc(&blink_sc.bls_to, blink_lcd_timeout, &blink_sc);
2067 		blink_sc.bls_on = 0;
2068 		if (lcd_blink_p)
2069 			callout_schedule(&blink_sc.bls_to, 1);
2070 	}
2071 	SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next);
2072 }
2073 
2074 void
2075 blink_lcd_timeout(void *vsc)
2076 {
2077 	struct blink_lcd_softc *sc = &blink_sc;
2078 	struct blink_lcd *l;
2079 	int t;
2080 
2081 	if (SLIST_EMPTY(&sc->bls_head))
2082 		return;
2083 
2084 	SLIST_FOREACH(l, &sc->bls_head, bl_next) {
2085 		(*l->bl_func)(l->bl_arg, sc->bls_on);
2086 	}
2087 	sc->bls_on = !sc->bls_on;
2088 
2089 	if (!lcd_blink_p)
2090 		return;
2091 
2092 	/*
2093 	 * Blink rate is:
2094 	 *      full cycle every second if completely idle (loadav = 0)
2095 	 *      full cycle every 2 seconds if loadav = 1
2096 	 *      full cycle every 3 seconds if loadav = 2
2097 	 * etc.
2098 	 */
2099 	t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1));
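	/*
	 * ldavg is fixed point with scale FSCALE = 1 << FSHIFT, so this
	 * is (loadavg + 1) * hz / 2 ticks, i.e. one half-cycle.
	 */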
2100 	callout_schedule(&sc->bls_to, t);
2101 }
2102 #endif
2103 
2104 #ifdef MODULAR
2105 /*
2106  * Push any modules loaded by the boot loader.
2107  */
2108 void
2109 module_init_md(void)
2110 {
2111 }
2112 #endif /* MODULAR */
2113 
2114 bool
2115 mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
2116 {
2117 
2118 	if (atop(paddr) > physmem) {
2119 		return false;
2120 	}
2121 	*vaddr = paddr;
2122 
2123 	return true;
2124 }
2125 
2126 int
2127 mm_md_physacc(paddr_t pa, vm_prot_t prot)
2128 {
2129 
2130 	return (atop(pa) > physmem) ? EFAULT : 0;
2131 }
2132 
2133 int
2134 mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
2135 {
2136 	extern int kernel_text;
2137 	extern int __data_start;
2138 	extern int end;
2139 
2140 	const vaddr_t ksro = (vaddr_t) &kernel_text;
2141 	const vaddr_t ksrw = (vaddr_t) &__data_start;
2142 	const vaddr_t kend = (vaddr_t) end;
2143 	const vaddr_t v = (vaddr_t)ptr;
2144 
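	/*
	 * Kernel text [ksro, ksrw) is read-only, data/bss [ksrw, kend)
	 * is read-write, and anything above the kernel image that is
	 * still within physical memory is direct-mapped RAM.
	 */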
2145 	*handled = false;
2146 	if (v >= ksro && v < kend) {
2147 		*handled = true;
2148 		if (v < ksrw && (prot & VM_PROT_WRITE)) {
2149 			return EFAULT;
2150 		}
2151 	} else if (v >= kend && atop((paddr_t)v) < physmem) {
2152 		*handled = true;
2153 	}
2154 
2155 	return 0;
2156 }
2157