xref: /openbsd/sys/arch/hppa/hppa/machdep.c (revision 36dba039)
1 /*	$OpenBSD: machdep.c,v 1.269 2024/04/14 03:26:25 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 1999-2003 Michael Shalayeff
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/signalvar.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/buf.h>
35 #include <sys/reboot.h>
36 #include <sys/device.h>
37 #include <sys/conf.h>
38 #include <sys/timeout.h>
39 #include <sys/malloc.h>
40 #include <sys/pool.h>
41 #include <sys/msgbuf.h>
42 #include <sys/ioctl.h>
43 #include <sys/tty.h>
44 #include <sys/user.h>
45 #include <sys/exec.h>
46 #include <sys/sysctl.h>
47 #include <sys/core.h>
48 #include <sys/kcore.h>
49 #include <sys/extent.h>
50 
51 #include <sys/mount.h>
52 #include <sys/syscallargs.h>
53 
54 #include <net/if.h>
55 #include <uvm/uvm.h>
56 
57 #include <dev/cons.h>
58 
59 #include <machine/pdc.h>
60 #include <machine/iomod.h>
61 #include <machine/psl.h>
62 #include <machine/reg.h>
63 #include <machine/cpufunc.h>
64 #include <machine/autoconf.h>
65 #include <machine/kcore.h>
66 #include <machine/fpu.h>
67 
68 #ifdef DDB
69 #include <machine/db_machdep.h>
70 #include <ddb/db_access.h>
71 #include <ddb/db_sym.h>
72 #include <ddb/db_extern.h>
73 #endif
74 
75 #include <hppa/dev/cpudevs.h>
76 
77 /*
78  * Different kinds of flags used throughout the kernel.
79  */
/*
 * Different kinds of flags used throughout the kernel.
 */
int cold = 1;			/* unset when engine is up to go */
extern int msgbufmapped;	/* set when safe to use msgbuf */

/*
 * cache configuration, for most machines is the same
 * numbers, so it makes sense to do defines w/ numbers depending
 * on configured cpu types in the kernel
 */
int icache_stride, icache_line_mask;	/* filled from pdc_cache in hppa_init() */
int dcache_stride, dcache_line_mask;

/*
 * things to not kill
 */
volatile u_int8_t *machine_ledaddr;	/* mapped LED register, if the machine has one */
int machine_ledword, machine_leds;
struct cpu_info cpu_info[HPPA_MAXCPUS];

/*
 * CPU params (should be the same for all cpus in the system)
 */
struct pdc_cache pdc_cache PDC_ALIGNMENT;
struct pdc_btlb pdc_btlb PDC_ALIGNMENT;
struct pdc_model pdc_model PDC_ALIGNMENT;

	/* w/ a little deviation should be the same for all installed cpus */
u_int	cpu_ticksnum, cpu_ticksdenom;	/* itmr ticks per usec, as a ratio; see delay_init() */

	/* exported info */
char	machine[] = MACHINE;
char	cpu_model[128];			/* human-readable model string, built by cpuid() */
enum hppa_cpu_type cpu_type;
const char *cpu_typename;
int	cpu_hvers;
u_int	fpu_version;

int	led_blink;

/*
 * exported methods for cpus; set by cpuid() from the matched cpu_types[] entry
 */
int (*cpu_desidhash)(void);
int (*cpu_hpt_init)(vaddr_t hpt, vsize_t hptsize);
int (*cpu_ibtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	    vsize_t sz, u_int prot);
int (*cpu_dbtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	    vsize_t sz, u_int prot);

dev_t	bootdev;
int	physmem, resvmem, resvphysmem, esym;	/* page counts; set in hppa_init() */

#ifdef MULTIPROCESSOR
__cpu_simple_lock_t atomic_lock = __SIMPLELOCK_UNLOCKED;
#endif
134 
/*
 * Things for MI glue to stick on.
 */
struct user *proc0paddr;
long mem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)];
struct extent *hppa_ex;			/* physical memory extent map; created in hppa_init() */
struct pool hppa_fppl;			/* pool of struct hppa_fpstate */
struct hppa_fpstate proc0fpstate;	/* static FPU state for proc0 */
struct consdev *cn_tab;

struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;

/* local prototypes */
void delay_init(void);
static __inline void fall(int, int, int, int, int);
void dumpsys(void);
void hpmc_dump(void);
void cpuid(void);
void blink_led_timeout(void *);
154 
155 /*
156  * safepri is a safe priority for sleep to set for a spin-wait
157  * during autoconfiguration or after a panic.
158  */
159 int   safepri = 0;
160 
161 /*
162  * wide used hardware params
163  */
164 struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
165 struct pdc_coherence pdc_coherence PDC_ALIGNMENT;
166 struct pdc_spidb pdc_spidbits PDC_ALIGNMENT;
167 struct pdc_model pdc_model PDC_ALIGNMENT;
168 
169 struct uvm_constraint_range  dma_constraint = { 0x0, (paddr_t)-1 };
170 struct uvm_constraint_range *uvm_md_constraints[] = { NULL };
171 
/*
 * Whatever CPU types we support
 */
extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
extern const u_int itlb_u[], itlbna_u[], dtlb_u[], dtlbna_u[], tlbd_u[];
int iibtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int idbtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_t(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_l(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_u(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int pbtlb_g(int i);
int pbtlb_u(int i);
int hpti_g(vaddr_t, vsize_t);
int desidhash_s(void);
int desidhash_t(void);
int desidhash_l(void);
int desidhash_u(void);

/*
 * Table of supported CPU variants, matched by cpuid() either on the
 * PDC-reported cpuid or, failing that, on the detected feature mask.
 * Missing trailing initializers (ibtlbins, hptinit) default to NULL.
 * `patch' is an index used to patch the TLB trap handler branches.
 */
const struct hppa_cpu_typed {
	char name[8];
	enum hppa_cpu_type type;
	int  cpuid;
	int  features;
	int  patch;
	int  (*desidhash)(void);
	int  (*dbtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	     vsize_t sz, u_int prot);
	int  (*ibtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	     vsize_t sz, u_int prot);
	int  (*btlbprg)(int i);
	int  (*hptinit)(vaddr_t hpt, vsize_t hptsize);
} cpu_types[] = {
#ifdef HP7000_CPU
	{ "PCXS",  hpcxs,  0, 0, 3, desidhash_s, ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7100_CPU
	{ "PCXT",  hpcxt, 0, HPPA_FTRS_BTLBU,
	  2, desidhash_t, ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7200_CPU
	{ "PCXT'", hpcxta,HPPA_CPU_PCXT2, HPPA_FTRS_BTLBU,
	  2, desidhash_t, ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7100LC_CPU
	{ "PCXL",  hpcxl, HPPA_CPU_PCXL, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	  0, desidhash_l, ibtlb_g, NULL, pbtlb_g, hpti_g},
#endif
#ifdef HP7300LC_CPU
	{ "PCXL2", hpcxl2,HPPA_CPU_PCXL2, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	  0, desidhash_l, ibtlb_g, NULL, pbtlb_g, hpti_g},
#endif
#ifdef HP8000_CPU
	{ "PCXU",  hpcxu, HPPA_CPU_PCXU, HPPA_FTRS_W32B,
	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
#endif
#ifdef HP8200_CPU
	{ "PCXU+", hpcxu2,HPPA_CPU_PCXUP, HPPA_FTRS_W32B,
	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
#endif
#ifdef HP8500_CPU
	{ "PCXW",  hpcxw, HPPA_CPU_PCXW, HPPA_FTRS_W32B,
	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
#endif
#ifdef HP8700_CPU
	{ "PCXW2",  hpcxw, HPPA_CPU_PCXW2, HPPA_FTRS_W32B,
	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
#endif
	{ "", 0 }	/* table terminator */
};
250 
251 int	hppa_cpuspeed(int *mhz);
252 
253 int
hppa_cpuspeed(int * mhz)254 hppa_cpuspeed(int *mhz)
255 {
256 	*mhz = PAGE0->mem_10msec / 10000;
257 
258 	return (0);
259 }
260 
/*
 * Early machine-dependent initialization, called from locore with the
 * MMU still off.  `start' is the first address past the loaded kernel
 * image (presumably; confirm against locore.S).  Probes firmware (PDC)
 * for cache/TLB parameters, installs the HPMC/TOC/PFR fault vectors,
 * reserves physical memory, and bootstraps the pmap.
 */
void
hppa_init(paddr_t start)
{
	extern int kernel_text;
	struct cpu_info *ci;
	int error;
	paddr_t	avail_end;

	pdc_init();	/* init PDC iface, so we can call em easy */

	delay_init();	/* calculate cpu clock ratio */

	/* cache parameters */
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
	    &pdc_cache)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_CACHE error %d\n", error);
#endif
	}

	/* derive flush strides/masks from the PDC cache description */
	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
	dcache_stride = pdc_cache.dc_stride;
	icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1;
	icache_stride = pdc_cache.ic_stride;

	/* cache coherence params (probably available for 8k only) */
	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_SETCS,
	    &pdc_coherence, 1, 1, 1, 1);
#ifdef DEBUG
	printf ("PDC_CACHE_SETCS: %d, %d, %d, %d (%d)\n",
	    pdc_coherence.ia_cst, pdc_coherence.da_cst,
	    pdc_coherence.ita_cst, pdc_coherence.dta_cst, error);
#endif
	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_GETSPIDB,
	    &pdc_spidbits, 0, 0, 0, 0);
	printf("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error);

	/* setup hpmc handler */
	{
		/* from locore.s */
		extern uint32_t hpmc_v[], hpmc_tramp[], hpmc_tramp_end[];
		uint32_t *p;
		uint32_t cksum = 0;

		/* checksum the trampoline code itself */
		for (p = hpmc_tramp; p < hpmc_tramp_end; p++)
			cksum += *p;

		p = hpmc_v;
		/* let firmware fill in the default HPMC vector; fall back
		 * to a branch instruction if that call fails */
		if (pdc_call((iodcio_t)pdc, 0, PDC_INSTR, PDC_INSTR_DFLT, p))
			*p = 0x08000240;

		/* point the vector at the trampoline and fix the vector
		 * checksum word (p[5]) so the whole block sums to zero */
		p[6] = (uint32_t)&hpmc_tramp;
		p[7] = (hpmc_tramp_end - hpmc_tramp) * sizeof(uint32_t);
		p[5] =
		    -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7] + cksum);
	}

	/* install the TOC (transfer of control) handler and its checksum */
	{
		extern u_int hppa_toc[], hppa_toc_end[];
		u_int cksum, *p;

		for (cksum = 0, p = hppa_toc; p < hppa_toc_end; p++)
			cksum += *p;

		*p = cksum;
		PAGE0->ivec_toc = (u_int)hppa_toc;
		PAGE0->ivec_toclen = (hppa_toc_end - hppa_toc + 1) * 4;
	}

	/* install the power-fail handler and its checksum */
	{
		extern u_int hppa_pfr[], hppa_pfr_end[];
		u_int cksum, *p;

		for (cksum = 0, p = hppa_pfr; p < hppa_pfr_end; p++)
			cksum += *p;

		*p = cksum;
		PAGE0->ivec_mempf = (u_int)hppa_pfr;
		PAGE0->ivec_mempflen = (hppa_pfr_end - hppa_pfr + 1) * 4;
	}

	ci = curcpu();
	ci->ci_cpl = IPL_NESTED;
	ci->ci_psw = PSL_Q | PSL_P | PSL_C | PSL_D;

	cpuid();
	ptlball();	/* purge all TLB entries */
	ficacheall();	/* then both caches */
	fdcacheall();

	/* top of usable physical memory, clipped to the gateway page */
	avail_end = trunc_page(PAGE0->imm_max_mem);
	if (avail_end > SYSCALLGATE)
		avail_end = SYSCALLGATE;
	physmem = atop(avail_end);
	resvmem = atop(((vaddr_t)&kernel_text));

	/* we hope this won't fail */
	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)mem_ex_storage, sizeof(mem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);
	if (extent_alloc_region(hppa_ex, 0, (vaddr_t)PAGE0->imm_max_mem,
	    EX_NOWAIT))
		panic("cannot reserve main memory");

	/* sets resvphysmem */
	pmap_bootstrap(round_page(start));

	/* space has been reserved in pmap_bootstrap() */
	initmsgbuf((caddr_t)(ptoa(physmem) - round_page(MSGBUFSIZE)),
	    round_page(MSGBUFSIZE));

	/* they say PDC_COPROC might turn fault light on */
	pdc_call((iodcio_t)pdc, 0, PDC_CHASSIS, PDC_CHASSIS_DISP,
	    PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0);

	cpu_cpuspeed = &hppa_cpuspeed;
#ifdef DDB
	ddb_init();
#endif
	/* flush again now that the fault vectors are in place */
	ficacheall();
	fdcacheall();

	proc0paddr->u_pcb.pcb_fpstate = &proc0fpstate;
	pool_init(&hppa_fppl, sizeof(struct hppa_fpstate), 16, IPL_NONE, 0,
	    "hppafp", NULL);
}
387 
/*
 * Identify the CPU: query PDC for model/cpuid/coprocessor/BTLB/HPT
 * information, match the result against cpu_types[], export the chosen
 * entry's methods through the cpu_* function pointers, patch the TLB
 * trap handlers accordingly, and build the cpu_model string.
 */
void
cpuid()
{
	/*
	 * Ptrs to various tlb handlers, to be filled
	 * based on cpu features.
	 * from locore.S
	 */
	extern u_int trap_ep_T_TLB_DIRTY[];
	extern u_int trap_ep_T_DTLBMISS[];
	extern u_int trap_ep_T_DTLBMISSNA[];
	extern u_int trap_ep_T_ITLBMISS[];
	extern u_int trap_ep_T_ITLBMISSNA[];

	extern u_int fpu_enable;
	extern int cpu_fpuena;
	struct pdc_cpuid pdc_cpuid PDC_ALIGNMENT;
	struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
	const struct hppa_cpu_typed *p = NULL;
	u_int cpu_features;
	int error;

	/* may the scientific guessing begin */
	cpu_features = 0;
	cpu_type = 0;

	/* identify system type */
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_INFO,
	    &pdc_model)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_MODEL error %d\n", error);
#endif
		pdc_model.hvers = 0;
	}

	bzero(&pdc_cpuid, sizeof(pdc_cpuid));
	if (pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_CPUID,
	    &pdc_cpuid, 0, 0, 0, 0) >= 0) {

		/* patch for old 8200 */
		if (pdc_cpuid.version == HPPA_CPU_PCXU &&
		    pdc_cpuid.revision > 0x0d)
			pdc_cpuid.version = HPPA_CPU_PCXUP;

		cpu_type = pdc_cpuid.version;
	}

	/* locate coprocessors and SFUs */
	bzero(&pdc_coproc, sizeof(pdc_coproc));
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
	    &pdc_coproc, 0, 0, 0, 0, 0)) < 0) {
		/*
		 * Some 1.1 systems fail the PDC_COPROC call with error == -3,
		 * when booting from disk (but not when netbooting).
		 * Until the cause of this misbehaviour is found, assume the
		 * usual 1.1 FPU settings, so that userland gets a chance to
		 * run.
		 */
		if ((pdc_model.hvers >> 4) != 0 && pdc_model.arch_rev == 4) {
			printf("WARNING: PDC_COPROC error %d,"
			    " assuming 1.1 FPU\n", error);
			fpu_enable = 0xc0;
			cpu_fpuena = 1;
		} else {
			printf("WARNING: PDC_COPROC error %d\n", error);
			cpu_fpuena = 0;
		}
	} else {
		printf("pdc_coproc: 0x%x, 0x%x; model %x rev %x\n",
		    pdc_coproc.ccr_enable, pdc_coproc.ccr_present,
		    pdc_coproc.fpu_model, pdc_coproc.fpu_revision);
		fpu_enable = pdc_coproc.ccr_enable & CCR_MASK;
		cpu_fpuena = 1;

		/* a kludge to detect PCXW */
		if (pdc_coproc.fpu_model == HPPA_FPU_PCXW)
			cpu_type = HPPA_CPU_PCXW;
	}

	/* BTLB params; PCXU and later have no block TLB */
	if (cpu_type < HPPA_CPU_PCXU &&
	    (error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
	     PDC_BTLB_DEFAULT, &pdc_btlb)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_BTLB error %d\n", error);
#endif
	} else {
#ifdef BTLBDEBUG
		printf("btlb info: minsz=%d, maxsz=%d\n",
		    pdc_btlb.min_size, pdc_btlb.max_size);
		printf("btlb fixed: i=%d, d=%d, c=%d\n",
		    pdc_btlb.finfo.num_i,
		    pdc_btlb.finfo.num_d,
		    pdc_btlb.finfo.num_c);
		printf("btlb varbl: i=%d, d=%d, c=%d\n",
		    pdc_btlb.vinfo.num_i,
		    pdc_btlb.vinfo.num_d,
		    pdc_btlb.vinfo.num_c);
#endif /* BTLBDEBUG */
		/* purge TLBs and caches */
		if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
		    PDC_BTLB_PURGE_ALL) < 0)
			printf("WARNING: BTLB purge failed\n");

		/* combined (unified) BTLB entries imply HPPA_FTRS_BTLBU */
		if (pdc_btlb.finfo.num_c)
			cpu_features |= HPPA_FTRS_BTLBU;
	}

	/* hardware page table walker (HPT) support? */
	if (!pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
	    pdc_hwtlb.min_size && pdc_hwtlb.max_size) {
		cpu_features |= HPPA_FTRS_HVT;
		/* clamp the configured HPT size into the supported range */
		if (pmap_hptsize > pdc_hwtlb.max_size)
			pmap_hptsize = pdc_hwtlb.max_size;
		else if (pmap_hptsize && pmap_hptsize < pdc_hwtlb.min_size)
			pmap_hptsize = pdc_hwtlb.min_size;
	} else {
#ifdef DEBUG
		printf("WARNING: no HPT support, fine!\n");
#endif
		pmap_hptsize = 0;
	}

	/* match by PDC cpuid if we got one, otherwise by feature mask */
	if (cpu_type)
		for (p = cpu_types; p->name[0] && p->cpuid != cpu_type; p++)
			;
	else
		for (p = cpu_types;
		    p->name[0] && p->features != cpu_features; p++);

	if (!p->name[0]) {
		printf("WARNING: UNKNOWN CPU TYPE; GOOD LUCK "
		    "(type 0x%x, features 0x%x)\n", cpu_type, cpu_features);
		p = cpu_types;
	} else if ((p->type == hpcxl || p->type == hpcxl2) && !fpu_enable) {
		/* we know PCXL and PCXL2 do not exist w/o FPU */
		fpu_enable = 0xc0;
		cpu_fpuena = 1;
	}

	/*
	 * TODO: HPT on 7200 is not currently supported
	 */
	if (pmap_hptsize && p->type != hpcxl && p->type != hpcxl2)
		pmap_hptsize = 0;

	/* export the matched entry */
	cpu_type = p->type;
	cpu_typename = p->name;
	cpu_ibtlb_ins = p->ibtlbins;
	cpu_dbtlb_ins = p->dbtlbins;
	cpu_hpt_init = p->hptinit;
	cpu_desidhash = p->desidhash;

	/* patch tlb handler branches: slot p->patch holds the branch
	 * instruction appropriate for this CPU variant */
	if (p->patch) {
		trap_ep_T_TLB_DIRTY [0] = trap_ep_T_TLB_DIRTY [p->patch];
		trap_ep_T_DTLBMISS  [0] = trap_ep_T_DTLBMISS  [p->patch];
		trap_ep_T_DTLBMISSNA[0] = trap_ep_T_DTLBMISSNA[p->patch];
		trap_ep_T_ITLBMISS  [0] = trap_ep_T_ITLBMISS  [p->patch];
		trap_ep_T_ITLBMISSNA[0] = trap_ep_T_ITLBMISSNA[p->patch];
	}

	/* force strong ordering for now */
	if (p->features & HPPA_FTRS_W32B) {
		curcpu()->ci_psw |= PSL_O;
	}

	/* build the cpu_model string, e.g. "HP 9000/712 PA-RISC 1.1c" */
	{
		const char *p, *q;	/* NB: shadows the outer `p' */
		char buf[32];
		int lev;

		lev = 0xa + (*cpu_desidhash)();
		cpu_hvers = pdc_model.hvers >> 4;
		if (!cpu_hvers) {
			p = "(UNKNOWN)";
			q = lev == 0xa? "1.0" : "1.1";
		} else {
			p = hppa_mod_info(HPPA_TYPE_BOARD, cpu_hvers);
			if (!p) {
				snprintf(buf, sizeof buf, "(UNKNOWN 0x%x)",
				    cpu_hvers);
				p = buf;
			}

			switch (pdc_model.arch_rev) {
			default:
			case 0:
				q = "1.0";
				break;
			case 4:
				q = "1.1";
				/* this one is just a 100MHz pcxl */
				if (lev == 0x10)
					lev = 0xc;
				/* this one is a pcxl2 */
				if (lev == 0x16)
					lev = 0xe;
				break;
			case 8:
				q = "2.0";
				break;
			}
		}

		snprintf(cpu_model, sizeof cpu_model,
		    "HP 9000/%s PA-RISC %s%x", p, q, lev);
	}
#ifdef DEBUG
	printf("cpu: %s\n", cpu_model);
#endif
}
599 
/*
 * Machine-dependent startup, called from main() once VM is up:
 * print the banner and memory figures, allocate the exec and physio
 * submaps, initialize buffers and optionally run boot -c config.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;

	/*
	 * i won't understand a friend of mine,
	 * who sat in a room full of artificial ice,
	 * fogging the air w/ humid cries --
	 *	WELCOME TO SUMMER!
	 */
	printf("%s", version);

	printf("%s\n", cpu_model);
	printf("real mem = %lu (%luMB)\n", ptoa(physmem),
	    ptoa(physmem) / 1024 / 1024);
	printf("rsvd mem = %lu (%luKB)\n", ptoa(resvmem), ptoa(resvmem) / 1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free) / 1024 / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}
651 
652 /*
653  * compute cpu clock ratio such as:
654  *	cpu_ticksnum / cpu_ticksdenom = t + delta
655  *	delta -> 0
656  */
657 void
delay_init(void)658 delay_init(void)
659 {
660 	u_int num, denom, delta, mdelta;
661 
662 	mdelta = UINT_MAX;
663 	for (denom = 1; denom < 1000; denom++) {
664 		num = (PAGE0->mem_10msec * denom) / 10000;
665 		delta = num * 10000 / denom - PAGE0->mem_10msec;
666 		if (!delta) {
667 			cpu_ticksdenom = denom;
668 			cpu_ticksnum = num;
669 			break;
670 		} else if (delta < mdelta) {
671 			cpu_ticksdenom = denom;
672 			cpu_ticksnum = num;
673 			mdelta = delta;
674 		}
675 	}
676 }
677 
/*
 * Busy-wait for `us' microseconds using the Interval Timer (CR_ITMR),
 * converting time to ticks with the cpu_ticksnum/cpu_ticksdenom ratio
 * computed by delay_init().  Works in chunks of at most 1000us,
 * presumably to bound the n * cpu_ticksnum product — TODO confirm.
 */
void
delay(u_int us)
{
	u_int start, end, n;

	mfctl(CR_ITMR, start);
	while (us) {
		n = min(1000, us);
		end = start + n * cpu_ticksnum / cpu_ticksdenom;

		/* N.B. Interval Timer may wrap around */
		if (end < start)
			/* deadline wrapped: first wait for the timer itself
			 * to wrap past zero */
			do
				mfctl(CR_ITMR, start);
			while (start > end);

		/* then wait until the deadline is reached */
		do
			mfctl(CR_ITMR, start);
		while (start < end);

		us -= n;
	}
}
701 
/*
 * Walk a cache as described by PDC (base/count/loop/stride) and flush
 * every entry: data cache entries (fdce) when `data' is nonzero,
 * instruction cache entries (fice) otherwise.
 */
static __inline void
fall(int c_base, int c_count, int c_loop, int c_stride, int data)
{
	int rep;

	while (c_count--) {
		rep = c_loop;
		while (rep--) {
			if (data)
				fdce(0, c_base);
			else
				fice(0, c_base);
		}
		c_base += c_stride;
	}
}
714 
void
ficacheall(void)
{
	/*
	 * Flush the entire instruction cache.
	 */
	fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop,
	    pdc_cache.ic_stride, 0);
	sync_caches();
}
725 
void
fdcacheall(void)
{
	/*
	 * Flush the entire data cache.
	 */
	fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop,
	    pdc_cache.dc_stride, 1);
	sync_caches();
}
733 
/*
 * Purge the entire instruction and data TLBs, iterating over every
 * space/offset combination reported by PDC in pdc_cache.
 */
void
ptlball(void)
{
	pa_space_t sp;
	int i, j, k;

	/* instruction TLB */
	sp = pdc_cache.it_sp_base;
	for (i = 0; i < pdc_cache.it_sp_count; i++) {
		vaddr_t off = pdc_cache.it_off_base;
		for (j = 0; j < pdc_cache.it_off_count; j++) {
			for (k = 0; k < pdc_cache.it_loop; k++)
				pitlbe(sp, off);
			off += pdc_cache.it_off_stride;
		}
		sp += pdc_cache.it_sp_stride;
	}

	/* data TLB */
	sp = pdc_cache.dt_sp_base;
	for (i = 0; i < pdc_cache.dt_sp_count; i++) {
		vaddr_t off = pdc_cache.dt_off_base;
		for (j = 0; j < pdc_cache.dt_off_count; j++) {
			for (k = 0; k < pdc_cache.dt_loop; k++)
				pdtlbe(sp, off);
			off += pdc_cache.dt_off_stride;
		}
		sp += pdc_cache.dt_sp_stride;
	}
}
764 
/*
 * Configure the hardware page table (HPT) through PDC on CPUs that
 * support it; installed as cpu_hpt_init for the PCXL/PCXL2 entries
 * in cpu_types[].  Returns the PDC call status.
 */
int
hpti_g(vaddr_t hpt, vsize_t hptsize)
{
	return pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_CONFIG,
	    &pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE);
}
771 
/*
 * Generic BTLB slot purge: not implemented, always reports failure.
 */
int
pbtlb_g(int i)
{
	return -1;
}
777 
/*
 * Generic BTLB insert: ask PDC to map [pa, pa+sz) at (sp, va) in block
 * TLB slot `i' with protection `prot'.  Returns the PDC call status
 * (negative on failure).
 */
int
ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa, vsize_t sz, u_int prot)
{
	int error;

	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_INSERT,
	    sp, va, pa, sz, prot, i)) < 0) {
#ifdef BTLBDEBUG
		printf("WARNING: BTLB insert failed (%d)\n", error);
#endif
	}
	return error;
}
791 
792 int
btlb_insert(pa_space_t space,vaddr_t va,paddr_t pa,vsize_t * lenp,u_int prot)793 btlb_insert(pa_space_t space, vaddr_t va, paddr_t pa, vsize_t *lenp, u_int prot)
794 {
795 	static u_int32_t mask;
796 	vsize_t len;
797 	int error, i, btlb_max;
798 
799 	if (!pdc_btlb.min_size && !pdc_btlb.max_size)
800 		return -(ENXIO);
801 
802 	/*
803 	 * On PCXS processors with split BTLB, we should theoretically
804 	 * insert in the IBTLB (if executable mapping requested), and
805 	 * into the DBTLB. The PDC documentation is very clear that
806 	 * slot numbers are, in order, IBTLB, then DBTLB, then combined
807 	 * BTLB.
808 	 *
809 	 * However it also states that ``successful completion may not mean
810 	 * that the entire address range specified in the call has been
811 	 * mapped in the block TLB. For both fixed range slots and variable
812 	 * range slots, complete coverage of the address range specified
813 	 * is not guaranteed. Only a portion of the address range specified
814 	 * may get mapped as a result''.
815 	 *
816 	 * On an HP 9000/720 with PDC ROM v1.2, it turns out that IBTLB
817 	 * entries are inserted as expected, but no DBTLB gets inserted
818 	 * at all, despite PDC returning success.
819 	 *
820 	 * So play it dumb, and do not attempt to insert DBTLB entries at
821 	 * all on split BTLB systems. Callers are supposed to be able to
822 	 * cope with this.
823 	 */
824 
825 	if (pdc_btlb.finfo.num_c == 0) {
826 		if ((prot & TLB_EXECUTE) == 0)
827 			return -(EINVAL);
828 
829 		btlb_max = pdc_btlb.finfo.num_i;
830 	} else {
831 		btlb_max = pdc_btlb.finfo.num_c;
832 	}
833 
834 	/* align size */
835 	for (len = pdc_btlb.min_size << PGSHIFT; len < *lenp; len <<= 1)
836 		;
837 	len >>= PGSHIFT;
838 	i = ffs(~mask) - 1;
839 	if (len > pdc_btlb.max_size || i < 0 || i >= btlb_max) {
840 #ifdef BTLBDEBUG
841 		printf("btln_insert: too big (%u < %u < %u)\n",
842 		    pdc_btlb.min_size, len, pdc_btlb.max_size);
843 #endif
844 		return -(ENOMEM);
845 	}
846 
847 	mask |= 1 << i;
848 	pa >>= PGSHIFT;
849 	va >>= PGSHIFT;
850 	/* check address alignment */
851 	if (pa & (len - 1)) {
852 #ifdef BTLBDEBUG
853 		printf("WARNING: BTLB address misaligned pa=0x%x, len=0x%x\n",
854 		    pa, len);
855 #endif
856 		return -(ERANGE);
857 	}
858 
859 	/* ensure IO space is uncached */
860 	if ((pa & (HPPA_IOBEGIN >> PGSHIFT)) == (HPPA_IOBEGIN >> PGSHIFT))
861 		prot |= TLB_UNCACHABLE;
862 
863 #ifdef BTLBDEBUG
864 	printf("btlb_insert(%d): %x:%x=%x[%x,%x]\n",
865 	    i, space, va, pa, len, prot);
866 #endif
867 	if ((error = (*cpu_dbtlb_ins)(i, space, va, pa, len, prot)) < 0)
868 		return -(EINVAL);
869 	*lenp = len << PGSHIFT;
870 
871 	return i;
872 }
873 
int waittime = -1;	/* set to 0 once the pre-shutdown fs sync starts */

/*
 * Machine-dependent reboot/halt/powerdown.  Syncs and downs the system
 * as requested by `howto', optionally dumps, then halts, powers off via
 * cold_hook, or asks the firmware to reset.  Never returns.
 */
__dead void
boot(int howto)
{
	if ((howto & RB_RESET) != 0)
		goto doreset;

	/*
	 * On older systems without software power control, prevent mi code
	 * from spinning disks off, in case the operator changes his mind
	 * and prefers to reboot - the firmware will not send a spin up
	 * command to the disks.
	 */
	if (cold_hook == NULL)
		howto &= ~RB_POWERDOWN;

	if (cold) {
		/* too early for a clean shutdown; just halt unless asked */
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto | (boothowto & RB_HALT);

	if ((howto & RB_NOSYNC) == 0) {
		waittime = 0;
		vfs_shutdown(curproc);

		if ((howto & RB_TIMEBAD) == 0) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}
	if_downall();

	uvm_shutdown();
	splhigh();
	cold = 1;

	if ((howto & RB_DUMP) != 0)
		dumpsys();

haltsys:
	config_suspend_all(DVACT_POWERDOWN);

#ifdef MULTIPROCESSOR
	hppa_ipi_broadcast(HPPA_IPI_HALT);
#endif

	/* in case we came on powerfail interrupt */
	if (cold_hook)
		(*cold_hook)(HPPA_COLD_COLD);

	if ((howto & RB_HALT) != 0) {
		if ((howto & RB_POWERDOWN) != 0) {
			printf("Powering off...");
			DELAY(2000000);
			(*cold_hook)(HPPA_COLD_OFF);
			DELAY(1000000);
		}

		printf("System halted!\n");
		DELAY(2000000);
		/* broadcast a STOP command to the local bus */
		__asm volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_STOP), "r" (HPPA_LBCAST + iomod_command));
	} else {
doreset:
		printf("rebooting...");
		DELAY(2000000);

		/* ask firmware to reset */
		pdc_call((iodcio_t)pdc, 0, PDC_BROADCAST_RESET, PDC_DO_RESET);

		/* forcibly reset module if that fails */
		__asm volatile(".export hppa_reset, entry\n\t"
		    ".label hppa_reset");
		__asm volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_RESET), "r" (HPPA_LBCAST + iomod_command));
	}

	for (;;)
		continue;
	/* NOTREACHED */
}
960 
/* kernel core dump parameters; dumpsize/dumplo are set by dumpconf() */
u_long	dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */
964 
965 /*
966  * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
967  */
968 int
cpu_dumpsize(void)969 cpu_dumpsize(void)
970 {
971 	int size;
972 
973 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
974 	if (roundup(size, dbtob(1)) != dbtob(1))
975 		return -1;
976 
977 	return 1;
978 }
979 
/*
 * Called from HPMC handler in locore
 */
void
hpmc_dump(void)
{
	/* pretend we are warm so panic() runs the full shutdown path */
	cold = 0;
	panic("HPMC");
	/* NOTREACHED */
}
990 
/*
 * Write the machine-dependent core dump headers (one disk block) to the
 * dump device.  Returns the status of the device's d_dump routine.
 */
int
cpu_dump(void)
{
	long buf[dbtob(1) / sizeof (long)];
	kcore_seg_t	*segp;
	cpu_kcore_hdr_t	*cpuhdrp;	/* currently unused; see below */

	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info
	 */
	/* nothing for now */

	return (bdevsw[major(dumpdev)].d_dump)
	    (dumpdev, dumplo, (caddr_t)buf, dbtob(1));
}
1015 
/*
 * Dump the kernel's image to the swap partition.
 */
#define	BYTES_PER_DUMP	NBPG	/* transfer one page at a time */

void
dumpsys(void)
{
	int psize, bytes, i, n;
	caddr_t maddr;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	int error;

	/* Save registers
	savectx(&dumppcb); */

	if (dumpsize == 0)
		dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %x not possible\n", dumpdev);
		return;
	}
	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	/* write the MD headers first, then all of physical memory */
	if (!(error = cpu_dump())) {

		bytes = ptoa(physmem);
		maddr = NULL;
		blkno = dumplo + cpu_dumpsize();
		dump = bdevsw[major(dumpdev)].d_dump;
		/* TODO block map the whole physical memory */
		for (i = 0; i < bytes; i += n) {

			/* Print out how many MBs we are to go. */
			n = bytes - i;
			if (n && (n % (1024*1024)) == 0)
				printf("%d ", n / (1024 * 1024));

			/* Limit size for next transfer. */

			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			if ((error = (*dump)(dumpdev, blkno, maddr, n)))
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	switch (error) {
	case ENXIO:	printf("device bad\n");			break;
	case EFAULT:	printf("device not ready\n");		break;
	case EINVAL:	printf("area improper\n");		break;
	case EIO:	printf("i/o error\n");			break;
	case EINTR:	printf("aborted from console\n");	break;
	case 0:		printf("succeeded\n");			break;
	default:	printf("error %d\n", error);		break;
	}
}
1084 
/*
 * bcopy() between kernel addresses that returns an error instead of
 * faulting; both source and destination are in the kernel space.
 */
int
kcopy(const void *from, void *to, size_t size)
{
	return spcopy(HPPA_SID_KERNEL, from, HPPA_SID_KERNEL, to, size);
}
1091 
1092 int
copyinstr(const void * src,void * dst,size_t size,size_t * lenp)1093 copyinstr(const void *src, void *dst, size_t size, size_t *lenp)
1094 {
1095 	if (size == 0)
1096 		return ENAMETOOLONG;
1097 	return spstrcpy(curproc->p_addr->u_pcb.pcb_space, src,
1098 	    HPPA_SID_KERNEL, dst, size, lenp);
1099 }
1100 
1101 int
copyoutstr(const void * src,void * dst,size_t size,size_t * lenp)1102 copyoutstr(const void *src, void *dst, size_t size, size_t *lenp)
1103 {
1104 	if (size == 0)
1105 		return ENAMETOOLONG;
1106 	return spstrcpy(HPPA_SID_KERNEL, src,
1107 	    curproc->p_addr->u_pcb.pcb_space, dst, size, lenp);
1108 }
1109 
1110 int
copyin(const void * src,void * dst,size_t size)1111 copyin(const void *src, void *dst, size_t size)
1112 {
1113 	return spcopy(curproc->p_addr->u_pcb.pcb_space, src,
1114 	    HPPA_SID_KERNEL, dst, size);
1115 }
1116 
1117 int
copyout(const void * src,void * dst,size_t size)1118 copyout(const void *src, void *dst, size_t size)
1119 {
1120 	return spcopy(HPPA_SID_KERNEL, src,
1121 	    curproc->p_addr->u_pcb.pcb_space, dst, size);
1122 }
1123 
1124 int
copyin32(const uint32_t * src,uint32_t * dst)1125 copyin32(const uint32_t *src, uint32_t *dst)
1126 {
1127 	return spcopy32(curproc->p_addr->u_pcb.pcb_space, src,
1128 	    HPPA_SID_KERNEL, dst);
1129 }
1130 
1131 /*
1132  * Set up tf_sp and tf_r3 (the frame pointer) and copy out the
1133  * frame marker and the old r3
1134  */
1135 int
setstack(struct trapframe * tf,u_long stack,register_t old_r3)1136 setstack(struct trapframe *tf, u_long stack, register_t old_r3)
1137 {
1138 	static const register_t zero = 0;
1139 	int err;
1140 
1141 	tf->tf_r3 = stack;
1142 	err = copyout(&old_r3, (caddr_t)stack, sizeof(register_t));
1143 
1144 	tf->tf_sp = stack += HPPA_FRAME_SIZE;
1145 	return (copyout(&zero, (caddr_t)(stack + HPPA_FRAME_CRP),
1146 	    sizeof(register_t)) || err);
1147 }
1148 
1149 
/*
 * Set registers on exec.
 *
 * Reset the process' trapframe so it resumes in user mode at the new
 * image's entry point with a fresh stack frame, fresh space registers
 * and a cleared FPU.
 */
void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    struct ps_strings *arginfo)
{
	struct trapframe *tf = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct fpreg *fpreg = &pcb->pcb_fpstate->hfp_regs;

	/* Start from an all-zero trapframe. */
	memset(tf, 0, sizeof *tf);
	tf->tf_flags = TFF_SYS|TFF_LAST;
	/* Instruction queues: entry point (user privilege) and entry+4. */
	tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER;
	tf->tf_iioq_tail = tf->tf_iioq_head + 4;
	tf->tf_iisq_head = tf->tf_iisq_tail = pcb->pcb_space;
	/* arg0 carries the ps_strings address to the startup code. */
	tf->tf_arg0 = p->p_p->ps_strings;

	/* setup terminal stack frame (64-byte aligned, old r3 = 0) */
	setstack(tf, (stack + 0x3f) & ~0x3f, 0);

	tf->tf_cr30 = (paddr_t)pcb->pcb_fpstate;

	/* All user-accessible space registers refer to this process. */
	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
	tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = pcb->pcb_space;
	tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);

	/*
	 * theoretically these could be inherited,
	 * but just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	mfctl(CR_EIEM, tf->tf_eiem);
	tf->tf_ipsw = PSL_C | PSL_Q | PSL_P | PSL_D | PSL_I /* | PSL_L */ |
	    (curcpu()->ci_psw & PSL_O);

	/* clear the FPU */
	fpu_proc_flush(p);
	memset(fpreg, 0, sizeof *fpreg);
	/* FP reg 0 holds the FPU status word; load the default state. */
	fpreg->fpr_regs[0] = ((u_int64_t)HPPA_FPU_INIT) << 32;

	/* NOTE(review): md_bpva appears to be debugger breakpoint state. */
	p->p_md.md_bpva = 0;
}
1193 
/*
 * Send an interrupt to process.
 *
 * Build a struct sigcontext snapshot of the current user state, copy
 * it (and a siginfo, when `info' is set) out to the user stack or the
 * alternate signal stack, then rewrite the trapframe so the process
 * resumes in the signal trampoline, which will call `catcher'.
 * Returns 0 on success, 1 if the user stack could not be written.
 */
int
sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip,
    int info, int onstack)
{
	struct proc *p = curproc;
	struct trapframe *tf = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct sigcontext ksc;
	register_t scp, sip;
	int sss;

	/* Save the FPU context first. */
	fpu_proc_save(p);

	/*
	 * Allocate space for the signal handler context.
	 * Use the alternate stack when requested and not already on it;
	 * otherwise place the context just above the current stack
	 * pointer (hppa stacks grow upward), 64-byte aligned.
	 */
	if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
	    !sigonstack(tf->tf_sp) && onstack)
		scp = round_page((vaddr_t)p->p_sigstk.ss_sp);
	else
		scp = (tf->tf_sp + 63) & ~63;

	/* Size the area: the context itself plus optional siginfo. */
	sss = (sizeof(ksc) + 63) & ~63;
	sip = 0;
	if (info) {
		sip = scp + sizeof(ksc);
		sss += (sizeof(*ksip) + 63) & ~63;
	}

	/* Snapshot the user-visible machine state into ksc. */
	bzero(&ksc, sizeof(ksc));
	ksc.sc_mask = mask;
	ksc.sc_fp = scp + sss;
	ksc.sc_ps = tf->tf_ipsw;
	ksc.sc_pcoqh = tf->tf_iioq_head;
	ksc.sc_pcoqt = tf->tf_iioq_tail;
	ksc.sc_regs[0] = tf->tf_t1;
	ksc.sc_regs[1] = tf->tf_t2;
	ksc.sc_regs[2] = tf->tf_sp;
	ksc.sc_regs[3] = tf->tf_t3;
	ksc.sc_regs[4] = tf->tf_sar;
	ksc.sc_regs[5] = tf->tf_r1;
	ksc.sc_regs[6] = tf->tf_rp;
	ksc.sc_regs[7] = tf->tf_r3;
	ksc.sc_regs[8] = tf->tf_r4;
	ksc.sc_regs[9] = tf->tf_r5;
	ksc.sc_regs[10] = tf->tf_r6;
	ksc.sc_regs[11] = tf->tf_r7;
	ksc.sc_regs[12] = tf->tf_r8;
	ksc.sc_regs[13] = tf->tf_r9;
	ksc.sc_regs[14] = tf->tf_r10;
	ksc.sc_regs[15] = tf->tf_r11;
	ksc.sc_regs[16] = tf->tf_r12;
	ksc.sc_regs[17] = tf->tf_r13;
	ksc.sc_regs[18] = tf->tf_r14;
	ksc.sc_regs[19] = tf->tf_r15;
	ksc.sc_regs[20] = tf->tf_r16;
	ksc.sc_regs[21] = tf->tf_r17;
	ksc.sc_regs[22] = tf->tf_r18;
	ksc.sc_regs[23] = tf->tf_t4;
	ksc.sc_regs[24] = tf->tf_arg3;
	ksc.sc_regs[25] = tf->tf_arg2;
	ksc.sc_regs[26] = tf->tf_arg1;
	ksc.sc_regs[27] = tf->tf_arg0;
	ksc.sc_regs[28] = tf->tf_dp;
	ksc.sc_regs[29] = tf->tf_ret0;
	ksc.sc_regs[30] = tf->tf_ret1;
	ksc.sc_regs[31] = tf->tf_r31;
	bcopy(&p->p_addr->u_pcb.pcb_fpstate->hfp_regs, ksc.sc_fpregs,
	    sizeof(ksc.sc_fpregs));

	/* Build a new stack frame above the context area. */
	if (setstack(tf, scp + sss, tf->tf_r3))
		return 1;

	/* Arguments for the handler and entry into the trampoline. */
	tf->tf_arg0 = sig;
	tf->tf_arg1 = sip;
	tf->tf_arg2 = tf->tf_r4 = scp;
	tf->tf_arg3 = (register_t)catcher;
	tf->tf_ipsw &= ~(PSL_N|PSL_B|PSL_T);
	tf->tf_iioq_head = HPPA_PC_PRIV_USER | p->p_p->ps_sigcode;
	tf->tf_iioq_tail = tf->tf_iioq_head + 4;
	tf->tf_iisq_tail = tf->tf_iisq_head = pcb->pcb_space;
	/* disable tracing in the trapframe */

	/* Seal the context with the per-process cookie before copyout. */
	ksc.sc_cookie = (long)scp ^ p->p_p->ps_sigcookie;
	if (copyout(&ksc, (void *)scp, sizeof(ksc)))
		return 1;

	if (sip) {
		if (copyout(ksip, (void *)sip, sizeof *ksip))
			return 1;
	}

	return 0;
}
1292 
/*
 * sigreturn(2): restore the machine context saved by sendsig().
 *
 * Validates that the caller returned through the signal trampoline,
 * that the anti-forgery cookie matches, and that the supplied PSW has
 * all required bits set and no forbidden bits before installing the
 * saved register state into the trapframe.
 */
int
sys_sigreturn(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext ksc, *scp = SCARG(uap, sigcntxp);
	struct trapframe *tf = p->p_md.md_regs;
	int error;

	/* Only the signal trampoline's return point may call this. */
	if (PROC_PC(p) != p->p_p->ps_sigcoderet) {
		sigexit(p, SIGILL);
		return (EPERM);
	}

	/* Flush the FPU context first. */
	fpu_proc_flush(p);

	if ((error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc)))
		return (error);

	/* Verify the cookie that sendsig() placed in the context. */
	if (ksc.sc_cookie != ((long)scp ^ p->p_p->ps_sigcookie)) {
		sigexit(p, SIGILL);
		return (EFAULT);
	}

	/* Prevent reuse of the sigcontext cookie */
	ksc.sc_cookie = 0;
	(void)copyout(&ksc.sc_cookie, (caddr_t)scp +
	    offsetof(struct sigcontext, sc_cookie), sizeof (ksc.sc_cookie));

	/*
	 * The user-supplied PSW must have every must-be-set bit on and
	 * every must-be-zero bit off, so userland cannot smuggle in
	 * privileged processor state.
	 */
#define PSL_MBS (PSL_C|PSL_Q|PSL_P|PSL_D|PSL_I)
#define PSL_MBZ (PSL_Y|PSL_Z|PSL_S|PSL_X|PSL_M|PSL_R)
	if ((ksc.sc_ps & (PSL_MBS|PSL_MBZ)) != PSL_MBS)
		return (EINVAL);

	/* Restore the signal mask, never unblocking unblockable signals. */
	p->p_sigmask = ksc.sc_mask &~ sigcantmask;

	/* Restore the general registers saved by sendsig(). */
	tf->tf_t1 = ksc.sc_regs[0];		/* r22 */
	tf->tf_t2 = ksc.sc_regs[1];		/* r21 */
	tf->tf_sp = ksc.sc_regs[2];
	tf->tf_t3 = ksc.sc_regs[3];		/* r20 */
	tf->tf_sar = ksc.sc_regs[4];
	tf->tf_r1 = ksc.sc_regs[5];
	tf->tf_rp = ksc.sc_regs[6];
	tf->tf_r3 = ksc.sc_regs[7];
	tf->tf_r4 = ksc.sc_regs[8];
	tf->tf_r5 = ksc.sc_regs[9];
	tf->tf_r6 = ksc.sc_regs[10];
	tf->tf_r7 = ksc.sc_regs[11];
	tf->tf_r8 = ksc.sc_regs[12];
	tf->tf_r9 = ksc.sc_regs[13];
	tf->tf_r10 = ksc.sc_regs[14];
	tf->tf_r11 = ksc.sc_regs[15];
	tf->tf_r12 = ksc.sc_regs[16];
	tf->tf_r13 = ksc.sc_regs[17];
	tf->tf_r14 = ksc.sc_regs[18];
	tf->tf_r15 = ksc.sc_regs[19];
	tf->tf_r16 = ksc.sc_regs[20];
	tf->tf_r17 = ksc.sc_regs[21];
	tf->tf_r18 = ksc.sc_regs[22];
	tf->tf_t4 = ksc.sc_regs[23];		/* r19 */
	tf->tf_arg3 = ksc.sc_regs[24];		/* r23 */
	tf->tf_arg2 = ksc.sc_regs[25];		/* r24 */
	tf->tf_arg1 = ksc.sc_regs[26];		/* r25 */
	tf->tf_arg0 = ksc.sc_regs[27];		/* r26 */
	tf->tf_dp = ksc.sc_regs[28];
	tf->tf_ret0 = ksc.sc_regs[29];
	tf->tf_ret1 = ksc.sc_regs[30];
	tf->tf_r31 = ksc.sc_regs[31];
	bcopy(ksc.sc_fpregs, &p->p_addr->u_pcb.pcb_fpstate->hfp_regs,
	    sizeof(ksc.sc_fpregs));

	/*
	 * Restore the PC queues; addresses on the syscall gateway page
	 * use the kernel space id, everything else the process' space.
	 */
	tf->tf_iioq_head = ksc.sc_pcoqh | HPPA_PC_PRIV_USER;
	tf->tf_iioq_tail = ksc.sc_pcoqt | HPPA_PC_PRIV_USER;
	if ((tf->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)
		tf->tf_iisq_head = HPPA_SID_KERNEL;
	else
		tf->tf_iisq_head = p->p_addr->u_pcb.pcb_space;
	if ((tf->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
		tf->tf_iisq_tail = HPPA_SID_KERNEL;
	else
		tf->tf_iisq_tail = p->p_addr->u_pcb.pcb_space;
	tf->tf_ipsw = ksc.sc_ps | (curcpu()->ci_psw & PSL_O);

	return (EJUSTRETURN);
}
1380 
/*
 * Notify process p of a pending signal: post an AST so the signal is
 * noticed on the next return to user mode, and kick the CPU the
 * process runs on in case it is idling.
 */
void
signotify(struct proc *p)
{
	setsoftast(p);
	cpu_unidle(p->p_cpu);
}
1387 
/*
 * machine dependent system variables.
 *
 * Handles the machdep.* sysctl nodes: console device, FPU enable
 * flag and LED blinking.
 */
int
cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	extern u_int fpu_enable;
	extern int cpu_fpuena;
	dev_t consdev;
	int oldval, ret;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */
	switch (name[0]) {
	case CPU_CONSDEV:
		/* Report the console device, NODEV if none attached. */
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));
	case CPU_FPU:
		/*
		 * Save and release any FPU state held on this CPU
		 * before reading/writing cpu_fpuena.
		 */
		if (curcpu()->ci_fpu_state) {
			mtctl(fpu_enable, CR_CCR);
			fpu_save(curcpu()->ci_fpu_state);
			curcpu()->ci_fpu_state = 0;
			mtctl(0, CR_CCR);
		}
		return (sysctl_int(oldp, oldlenp, newp, newlen, &cpu_fpuena));
	case CPU_LED_BLINK:
		oldval = led_blink;
		ret = sysctl_int(oldp, oldlenp, newp, newlen, &led_blink);
		/*
		 * If we were false and are now true, start the timer.
		 */
		if (!oldval && led_blink > oldval)
			blink_led_timeout(NULL);
		return (ret);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
1433 
1434 
/*
 * consinit:
 * initialize the system console.
 */
void
consinit(void)
{
	/*
	 * Initial console setup has been done in pdc_init(),
	 * so there is nothing left to do here.
	 */
}
1446 
1447 
/*
 * Global state for LED blinking: the set of registered LEDs, the
 * current on/off phase and the timeout that drives the blinking.
 */
struct blink_led_softc {
	SLIST_HEAD(, blink_led) bls_head;	/* registered LEDs */
	int bls_on;				/* current phase (nonzero = on) */
	struct timeout bls_to;			/* periodic blink timeout */
} blink_sc = { SLIST_HEAD_INITIALIZER(bls_head), 0 };
1453 
/*
 * Register a LED for blinking.  On the first registration the blink
 * timeout is set up and, if blinking is enabled (led_blink), started.
 */
void
blink_led_register(struct blink_led *l)
{
	if (SLIST_EMPTY(&blink_sc.bls_head)) {
		timeout_set(&blink_sc.bls_to, blink_led_timeout, &blink_sc);
		blink_sc.bls_on = 0;
		if (led_blink)
			timeout_add(&blink_sc.bls_to, 1);
	}
	SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next);
}
1465 
1466 void
blink_led_timeout(void * vsc)1467 blink_led_timeout(void *vsc)
1468 {
1469 	struct blink_led_softc *sc = &blink_sc;
1470 	struct blink_led *l;
1471 	int t;
1472 
1473 	if (SLIST_EMPTY(&sc->bls_head))
1474 		return;
1475 
1476 	SLIST_FOREACH(l, &sc->bls_head, bl_next) {
1477 		(*l->bl_func)(l->bl_arg, sc->bls_on);
1478 	}
1479 	sc->bls_on = !sc->bls_on;
1480 
1481 	if (!led_blink)
1482 		return;
1483 
1484 	/*
1485 	 * Blink rate is:
1486 	 *      full cycle every second if completely idle (loadav = 0)
1487 	 *      full cycle every 2 seconds if loadav = 1
1488 	 *      full cycle every 3 seconds if loadav = 2
1489 	 * etc.
1490 	 */
1491 	t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1));
1492 	timeout_add(&sc->bls_to, t);
1493 }
1494