1 /* $OpenBSD: machdep.c,v 1.270 2024/05/22 14:25:47 jsg Exp $ */
2
3 /*
4 * Copyright (c) 1999-2003 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/signalvar.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/buf.h>
35 #include <sys/reboot.h>
36 #include <sys/device.h>
37 #include <sys/conf.h>
38 #include <sys/timeout.h>
39 #include <sys/malloc.h>
40 #include <sys/pool.h>
41 #include <sys/msgbuf.h>
42 #include <sys/ioctl.h>
43 #include <sys/tty.h>
44 #include <sys/user.h>
45 #include <sys/exec.h>
46 #include <sys/sysctl.h>
47 #include <sys/core.h>
48 #include <sys/kcore.h>
49 #include <sys/extent.h>
50
51 #include <sys/mount.h>
52 #include <sys/syscallargs.h>
53
54 #include <net/if.h>
55 #include <uvm/uvm.h>
56
57 #include <dev/cons.h>
58
59 #include <machine/pdc.h>
60 #include <machine/iomod.h>
61 #include <machine/psl.h>
62 #include <machine/reg.h>
63 #include <machine/cpufunc.h>
64 #include <machine/autoconf.h>
65 #include <machine/kcore.h>
66 #include <machine/fpu.h>
67
68 #ifdef DDB
69 #include <machine/db_machdep.h>
70 #include <ddb/db_access.h>
71 #include <ddb/db_sym.h>
72 #include <ddb/db_extern.h>
73 #endif
74
75 #include <hppa/dev/cpudevs.h>
76
/*
 * Different kinds of flags used throughout the kernel.
 */
int cold = 1;			/* unset when engine is up to go */
extern int msgbufmapped;	/* set when safe to use msgbuf */

/*
 * cache configuration, for most machines is the same
 * numbers, so it makes sense to do defines w/ numbers depending
 * on configured cpu types in the kernel
 */
int icache_stride, icache_line_mask;
int dcache_stride, dcache_line_mask;

/*
 * things to not kill
 */
volatile u_int8_t *machine_ledaddr;	/* mapped address of front-panel LEDs */
int machine_ledword, machine_leds;
struct cpu_info cpu_info[HPPA_MAXCPUS];

/*
 * CPU params (should be the same for all cpus in the system);
 * filled in from PDC in hppa_init()/cpuid().
 */
struct pdc_cache pdc_cache PDC_ALIGNMENT;	/* cache/TLB geometry */
struct pdc_btlb pdc_btlb PDC_ALIGNMENT;		/* block TLB parameters */
struct pdc_model pdc_model PDC_ALIGNMENT;	/* model/hversion info */

/*
 * w/ a little deviation should be the same for all installed cpus;
 * ITMR ticks per microsecond expressed as a ratio (see delay_init()).
 */
u_int cpu_ticksnum, cpu_ticksdenom;

/* exported info */
char machine[] = MACHINE;
char cpu_model[128];		/* model string built by cpuid() */
enum hppa_cpu_type cpu_type;
const char *cpu_typename;
int cpu_hvers;
u_int fpu_version;

int led_blink;

/*
 * exported methods for cpus;
 * installed by cpuid() from the matching cpu_types[] entry.
 */
int (*cpu_desidhash)(void);
int (*cpu_hpt_init)(vaddr_t hpt, vsize_t hptsize);
int (*cpu_ibtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int (*cpu_dbtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);

dev_t bootdev;
int physmem, resvmem, resvphysmem, esym;

#ifdef MULTIPROCESSOR
__cpu_simple_lock_t atomic_lock = __SIMPLELOCK_UNLOCKED;
#endif

/*
 * Things for MI glue to stick on.
 */
struct user *proc0paddr;
long mem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)];
struct extent *hppa_ex;		/* physical memory extent map */
struct pool hppa_fppl;		/* pool of FPU save areas */
struct hppa_fpstate proc0fpstate;
struct consdev *cn_tab;

struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;

/* MD-private prototypes */
void delay_init(void);
static __inline void fall(int, int, int, int, int);
void dumpsys(void);
void hpmc_dump(void);
void cpuid(void);
void blink_led_timeout(void *);
154
155 /*
156 * safepri is a safe priority for sleep to set for a spin-wait
157 * during autoconfiguration or after a panic.
158 */
159 int safepri = 0;
160
161 /*
162 * wide used hardware params
163 */
164 struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
165 struct pdc_coherence pdc_coherence PDC_ALIGNMENT;
166 struct pdc_spidb pdc_spidbits PDC_ALIGNMENT;
167 struct pdc_model pdc_model PDC_ALIGNMENT;
168
169 struct uvm_constraint_range dma_constraint = { 0x0, (paddr_t)-1 };
170 struct uvm_constraint_range *uvm_md_constraints[] = { NULL };
171
/*
 * Whatever CPU types we support: per-family TLB handler code
 * fragments (from locore.S) and per-family helper routines.
 */
extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
extern const u_int itlb_u[], itlbna_u[], dtlb_u[], dtlbna_u[], tlbd_u[];
int ibtlb_l(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_u(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int pbtlb_g(int i);
int pbtlb_u(int i);
int hpti_g(vaddr_t, vsize_t);
int desidhash_s(void);
int desidhash_t(void);
int desidhash_l(void);
int desidhash_u(void);
/*
 * Table of supported CPU families; cpuid() matches an entry either by
 * the PDC cpuid version or, failing that, by the probed feature mask,
 * and installs the per-family methods from the matching row.
 */
const struct hppa_cpu_typed {
	char name[8];		/* canonical PA-RISC core name */
	enum hppa_cpu_type type;
	int cpuid;		/* PDC_MODEL_CPUID version, 0 if unknown */
	int features;		/* HPPA_FTRS_* mask for feature matching */
	int patch;		/* branch-table index for TLB trap patching */
	int (*desidhash)(void);	/* disable space-register hashing */
	int (*dbtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	    vsize_t sz, u_int prot);	/* data BTLB insert */
	int (*ibtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	    vsize_t sz, u_int prot);	/* insn BTLB insert */
	int (*btlbprg)(int i);	/* purge one BTLB slot */
	int (*hptinit)(vaddr_t hpt, vsize_t hptsize);	/* HW TLB walker setup */
} cpu_types[] = {
#ifdef HP7000_CPU
	{ "PCXS", hpcxs, 0, 0, 3, desidhash_s, ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7100_CPU
	{ "PCXT", hpcxt, 0, HPPA_FTRS_BTLBU,
	    2, desidhash_t, ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7200_CPU
	{ "PCXT'", hpcxta,HPPA_CPU_PCXT2, HPPA_FTRS_BTLBU,
	    2, desidhash_t, ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7100LC_CPU
	{ "PCXL", hpcxl, HPPA_CPU_PCXL, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	    0, desidhash_l, ibtlb_g, NULL, pbtlb_g, hpti_g},
#endif
#ifdef HP7300LC_CPU
	{ "PCXL2", hpcxl2,HPPA_CPU_PCXL2, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	    0, desidhash_l, ibtlb_g, NULL, pbtlb_g, hpti_g},
#endif
#ifdef HP8000_CPU
	{ "PCXU", hpcxu, HPPA_CPU_PCXU, HPPA_FTRS_W32B,
	    4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
#endif
#ifdef HP8200_CPU
	{ "PCXU+", hpcxu2,HPPA_CPU_PCXUP, HPPA_FTRS_W32B,
	    4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
#endif
#ifdef HP8500_CPU
	{ "PCXW", hpcxw, HPPA_CPU_PCXW, HPPA_FTRS_W32B,
	    4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
#endif
#ifdef HP8700_CPU
	{ "PCXW2", hpcxw, HPPA_CPU_PCXW2, HPPA_FTRS_W32B,
	    4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
#endif
	{ "", 0 }	/* sentinel: empty name terminates the scan */
};
244
245 int hppa_cpuspeed(int *mhz);
246
247 int
hppa_cpuspeed(int * mhz)248 hppa_cpuspeed(int *mhz)
249 {
250 *mhz = PAGE0->mem_10msec / 10000;
251
252 return (0);
253 }
254
/*
 * Early machine-dependent initialisation.  Sets up the PDC firmware
 * interface, calibrates the clock ratio, reads cache/coherence
 * parameters, installs the HPMC/TOC/powerfail firmware vectors,
 * flushes TLBs and caches, reserves physical memory and bootstraps
 * the pmap and message buffer.  `start' is the first free physical
 * address past the loaded kernel image.
 */
void
hppa_init(paddr_t start)
{
	extern int kernel_text;		/* start of kernel text (ld script) */
	struct cpu_info *ci;
	int error;
	paddr_t avail_end;

	pdc_init();	/* init PDC iface, so we can call em easy */

	delay_init();	/* calculate cpu clock ratio */

	/* cache parameters */
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
	    &pdc_cache)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_CACHE error %d\n", error);
#endif
	}

	/* PDC reports cache lines in 16-byte units */
	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
	dcache_stride = pdc_cache.dc_stride;
	icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1;
	icache_stride = pdc_cache.ic_stride;

	/* cache coherence params (probably available for 8k only) */
	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_SETCS,
	    &pdc_coherence, 1, 1, 1, 1);
#ifdef DEBUG
	printf ("PDC_CACHE_SETCS: %d, %d, %d, %d (%d)\n",
	    pdc_coherence.ia_cst, pdc_coherence.da_cst,
	    pdc_coherence.ita_cst, pdc_coherence.dta_cst, error);
#endif
	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_GETSPIDB,
	    &pdc_spidbits, 0, 0, 0, 0);
	printf("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error);

	/* setup hpmc handler */
	{
		/* from locore.s */
		extern uint32_t hpmc_v[], hpmc_tramp[], hpmc_tramp_end[];
		uint32_t *p;
		uint32_t cksum = 0;

		/* checksum the trampoline code itself */
		for (p = hpmc_tramp; p < hpmc_tramp_end; p++)
			cksum += *p;

		p = hpmc_v;
		if (pdc_call((iodcio_t)pdc, 0, PDC_INSTR, PDC_INSTR_DFLT, p))
			*p = 0x08000240;

		/* vector words 6/7: handler address and length in bytes */
		p[6] = (uint32_t)&hpmc_tramp;
		p[7] = (hpmc_tramp_end - hpmc_tramp) * sizeof(uint32_t);
		/* word 5 makes the whole vector checksum to zero */
		p[5] =
		    -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7] + cksum);
	}

	/* install the TOC (transfer-of-control) vector, checksummed */
	{
		extern u_int hppa_toc[], hppa_toc_end[];
		u_int cksum, *p;

		for (cksum = 0, p = hppa_toc; p < hppa_toc_end; p++)
			cksum += *p;

		*p = cksum;
		PAGE0->ivec_toc = (u_int)hppa_toc;
		PAGE0->ivec_toclen = (hppa_toc_end - hppa_toc + 1) * 4;
	}

	/* install the powerfail recovery vector, checksummed */
	{
		extern u_int hppa_pfr[], hppa_pfr_end[];
		u_int cksum, *p;

		for (cksum = 0, p = hppa_pfr; p < hppa_pfr_end; p++)
			cksum += *p;

		*p = cksum;
		PAGE0->ivec_mempf = (u_int)hppa_pfr;
		PAGE0->ivec_mempflen = (hppa_pfr_end - hppa_pfr + 1) * 4;
	}

	ci = curcpu();
	ci->ci_cpl = IPL_NESTED;
	ci->ci_psw = PSL_Q | PSL_P | PSL_C | PSL_D;

	cpuid();
	ptlball();
	ficacheall();
	fdcacheall();

	/* size usable memory; the syscall gateway page caps the range */
	avail_end = trunc_page(PAGE0->imm_max_mem);
	if (avail_end > SYSCALLGATE)
		avail_end = SYSCALLGATE;
	physmem = atop(avail_end);
	resvmem = atop(((vaddr_t)&kernel_text));

	/* we hope this won't fail */
	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)mem_ex_storage, sizeof(mem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);
	if (extent_alloc_region(hppa_ex, 0, (vaddr_t)PAGE0->imm_max_mem,
	    EX_NOWAIT))
		panic("cannot reserve main memory");

	/* sets resvphysmem */
	pmap_bootstrap(round_page(start));

	/* space has been reserved in pmap_bootstrap() */
	initmsgbuf((caddr_t)(ptoa(physmem) - round_page(MSGBUFSIZE)),
	    round_page(MSGBUFSIZE));

	/* they say PDC_COPROC might turn fault light on */
	pdc_call((iodcio_t)pdc, 0, PDC_CHASSIS, PDC_CHASSIS_DISP,
	    PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0);

	cpu_cpuspeed = &hppa_cpuspeed;
#ifdef DDB
	ddb_init();
#endif
	/* flush again now that vectors and handlers are in place */
	ficacheall();
	fdcacheall();

	proc0paddr->u_pcb.pcb_fpstate = &proc0fpstate;
	pool_init(&hppa_fppl, sizeof(struct hppa_fpstate), 16, IPL_NONE, 0,
	    "hppafp", NULL);
}
381
/*
 * Identify the CPU.  Queries PDC for model, cpuid, coprocessor, BTLB
 * and hardware-TLB-walker information, selects the matching entry of
 * cpu_types[], installs the per-family method pointers, patches the
 * TLB miss handler branch tables, and builds the cpu_model string.
 */
void
cpuid()
{
	/*
	 * Ptrs to various tlb handlers, to be filled
	 * based on cpu features.
	 * from locore.S
	 */
	extern u_int trap_ep_T_TLB_DIRTY[];
	extern u_int trap_ep_T_DTLBMISS[];
	extern u_int trap_ep_T_DTLBMISSNA[];
	extern u_int trap_ep_T_ITLBMISS[];
	extern u_int trap_ep_T_ITLBMISSNA[];

	extern u_int fpu_enable;	/* CCR bits that enable the FPU */
	extern int cpu_fpuena;		/* nonzero if the FPU is usable */
	struct pdc_cpuid pdc_cpuid PDC_ALIGNMENT;
	struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
	const struct hppa_cpu_typed *p = NULL;
	u_int cpu_features;
	int error;

	/* may the scientific guessing begin */
	cpu_features = 0;
	cpu_type = 0;

	/* identify system type */
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_INFO,
	    &pdc_model)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_MODEL error %d\n", error);
#endif
		pdc_model.hvers = 0;
	}

	bzero(&pdc_cpuid, sizeof(pdc_cpuid));
	if (pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_CPUID,
	    &pdc_cpuid, 0, 0, 0, 0) >= 0) {

		/* patch for old 8200 */
		if (pdc_cpuid.version == HPPA_CPU_PCXU &&
		    pdc_cpuid.revision > 0x0d)
			pdc_cpuid.version = HPPA_CPU_PCXUP;

		cpu_type = pdc_cpuid.version;
	}

	/* locate coprocessors and SFUs */
	bzero(&pdc_coproc, sizeof(pdc_coproc));
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
	    &pdc_coproc, 0, 0, 0, 0, 0)) < 0) {
		/*
		 * Some 1.1 systems fail the PDC_COPROC call with error == -3,
		 * when booting from disk (but not when netbooting).
		 * Until the cause of this misbehaviour is found, assume the
		 * usual 1.1 FPU settings, so that userland gets a chance to
		 * run.
		 */
		if ((pdc_model.hvers >> 4) != 0 && pdc_model.arch_rev == 4) {
			printf("WARNING: PDC_COPROC error %d,"
			    " assuming 1.1 FPU\n", error);
			fpu_enable = 0xc0;
			cpu_fpuena = 1;
		} else {
			printf("WARNING: PDC_COPROC error %d\n", error);
			cpu_fpuena = 0;
		}
	} else {
		printf("pdc_coproc: 0x%x, 0x%x; model %x rev %x\n",
		    pdc_coproc.ccr_enable, pdc_coproc.ccr_present,
		    pdc_coproc.fpu_model, pdc_coproc.fpu_revision);
		fpu_enable = pdc_coproc.ccr_enable & CCR_MASK;
		cpu_fpuena = 1;

		/* a kludge to detect PCXW */
		if (pdc_coproc.fpu_model == HPPA_FPU_PCXW)
			cpu_type = HPPA_CPU_PCXW;
	}

	/* BTLB params (only pre-PCXU CPUs have a PDC block TLB) */
	if (cpu_type < HPPA_CPU_PCXU &&
	    (error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
	    PDC_BTLB_DEFAULT, &pdc_btlb)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_BTLB error %d\n", error);
#endif
	} else {
#ifdef BTLBDEBUG
		printf("btlb info: minsz=%d, maxsz=%d\n",
		    pdc_btlb.min_size, pdc_btlb.max_size);
		printf("btlb fixed: i=%d, d=%d, c=%d\n",
		    pdc_btlb.finfo.num_i,
		    pdc_btlb.finfo.num_d,
		    pdc_btlb.finfo.num_c);
		printf("btlb varbl: i=%d, d=%d, c=%d\n",
		    pdc_btlb.vinfo.num_i,
		    pdc_btlb.vinfo.num_d,
		    pdc_btlb.vinfo.num_c);
#endif /* BTLBDEBUG */
		/* purge TLBs and caches */
		if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
		    PDC_BTLB_PURGE_ALL) < 0)
			printf("WARNING: BTLB purge failed\n");

		/* combined (unified) BTLB entries are what the kernel uses */
		if (pdc_btlb.finfo.num_c)
			cpu_features |= HPPA_FTRS_BTLBU;
	}

	/* probe the hardware TLB walker (HPT) */
	if (!pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
	    pdc_hwtlb.min_size && pdc_hwtlb.max_size) {
		cpu_features |= HPPA_FTRS_HVT;
		/* clamp the configured HPT size into the supported range */
		if (pmap_hptsize > pdc_hwtlb.max_size)
			pmap_hptsize = pdc_hwtlb.max_size;
		else if (pmap_hptsize && pmap_hptsize < pdc_hwtlb.min_size)
			pmap_hptsize = pdc_hwtlb.min_size;
	} else {
#ifdef DEBUG
		printf("WARNING: no HPT support, fine!\n");
#endif
		pmap_hptsize = 0;
	}

	/* match by cpuid when known, otherwise by probed feature mask */
	if (cpu_type)
		for (p = cpu_types; p->name[0] && p->cpuid != cpu_type; p++)
			;
	else
		for (p = cpu_types;
		    p->name[0] && p->features != cpu_features; p++);

	if (!p->name[0]) {
		printf("WARNING: UNKNOWN CPU TYPE; GOOD LUCK "
		    "(type 0x%x, features 0x%x)\n", cpu_type, cpu_features);
		p = cpu_types;	/* fall back to the first configured entry */
	} else if ((p->type == hpcxl || p->type == hpcxl2) && !fpu_enable) {
		/* we know PCXL and PCXL2 do not exist w/o FPU */
		fpu_enable = 0xc0;
		cpu_fpuena = 1;
	}

	/*
	 * TODO: HPT on 7200 is not currently supported
	 */
	if (pmap_hptsize && p->type != hpcxl && p->type != hpcxl2)
		pmap_hptsize = 0;

	/* install the per-family methods from the matching table entry */
	cpu_type = p->type;
	cpu_typename = p->name;
	cpu_ibtlb_ins = p->ibtlbins;
	cpu_dbtlb_ins = p->dbtlbins;
	cpu_hpt_init = p->hptinit;
	cpu_desidhash = p->desidhash;

	/* patch tlb handler branches */
	if (p->patch) {
		trap_ep_T_TLB_DIRTY [0] = trap_ep_T_TLB_DIRTY [p->patch];
		trap_ep_T_DTLBMISS [0] = trap_ep_T_DTLBMISS [p->patch];
		trap_ep_T_DTLBMISSNA[0] = trap_ep_T_DTLBMISSNA[p->patch];
		trap_ep_T_ITLBMISS [0] = trap_ep_T_ITLBMISS [p->patch];
		trap_ep_T_ITLBMISSNA[0] = trap_ep_T_ITLBMISSNA[p->patch];
	}

	/* force strong ordering for now */
	if (p->features & HPPA_FTRS_W32B) {
		curcpu()->ci_psw |= PSL_O;
	}

	/* build the "HP 9000/<board> PA-RISC <arch><level>" model string */
	{
		const char *p, *q;
		char buf[32];
		int lev;

		lev = 0xa + (*cpu_desidhash)();
		cpu_hvers = pdc_model.hvers >> 4;
		if (!cpu_hvers) {
			p = "(UNKNOWN)";
			q = lev == 0xa? "1.0" : "1.1";
		} else {
			p = hppa_mod_info(HPPA_TYPE_BOARD, cpu_hvers);
			if (!p) {
				snprintf(buf, sizeof buf, "(UNKNOWN 0x%x)",
				    cpu_hvers);
				p = buf;
			}

			switch (pdc_model.arch_rev) {
			default:
			case 0:
				q = "1.0";
				break;
			case 4:
				q = "1.1";
				/* this one is just a 100MHz pcxl */
				if (lev == 0x10)
					lev = 0xc;
				/* this one is a pcxl2 */
				if (lev == 0x16)
					lev = 0xe;
				break;
			case 8:
				q = "2.0";
				break;
			}
		}

		snprintf(cpu_model, sizeof cpu_model,
		    "HP 9000/%s PA-RISC %s%x", p, q, lev);
	}
#ifdef DEBUG
	printf("cpu: %s\n", cpu_model);
#endif
}
593
/*
 * Machine-dependent startup, run once the VM system is up: print the
 * version banner and memory statistics, create the exec-argument and
 * physio submaps, initialise the buffer cache and, if requested,
 * enter boot-time device configuration.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;

	/*
	 * i won't understand a friend of mine,
	 * who sat in a room full of artificial ice,
	 * fogging the air w/ humid cries --
	 * WELCOME TO SUMMER!
	 */
	printf("%s", version);

	printf("%s\n", cpu_model);
	printf("real mem = %lu (%luMB)\n", ptoa(physmem),
	    ptoa(physmem) / 1024 / 1024);
	printf("rsvd mem = %lu (%luKB)\n", ptoa(resvmem), ptoa(resvmem) / 1024);

	/*
	 * Allocate a submap for exec arguments. This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free) / 1024 / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system (boot -c / RB_CONFIG).
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}
645
646 /*
647 * compute cpu clock ratio such as:
648 * cpu_ticksnum / cpu_ticksdenom = t + delta
649 * delta -> 0
650 */
651 void
delay_init(void)652 delay_init(void)
653 {
654 u_int num, denom, delta, mdelta;
655
656 mdelta = UINT_MAX;
657 for (denom = 1; denom < 1000; denom++) {
658 num = (PAGE0->mem_10msec * denom) / 10000;
659 delta = num * 10000 / denom - PAGE0->mem_10msec;
660 if (!delta) {
661 cpu_ticksdenom = denom;
662 cpu_ticksnum = num;
663 break;
664 } else if (delta < mdelta) {
665 cpu_ticksdenom = denom;
666 cpu_ticksnum = num;
667 mdelta = delta;
668 }
669 }
670 }
671
/*
 * Busy-wait for `us' microseconds on the interval timer (CR_ITMR).
 * The wait is chopped into chunks of at most 1000us so that the
 * 32-bit timer's wraparound can be handled per chunk.
 */
void
delay(u_int us)
{
	u_int start, end, n;

	mfctl(CR_ITMR, start);
	while (us) {
		n = min(1000, us);
		/* target tick for this chunk, in timer units */
		end = start + n * cpu_ticksnum / cpu_ticksdenom;

		/* N.B. Interval Timer may wrap around */
		if (end < start)
			/* first wait for the timer itself to wrap */
			do
				mfctl(CR_ITMR, start);
			while (start > end);

		do
			mfctl(CR_ITMR, start);
		while (start < end);

		us -= n;
	}
}
695
/*
 * Walk one cache (instruction when data == 0, data otherwise) set by
 * set and flush every entry: `count' sets starting at `base', `stride'
 * bytes apart, `loops' flush operations per set.
 */
static __inline void
fall(int base, int count, int loops, int stride, int data)
{
	int l;

	while (count--) {
		for (l = loops; l--; ) {
			if (data)
				fdce(0, base);
			else
				fice(0, base);
		}
		base += stride;
	}
}
708
/*
 * Flush the entire instruction cache, using the geometry reported
 * by PDC_CACHE, then synchronize.
 */
void
ficacheall(void)
{
	/*
	 * Flush the instruction cache set by set.
	 */
	fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop,
	    pdc_cache.ic_stride, 0);
	sync_caches();
}
719
/*
 * Flush the entire data cache, using the geometry reported by
 * PDC_CACHE, then synchronize.
 */
void
fdcacheall(void)
{
	fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop,
	    pdc_cache.dc_stride, 1);
	sync_caches();
}
727
728 void
ptlball(void)729 ptlball(void)
730 {
731 pa_space_t sp;
732 int i, j, k;
733
734 /* instruction TLB */
735 sp = pdc_cache.it_sp_base;
736 for (i = 0; i < pdc_cache.it_sp_count; i++) {
737 vaddr_t off = pdc_cache.it_off_base;
738 for (j = 0; j < pdc_cache.it_off_count; j++) {
739 for (k = 0; k < pdc_cache.it_loop; k++)
740 pitlbe(sp, off);
741 off += pdc_cache.it_off_stride;
742 }
743 sp += pdc_cache.it_sp_stride;
744 }
745
746 /* data TLB */
747 sp = pdc_cache.dt_sp_base;
748 for (i = 0; i < pdc_cache.dt_sp_count; i++) {
749 vaddr_t off = pdc_cache.dt_off_base;
750 for (j = 0; j < pdc_cache.dt_off_count; j++) {
751 for (k = 0; k < pdc_cache.dt_loop; k++)
752 pdtlbe(sp, off);
753 off += pdc_cache.dt_off_stride;
754 }
755 sp += pdc_cache.dt_sp_stride;
756 }
757 }
758
/*
 * Configure the hardware TLB walker (HPT) through PDC_TLB.
 * Returns the PDC call status (negative on failure).
 */
int
hpti_g(vaddr_t hpt, vsize_t hptsize)
{
	return pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_CONFIG,
	    &pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE);
}
765
/*
 * BTLB slot purge method for CPUs using the "generic" PDC insert
 * path: per-slot purging is not supported there, so always fail.
 */
int
pbtlb_g(int i)
{
	int rv = -1;

	return rv;
}
771
/*
 * Insert a block TLB entry through PDC (the "generic" method used on
 * pre-PCXU CPUs): slot `i', mapping va -> pa of `sz' in `sp' with
 * protection `prot'.  Returns the PDC status (negative on failure).
 */
int
ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa, vsize_t sz, u_int prot)
{
	int error;

	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_INSERT,
	    sp, va, pa, sz, prot, i)) < 0) {
#ifdef BTLBDEBUG
		printf("WARNING: BTLB insert failed (%d)\n", error);
#endif
	}
	return error;
}
785
/*
 * Allocate a free BTLB slot and insert a block mapping of va -> pa in
 * `space' with protection `prot'.  On success, returns the slot number
 * used and updates *lenp to the size actually requested from PDC
 * (rounded up to a supported power-of-two number of pages); on failure
 * returns a negated errno.  Note that PDC may map less than *lenp even
 * on success — see the comment below.
 */
int
btlb_insert(pa_space_t space, vaddr_t va, paddr_t pa, vsize_t *lenp, u_int prot)
{
	static u_int32_t mask;	/* bitmap of slots handed out so far */
	vsize_t len;
	int error, i, btlb_max;

	if (!pdc_btlb.min_size && !pdc_btlb.max_size)
		return -(ENXIO);

	/*
	 * On PCXS processors with split BTLB, we should theoretically
	 * insert in the IBTLB (if executable mapping requested), and
	 * into the DBTLB. The PDC documentation is very clear that
	 * slot numbers are, in order, IBTLB, then DBTLB, then combined
	 * BTLB.
	 *
	 * However it also states that ``successful completion may not mean
	 * that the entire address range specified in the call has been
	 * mapped in the block TLB. For both fixed range slots and variable
	 * range slots, complete coverage of the address range specified
	 * is not guaranteed. Only a portion of the address range specified
	 * may get mapped as a result''.
	 *
	 * On an HP 9000/720 with PDC ROM v1.2, it turns out that IBTLB
	 * entries are inserted as expected, but no DBTLB gets inserted
	 * at all, despite PDC returning success.
	 *
	 * So play it dumb, and do not attempt to insert DBTLB entries at
	 * all on split BTLB systems. Callers are supposed to be able to
	 * cope with this.
	 */

	if (pdc_btlb.finfo.num_c == 0) {
		/* split BTLB: only executable mappings are usable */
		if ((prot & TLB_EXECUTE) == 0)
			return -(EINVAL);

		btlb_max = pdc_btlb.finfo.num_i;
	} else {
		btlb_max = pdc_btlb.finfo.num_c;
	}

	/* align size: round up to a supported power-of-two of pages */
	for (len = pdc_btlb.min_size << PGSHIFT; len < *lenp; len <<= 1)
		;
	len >>= PGSHIFT;
	/* lowest clear bit in the slot bitmap == first free slot */
	i = ffs(~mask) - 1;
	if (len > pdc_btlb.max_size || i < 0 || i >= btlb_max) {
#ifdef BTLBDEBUG
		printf("btln_insert: too big (%u < %u < %u)\n",
		    pdc_btlb.min_size, len, pdc_btlb.max_size);
#endif
		return -(ENOMEM);
	}

	mask |= 1 << i;
	pa >>= PGSHIFT;
	va >>= PGSHIFT;
	/* check address alignment: pa must be aligned to the entry size */
	if (pa & (len - 1)) {
#ifdef BTLBDEBUG
		printf("WARNING: BTLB address misaligned pa=0x%x, len=0x%x\n",
		    pa, len);
#endif
		return -(ERANGE);
	}

	/* ensure IO space is uncached */
	if ((pa & (HPPA_IOBEGIN >> PGSHIFT)) == (HPPA_IOBEGIN >> PGSHIFT))
		prot |= TLB_UNCACHABLE;

#ifdef BTLBDEBUG
	printf("btlb_insert(%d): %x:%x=%x[%x,%x]\n",
	    i, space, va, pa, len, prot);
#endif
	if ((error = (*cpu_dbtlb_ins)(i, space, va, pa, len, prot)) < 0)
		return -(EINVAL);
	*lenp = len << PGSHIFT;

	return i;
}
867
int waittime = -1;	/* set to 0 when the shutdown filesystem sync starts */

/*
 * Machine-dependent reboot/halt/powerdown.  `howto' is a mask of RB_*
 * flags (sys/reboot.h).  Never returns.
 */
__dead void
boot(int howto)
{
	if ((howto & RB_RESET) != 0)
		goto doreset;

	/*
	 * On older systems without software power control, prevent mi code
	 * from spinning disks off, in case the operator changes his mind
	 * and prefers to reboot - the firmware will not send a spin up
	 * command to the disks.
	 */
	if (cold_hook == NULL)
		howto &= ~RB_POWERDOWN;

	if (cold) {
		/* too early to sync; just halt unless the user asked to go on */
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto | (boothowto & RB_HALT);

	if ((howto & RB_NOSYNC) == 0) {
		waittime = 0;
		vfs_shutdown(curproc);

		if ((howto & RB_TIMEBAD) == 0) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}
	if_downall();

	uvm_shutdown();
	splhigh();
	cold = 1;

	if ((howto & RB_DUMP) != 0)
		dumpsys();

haltsys:
	config_suspend_all(DVACT_POWERDOWN);

#ifdef MULTIPROCESSOR
	hppa_ipi_broadcast(HPPA_IPI_HALT);
#endif

	/* in case we came on powerfail interrupt */
	if (cold_hook)
		(*cold_hook)(HPPA_COLD_COLD);

	if ((howto & RB_HALT) != 0) {
		if ((howto & RB_POWERDOWN) != 0) {
			printf("Powering off...");
			DELAY(2000000);
			(*cold_hook)(HPPA_COLD_OFF);
			DELAY(1000000);
		}

		printf("System halted!\n");
		DELAY(2000000);
		/* broadcast a STOP command on the local bus */
		__asm volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_STOP), "r" (HPPA_LBCAST + iomod_command));
	} else {
doreset:
		printf("rebooting...");
		DELAY(2000000);

		/* ask firmware to reset */
		pdc_call((iodcio_t)pdc, 0, PDC_BROADCAST_RESET, PDC_DO_RESET);

		/* forcibly reset module if that fails */
		__asm volatile(".export hppa_reset, entry\n\t"
		    ".label hppa_reset");
		__asm volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_RESET), "r" (HPPA_LBCAST + iomod_command));
	}

	for (;;)
		continue;
	/* NOTREACHED */
}
954
u_long dumpmag = 0x8fca0101;	/* magic number */
int dumpsize = 0;		/* pages */
long dumplo = 0;		/* blocks */

/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 * Returns the size in disk blocks (always one here), or -1 if the headers
 * would not fit in a single disk block.
 */
int
cpu_dumpsize(void)
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return -1;

	return 1;
}
973
/*
 * Called from HPMC handler in locore
 */
void
hpmc_dump(void)
{
	/*
	 * Clear `cold' so panic() takes the full shutdown path
	 * (sync and crash dump) instead of just halting.
	 */
	cold = 0;
	panic("HPMC");
	/* NOTREACHED */
}
984
/*
 * Write the machine-dependent crash dump header (one disk block
 * holding a kcore segment header, with room for MD data) to the
 * dump device.  Returns the block driver's dump routine status.
 */
int
cpu_dump(void)
{
	long buf[dbtob(1) / sizeof (long)];
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;

	segp = (kcore_seg_t *)buf;
	/* MD header would follow the (aligned) segment header */
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info
	 */
	/* nothing for now */

	return (bdevsw[major(dumpdev)].d_dump)
	    (dumpdev, dumplo, (caddr_t)buf, dbtob(1));
}
1009
/*
 * Dump the kernel's image to the swap partition.
 */
#define BYTES_PER_DUMP NBPG	/* transfer one page per driver call */

void
dumpsys(void)
{
	int psize, bytes, i, n;
	caddr_t maddr;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	int error;

	/* Save registers
	savectx(&dumppcb); */

	if (dumpsize == 0)
		dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %x not possible\n", dumpdev);
		return;
	}
	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	/* write the MD header first, then all of physical memory */
	if (!(error = cpu_dump())) {

		bytes = ptoa(physmem);
		maddr = NULL;	/* start at physical/virtual address 0 */
		blkno = dumplo + cpu_dumpsize();
		dump = bdevsw[major(dumpdev)].d_dump;
		/* TODO block map the whole physical memory */
		for (i = 0; i < bytes; i += n) {

			/* Print out how many MBs we are to go. */
			n = bytes - i;
			if (n && (n % (1024*1024)) == 0)
				printf("%d ", n / (1024 * 1024));

			/* Limit size for next transfer. */

			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			if ((error = (*dump)(dumpdev, blkno, maddr, n)))
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	switch (error) {
	case ENXIO:	printf("device bad\n"); break;
	case EFAULT:	printf("device not ready\n"); break;
	case EINVAL:	printf("area improper\n"); break;
	case EIO:	printf("i/o error\n"); break;
	case EINTR:	printf("aborted from console\n"); break;
	case 0:		printf("succeeded\n"); break;
	default:	printf("error %d\n", error); break;
	}
}
1078
1079 /* bcopy(), error on fault */
1080 int
kcopy(const void * from,void * to,size_t size)1081 kcopy(const void *from, void *to, size_t size)
1082 {
1083 return spcopy(HPPA_SID_KERNEL, from, HPPA_SID_KERNEL, to, size);
1084 }
1085
1086 int
copyinstr(const void * src,void * dst,size_t size,size_t * lenp)1087 copyinstr(const void *src, void *dst, size_t size, size_t *lenp)
1088 {
1089 if (size == 0)
1090 return ENAMETOOLONG;
1091 return spstrcpy(curproc->p_addr->u_pcb.pcb_space, src,
1092 HPPA_SID_KERNEL, dst, size, lenp);
1093 }
1094
1095 int
copyoutstr(const void * src,void * dst,size_t size,size_t * lenp)1096 copyoutstr(const void *src, void *dst, size_t size, size_t *lenp)
1097 {
1098 if (size == 0)
1099 return ENAMETOOLONG;
1100 return spstrcpy(HPPA_SID_KERNEL, src,
1101 curproc->p_addr->u_pcb.pcb_space, dst, size, lenp);
1102 }
1103
1104 int
copyin(const void * src,void * dst,size_t size)1105 copyin(const void *src, void *dst, size_t size)
1106 {
1107 return spcopy(curproc->p_addr->u_pcb.pcb_space, src,
1108 HPPA_SID_KERNEL, dst, size);
1109 }
1110
1111 int
copyout(const void * src,void * dst,size_t size)1112 copyout(const void *src, void *dst, size_t size)
1113 {
1114 return spcopy(HPPA_SID_KERNEL, src,
1115 curproc->p_addr->u_pcb.pcb_space, dst, size);
1116 }
1117
1118 int
copyin32(const uint32_t * src,uint32_t * dst)1119 copyin32(const uint32_t *src, uint32_t *dst)
1120 {
1121 return spcopy32(curproc->p_addr->u_pcb.pcb_space, src,
1122 HPPA_SID_KERNEL, dst);
1123 }
1124
/*
 * Set up tf_sp and tf_r3 (the frame pointer) and copy out the
 * frame marker and the old r3.  Returns nonzero if either copyout
 * to the new user stack failed.
 */
int
setstack(struct trapframe *tf, u_long stack, register_t old_r3)
{
	static const register_t zero = 0;
	int err;

	/* frame pointer; the caller's r3 is saved at the frame base */
	tf->tf_r3 = stack;
	err = copyout(&old_r3, (caddr_t)stack, sizeof(register_t));

	/* sp sits one frame above; store a zero return pointer there */
	tf->tf_sp = stack += HPPA_FRAME_SIZE;
	return (copyout(&zero, (caddr_t)(stack + HPPA_FRAME_CRP),
	    sizeof(register_t)) || err);
}
1142
1143
/*
 * Set registers on exec.
 * Builds a fresh trapframe for the new image: instruction queues at
 * the entry point in user privilege, all user space registers set to
 * the process' space, an initial terminal stack frame, and a cleared
 * FPU state.
 */
void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    struct ps_strings *arginfo)
{
	struct trapframe *tf = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct fpreg *fpreg = &pcb->pcb_fpstate->hfp_regs;

	memset(tf, 0, sizeof *tf);
	tf->tf_flags = TFF_SYS|TFF_LAST;
	tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER;
	tf->tf_iioq_tail = tf->tf_iioq_head + 4;
	tf->tf_iisq_head = tf->tf_iisq_tail = pcb->pcb_space;
	tf->tf_arg0 = p->p_p->ps_strings;

	/* setup terminal stack frame, 64-byte aligned */
	setstack(tf, (stack + 0x3f) & ~0x3f, 0);

	tf->tf_cr30 = (paddr_t)pcb->pcb_fpstate;

	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
	    tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = pcb->pcb_space;
	tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);

	/*
	 * theoretically these could be inherited,
	 * but just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	mfctl(CR_EIEM, tf->tf_eiem);
	tf->tf_ipsw = PSL_C | PSL_Q | PSL_P | PSL_D | PSL_I /* | PSL_L */ |
	    (curcpu()->ci_psw & PSL_O);

	/* clear the FPU */
	fpu_proc_flush(p);
	memset(fpreg, 0, sizeof *fpreg);
	fpreg->fpr_regs[0] = ((u_int64_t)HPPA_FPU_INIT) << 32;

	p->p_md.md_bpva = 0;
}
1187
1188 /*
1189 * Send an interrupt to process.
1190 */
1191 int
sendsig(sig_t catcher,int sig,sigset_t mask,const siginfo_t * ksip,int info,int onstack)1192 sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip,
1193 int info, int onstack)
1194 {
1195 struct proc *p = curproc;
1196 struct trapframe *tf = p->p_md.md_regs;
1197 struct pcb *pcb = &p->p_addr->u_pcb;
1198 struct sigcontext ksc;
1199 register_t scp, sip;
1200 int sss;
1201
1202 /* Save the FPU context first. */
1203 fpu_proc_save(p);
1204
1205 /*
1206 * Allocate space for the signal handler context.
1207 */
1208 if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
1209 !sigonstack(tf->tf_sp) && onstack)
1210 scp = round_page((vaddr_t)p->p_sigstk.ss_sp);
1211 else
1212 scp = (tf->tf_sp + 63) & ~63;
1213
1214 sss = (sizeof(ksc) + 63) & ~63;
1215 sip = 0;
1216 if (info) {
1217 sip = scp + sizeof(ksc);
1218 sss += (sizeof(*ksip) + 63) & ~63;
1219 }
1220
1221 bzero(&ksc, sizeof(ksc));
1222 ksc.sc_mask = mask;
1223 ksc.sc_fp = scp + sss;
1224 ksc.sc_ps = tf->tf_ipsw;
1225 ksc.sc_pcoqh = tf->tf_iioq_head;
1226 ksc.sc_pcoqt = tf->tf_iioq_tail;
1227 ksc.sc_regs[0] = tf->tf_t1;
1228 ksc.sc_regs[1] = tf->tf_t2;
1229 ksc.sc_regs[2] = tf->tf_sp;
1230 ksc.sc_regs[3] = tf->tf_t3;
1231 ksc.sc_regs[4] = tf->tf_sar;
1232 ksc.sc_regs[5] = tf->tf_r1;
1233 ksc.sc_regs[6] = tf->tf_rp;
1234 ksc.sc_regs[7] = tf->tf_r3;
1235 ksc.sc_regs[8] = tf->tf_r4;
1236 ksc.sc_regs[9] = tf->tf_r5;
1237 ksc.sc_regs[10] = tf->tf_r6;
1238 ksc.sc_regs[11] = tf->tf_r7;
1239 ksc.sc_regs[12] = tf->tf_r8;
1240 ksc.sc_regs[13] = tf->tf_r9;
1241 ksc.sc_regs[14] = tf->tf_r10;
1242 ksc.sc_regs[15] = tf->tf_r11;
1243 ksc.sc_regs[16] = tf->tf_r12;
1244 ksc.sc_regs[17] = tf->tf_r13;
1245 ksc.sc_regs[18] = tf->tf_r14;
1246 ksc.sc_regs[19] = tf->tf_r15;
1247 ksc.sc_regs[20] = tf->tf_r16;
1248 ksc.sc_regs[21] = tf->tf_r17;
1249 ksc.sc_regs[22] = tf->tf_r18;
1250 ksc.sc_regs[23] = tf->tf_t4;
1251 ksc.sc_regs[24] = tf->tf_arg3;
1252 ksc.sc_regs[25] = tf->tf_arg2;
1253 ksc.sc_regs[26] = tf->tf_arg1;
1254 ksc.sc_regs[27] = tf->tf_arg0;
1255 ksc.sc_regs[28] = tf->tf_dp;
1256 ksc.sc_regs[29] = tf->tf_ret0;
1257 ksc.sc_regs[30] = tf->tf_ret1;
1258 ksc.sc_regs[31] = tf->tf_r31;
1259 bcopy(&p->p_addr->u_pcb.pcb_fpstate->hfp_regs, ksc.sc_fpregs,
1260 sizeof(ksc.sc_fpregs));
1261
1262 if (setstack(tf, scp + sss, tf->tf_r3))
1263 return 1;
1264
1265 tf->tf_arg0 = sig;
1266 tf->tf_arg1 = sip;
1267 tf->tf_arg2 = tf->tf_r4 = scp;
1268 tf->tf_arg3 = (register_t)catcher;
1269 tf->tf_ipsw &= ~(PSL_N|PSL_B|PSL_T);
1270 tf->tf_iioq_head = HPPA_PC_PRIV_USER | p->p_p->ps_sigcode;
1271 tf->tf_iioq_tail = tf->tf_iioq_head + 4;
1272 tf->tf_iisq_tail = tf->tf_iisq_head = pcb->pcb_space;
1273 /* disable tracing in the trapframe */
1274
1275 ksc.sc_cookie = (long)scp ^ p->p_p->ps_sigcookie;
1276 if (copyout(&ksc, (void *)scp, sizeof(ksc)))
1277 return 1;
1278
1279 if (sip) {
1280 if (copyout(ksip, (void *)sip, sizeof *ksip))
1281 return 1;
1282 }
1283
1284 return 0;
1285 }
1286
int
sys_sigreturn(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext ksc, *scp = SCARG(uap, sigcntxp);
	struct trapframe *tf = p->p_md.md_regs;
	int error;

	/*
	 * Only the signal trampoline's return point may invoke
	 * sigreturn; anything else is killed with SIGILL.
	 */
	if (PROC_PC(p) != p->p_p->ps_sigcoderet) {
		sigexit(p, SIGILL);
		return (EPERM);
	}

	/* Flush the FPU context first. */
	fpu_proc_flush(p);

	if ((error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc)))
		return (error);

	/* verify the cookie planted by sendsig() for this context */
	if (ksc.sc_cookie != ((long)scp ^ p->p_p->ps_sigcookie)) {
		sigexit(p, SIGILL);
		return (EFAULT);
	}

	/* Prevent reuse of the sigcontext cookie */
	ksc.sc_cookie = 0;
	(void)copyout(&ksc.sc_cookie, (caddr_t)scp +
	    offsetof(struct sigcontext, sc_cookie), sizeof (ksc.sc_cookie));

	/* validate the PSW: these bits must be set / must be zero */
#define PSL_MBS (PSL_C|PSL_Q|PSL_P|PSL_D|PSL_I)
#define PSL_MBZ (PSL_Y|PSL_Z|PSL_S|PSL_X|PSL_M|PSL_R)
	if ((ksc.sc_ps & (PSL_MBS|PSL_MBZ)) != PSL_MBS)
		return (EINVAL);

	p->p_sigmask = ksc.sc_mask &~ sigcantmask;

	/* restore general registers from the saved context */
	tf->tf_t1 = ksc.sc_regs[0];		/* r22 */
	tf->tf_t2 = ksc.sc_regs[1];		/* r21 */
	tf->tf_sp = ksc.sc_regs[2];
	tf->tf_t3 = ksc.sc_regs[3];		/* r20 */
	tf->tf_sar = ksc.sc_regs[4];
	tf->tf_r1 = ksc.sc_regs[5];
	tf->tf_rp = ksc.sc_regs[6];
	tf->tf_r3 = ksc.sc_regs[7];
	tf->tf_r4 = ksc.sc_regs[8];
	tf->tf_r5 = ksc.sc_regs[9];
	tf->tf_r6 = ksc.sc_regs[10];
	tf->tf_r7 = ksc.sc_regs[11];
	tf->tf_r8 = ksc.sc_regs[12];
	tf->tf_r9 = ksc.sc_regs[13];
	tf->tf_r10 = ksc.sc_regs[14];
	tf->tf_r11 = ksc.sc_regs[15];
	tf->tf_r12 = ksc.sc_regs[16];
	tf->tf_r13 = ksc.sc_regs[17];
	tf->tf_r14 = ksc.sc_regs[18];
	tf->tf_r15 = ksc.sc_regs[19];
	tf->tf_r16 = ksc.sc_regs[20];
	tf->tf_r17 = ksc.sc_regs[21];
	tf->tf_r18 = ksc.sc_regs[22];
	tf->tf_t4 = ksc.sc_regs[23];		/* r19 */
	tf->tf_arg3 = ksc.sc_regs[24];		/* r23 */
	tf->tf_arg2 = ksc.sc_regs[25];		/* r24 */
	tf->tf_arg1 = ksc.sc_regs[26];		/* r25 */
	tf->tf_arg0 = ksc.sc_regs[27];		/* r26 */
	tf->tf_dp = ksc.sc_regs[28];
	tf->tf_ret0 = ksc.sc_regs[29];
	tf->tf_ret1 = ksc.sc_regs[30];
	tf->tf_r31 = ksc.sc_regs[31];
	bcopy(ksc.sc_fpregs, &p->p_addr->u_pcb.pcb_fpstate->hfp_regs,
	    sizeof(ksc.sc_fpregs));

	/*
	 * Resume PC queue, forced to user privilege.  If the resume
	 * address lies in the syscall gateway page, the matching space
	 * register must be the kernel space id, else the process' own.
	 */
	tf->tf_iioq_head = ksc.sc_pcoqh | HPPA_PC_PRIV_USER;
	tf->tf_iioq_tail = ksc.sc_pcoqt | HPPA_PC_PRIV_USER;
	if ((tf->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)
		tf->tf_iisq_head = HPPA_SID_KERNEL;
	else
		tf->tf_iisq_head = p->p_addr->u_pcb.pcb_space;
	if ((tf->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
		tf->tf_iisq_tail = HPPA_SID_KERNEL;
	else
		tf->tf_iisq_tail = p->p_addr->u_pcb.pcb_space;
	tf->tf_ipsw = ksc.sc_ps | (curcpu()->ci_psw & PSL_O);

	return (EJUSTRETURN);
}
1374
void
signotify(struct proc *p)
{
	/*
	 * Post an AST so the pending signal is noticed on the next
	 * return to userland, and wake the CPU the process runs on.
	 */
	setsoftast(p);
	cpu_unidle(p->p_cpu);
}
1381
1382 /*
1383 * machine dependent system variables.
1384 */
1385 int
cpu_sysctl(int * name,u_int namelen,void * oldp,size_t * oldlenp,void * newp,size_t newlen,struct proc * p)1386 cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1387 size_t newlen, struct proc *p)
1388 {
1389 extern u_int fpu_enable;
1390 extern int cpu_fpuena;
1391 dev_t consdev;
1392 int oldval, ret;
1393
1394 /* all sysctl names at this level are terminal */
1395 if (namelen != 1)
1396 return (ENOTDIR); /* overloaded */
1397 switch (name[0]) {
1398 case CPU_CONSDEV:
1399 if (cn_tab != NULL)
1400 consdev = cn_tab->cn_dev;
1401 else
1402 consdev = NODEV;
1403 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1404 sizeof consdev));
1405 case CPU_FPU:
1406 if (curcpu()->ci_fpu_state) {
1407 mtctl(fpu_enable, CR_CCR);
1408 fpu_save(curcpu()->ci_fpu_state);
1409 curcpu()->ci_fpu_state = 0;
1410 mtctl(0, CR_CCR);
1411 }
1412 return (sysctl_int(oldp, oldlenp, newp, newlen, &cpu_fpuena));
1413 case CPU_LED_BLINK:
1414 oldval = led_blink;
1415 ret = sysctl_int(oldp, oldlenp, newp, newlen, &led_blink);
1416 /*
1417 * If we were false and are now true, start the timer.
1418 */
1419 if (!oldval && led_blink > oldval)
1420 blink_led_timeout(NULL);
1421 return (ret);
1422 default:
1423 return (EOPNOTSUPP);
1424 }
1425 /* NOTREACHED */
1426 }
1427
1428
1429 /*
1430 * consinit:
1431 * initialize the system console.
1432 */
1433 void
consinit(void)1434 consinit(void)
1435 {
1436 /*
1437 * Initial console setup has been done in pdc_init().
1438 */
1439 }
1440
1441
/*
 * State for the front-panel LED blinker: the list of registered
 * LEDs, the current on/off phase, and the timeout driving it.
 */
struct blink_led_softc {
	SLIST_HEAD(, blink_led) bls_head;	/* registered LEDs */
	int bls_on;				/* current blink phase */
	struct timeout bls_to;			/* blink timer */
} blink_sc = { SLIST_HEAD_INITIALIZER(bls_head), 0 };
1447
1448 void
blink_led_register(struct blink_led * l)1449 blink_led_register(struct blink_led *l)
1450 {
1451 if (SLIST_EMPTY(&blink_sc.bls_head)) {
1452 timeout_set(&blink_sc.bls_to, blink_led_timeout, &blink_sc);
1453 blink_sc.bls_on = 0;
1454 if (led_blink)
1455 timeout_add(&blink_sc.bls_to, 1);
1456 }
1457 SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next);
1458 }
1459
1460 void
blink_led_timeout(void * vsc)1461 blink_led_timeout(void *vsc)
1462 {
1463 struct blink_led_softc *sc = &blink_sc;
1464 struct blink_led *l;
1465 int t;
1466
1467 if (SLIST_EMPTY(&sc->bls_head))
1468 return;
1469
1470 SLIST_FOREACH(l, &sc->bls_head, bl_next) {
1471 (*l->bl_func)(l->bl_arg, sc->bls_on);
1472 }
1473 sc->bls_on = !sc->bls_on;
1474
1475 if (!led_blink)
1476 return;
1477
1478 /*
1479 * Blink rate is:
1480 * full cycle every second if completely idle (loadav = 0)
1481 * full cycle every 2 seconds if loadav = 1
1482 * full cycle every 3 seconds if loadav = 2
1483 * etc.
1484 */
1485 t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1));
1486 timeout_add(&sc->bls_to, t);
1487 }
1488