1 /* $OpenBSD: machdep.c,v 1.203 2023/04/11 00:45:06 jsg Exp $ */
2 /* $NetBSD: machdep.c,v 1.210 2000/06/01 17:12:38 thorpej Exp $ */
3
4 /*-
5 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center and by Chris G. Demetriou.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Author: Chris G. Demetriou
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/signalvar.h>
64 #include <sys/kernel.h>
65 #include <sys/proc.h>
66 #include <sys/socket.h>
67 #include <sys/sched.h>
68 #include <sys/buf.h>
69 #include <sys/reboot.h>
70 #include <sys/device.h>
71 #include <sys/conf.h>
72 #include <sys/timeout.h>
73 #include <sys/malloc.h>
74 #include <sys/mbuf.h>
75 #include <sys/msgbuf.h>
76 #include <sys/ioctl.h>
77 #include <sys/tty.h>
78 #include <sys/user.h>
79 #include <sys/exec.h>
80 #include <sys/sysctl.h>
81 #include <sys/core.h>
82 #include <sys/kcore.h>
83
84 #include <net/if.h>
85 #include <uvm/uvm.h>
86
87 #include <machine/kcore.h>
88 #ifndef NO_IEEE
89 #include <machine/fpu.h>
90 #endif
91 #include <sys/timetc.h>
92
93 #include <sys/mount.h>
94 #include <sys/syscallargs.h>
95
96 #include <dev/cons.h>
97
98 #include <machine/autoconf.h>
99 #include <machine/cpu.h>
100 #include <machine/reg.h>
101 #include <machine/rpb.h>
102 #include <machine/prom.h>
103 #include <machine/cpuconf.h>
104 #ifndef NO_IEEE
105 #include <machine/ieeefp.h>
106 #endif
107
108 #include <dev/pci/pcivar.h>
109
110 #ifdef DDB
111 #include <machine/db_machdep.h>
112 #include <ddb/db_extern.h>
113 #include <ddb/db_interface.h>
114 #endif
115
116 #include "ioasic.h"
117
118 #if NIOASIC > 0
119 #include <machine/tc_machdep.h>
120 #include <dev/tc/tcreg.h>
121 #include <dev/tc/ioasicvar.h>
122 #endif
123
/* Forward declarations for functions defined later in this file. */
int	cpu_dump(void);
int	cpu_dumpsize(void);
u_long	cpu_dump_mempagecnt(void);
void	dumpsys(void);
void	identifycpu(void);
void	regdump(struct trapframe *framep);
void	printregs(struct reg *);

/* Physical address ranges usable for ISA-style (24-bit) DMA vs. any DMA. */
struct uvm_constraint_range  isa_constraint = { 0x0, 0x00ffffffUL };
struct uvm_constraint_range  dma_constraint = { 0x0, (paddr_t)-1 };
struct uvm_constraint_range *uvm_md_constraints[] = {
	&isa_constraint,
	NULL
};

/* Submaps carved out of kernel_map by cpu_startup(). */
struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int   safepri = 0;

#ifdef APERTURE
int allowaperture = 0;
#endif

int	totalphysmem;		/* total amount of physical memory in system */
int	physmem;		/* physical mem used by OpenBSD + some rsvd */
int	resvmem;		/* amount of memory reserved for PROM */
int	unusedmem;		/* amount of memory for OS that we don't use */
int	unknownmem;		/* amount of memory with an unknown use */

int	cputype;		/* system type, from the RPB */

int	bootdev_debug = 0;	/* patchable, or from DDB */

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;	/* from <machine/param.h> */
char	cpu_model[128];

struct	user *proc0paddr;	/* u-area of proc 0; set in alpha_init() */

/* Number of machine cycles per microsecond */
u_int64_t	cycles_per_usec;

struct bootinfo_kernel bootinfo;	/* copy of bootloader-passed info */

struct consdev *cn_tab;

/* For built-in TCDS */
#if defined(DEC_3000_300) || defined(DEC_3000_500)
u_int8_t	dec_3000_scsiid[2], dec_3000_scsifast[2];
#endif

struct platform platform;	/* filled in by the per-systype init routine */

/* for cpu_sysctl() */
#ifndef NO_IEEE
int	alpha_fp_sync_complete = 0;	/* fp fixup if sync even without /s */
#endif
#if NIOASIC > 0
int	alpha_led_blink = 1;
#endif

/*
 * XXX This should be dynamically sized, but we have the chicken-egg problem!
 * XXX it should also be larger than it is, because not all of the mddt
 * XXX clusters end up being used for VM.
 */
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];	/* low size bits overloaded */
int	mem_cluster_cnt;
197
/*
 * alpha_init --
 *	Early bootstrap of the kernel on the primary CPU, called before
 *	the VM system exists.  Parses the bootloader-provided bootinfo,
 *	maps the HWRPB, identifies the platform, sizes physical memory
 *	from the MDDT, bootstraps pmap/proc0, parses boot flags, and
 *	derives hz from RPB fields.  No console output is possible until
 *	init_bootstrap_console() below has run.
 */
void
alpha_init(unused, ptb, bim, bip, biv)
	u_long unused;
	u_long ptb;		/* PFN of current level 1 page table */
	u_long bim;		/* bootinfo magic */
	u_long bip;		/* bootinfo pointer */
	u_long biv;		/* bootinfo version */
{
	extern char kernel_text[], _end[];
	struct mddt *mddtp;
	struct mddt_cluster *memc;
	int i, mddtweird;
	struct vm_physseg *vps;
	vaddr_t kernstart, kernend;
	paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
	char *p;
	const char *bootinfo_msg;
	const struct cpuinit *c;
	extern caddr_t esym;
	struct cpu_info *ci;
	cpuid_t cpu_id;

	/* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

	/*
	 * Turn off interrupts (not mchecks) and floating point.
	 * Make sure the instruction and data streams are consistent.
	 */
	(void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
	alpha_pal_wrfen(0);
	ALPHA_TBIA();
	alpha_pal_imb();

	/* Initialize the SCB. */
	scb_init();

	cpu_id = cpu_number();

#if defined(MULTIPROCESSOR)
	/*
	 * Set our SysValue to the address of our cpu_info structure.
	 * Secondary processors do this in their spinup trampoline.
	 */
	alpha_pal_wrval((u_long)&cpu_info_primary);
	cpu_info[cpu_id] = &cpu_info_primary;
#endif

	ci = curcpu();
	ci->ci_cpuid = cpu_id;

	/*
	 * Get critical system information (if possible, from the
	 * information provided by the boot program).
	 */
	bootinfo_msg = NULL;
	if (bim == BOOTINFO_MAGIC) {
		if (biv == 0) {		/* backward compat */
			biv = *(u_long *)bip;
			bip += 8;
		}
		switch (biv) {
		case 1: {
			struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip;

			bootinfo.ssym = v1p->ssym;
			bootinfo.esym = v1p->esym;
			/* hwrpb may not be provided by boot block in v1 */
			if (v1p->hwrpb != NULL) {
				bootinfo.hwrpb_phys =
				    ((struct rpb *)v1p->hwrpb)->rpb_phys;
				bootinfo.hwrpb_size = v1p->hwrpbsize;
			} else {
				/* fall back to the architected HWRPB address */
				bootinfo.hwrpb_phys =
				    ((struct rpb *)HWRPB_ADDR)->rpb_phys;
				bootinfo.hwrpb_size =
				    ((struct rpb *)HWRPB_ADDR)->rpb_size;
			}
			bcopy(v1p->boot_flags, bootinfo.boot_flags,
			    min(sizeof v1p->boot_flags,
			      sizeof bootinfo.boot_flags));
			bcopy(v1p->booted_kernel, bootinfo.booted_kernel,
			    min(sizeof v1p->booted_kernel,
			      sizeof bootinfo.booted_kernel));
			boothowto = v1p->howto;
			/* booted dev not provided in bootinfo */
			init_prom_interface((struct rpb *)
			    ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys));
			prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
			    sizeof bootinfo.booted_dev);
			break;
		}
		default:
			bootinfo_msg = "unknown bootinfo version";
			goto nobootinfo;
		}
	} else {
		bootinfo_msg = "boot program did not pass bootinfo";
	nobootinfo:
		/* No usable bootinfo: get everything from the PROM itself. */
		bootinfo.ssym = (u_long)_end;
		bootinfo.esym = (u_long)_end;
		bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys;
		bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size;
		init_prom_interface((struct rpb *)HWRPB_ADDR);
		prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags,
		    sizeof bootinfo.boot_flags);
		prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel,
		    sizeof bootinfo.booted_kernel);
		prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
		    sizeof bootinfo.booted_dev);
	}

	esym = (caddr_t)bootinfo.esym;
	/*
	 * Initialize the kernel's mapping of the RPB.  It's needed for
	 * lots of things.
	 */
	hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys);

#if defined(DEC_3000_300) || defined(DEC_3000_500)
	/* Fetch SCSI settings from the PROM for the built-in TCDS. */
	if (hwrpb->rpb_type == ST_DEC_3000_300 ||
	    hwrpb->rpb_type == ST_DEC_3000_500) {
		prom_getenv(PROM_E_SCSIID, dec_3000_scsiid,
		    sizeof(dec_3000_scsiid));
		prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast,
		    sizeof(dec_3000_scsifast));
	}
#endif

	/*
	 * Remember how many cycles there are per microsecond,
	 * so that we can use delay().  Round up, for safety.
	 */
	cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;

	/*
	 * Initialize the (temporary) bootstrap console interface, so
	 * we can use printf until the VM system starts being setup.
	 * The real console is initialized before then.
	 */
	init_bootstrap_console();

	/* OUTPUT NOW ALLOWED */

	/* delayed from above */
	if (bootinfo_msg)
		printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n",
		    bootinfo_msg, bim, bip, biv);

	/* Initialize the trap vectors on the primary processor. */
	trap_init();

	/*
	 * Find out what hardware we're on, and do basic initialization.
	 */
	cputype = hwrpb->rpb_type;
	if (cputype < 0) {
		/*
		 * At least some white-box systems have SRM which
		 * reports a systype that's the negative of their
		 * blue-box counterpart.
		 */
		cputype = -cputype;
	}
	c = platform_lookup(cputype);
	if (c == NULL) {
		platform_not_supported();
		/* NOTREACHED */
	}
	(*c->init)();
	strlcpy(cpu_model, platform.model, sizeof cpu_model);

	/*
	 * Initialize the real console, so that the bootstrap console is
	 * no longer necessary.
	 */
	(*platform.cons_init)();

#if 0
	/* Paranoid sanity checking */

	assert(hwrpb->rpb_primary_cpu_id == alpha_pal_whami());

	/*
	 * On single-CPU systypes, the primary should always be CPU 0,
	 * except on Alpha 8200 systems where the CPU id is related
	 * to the VID, which is related to the Turbo Laser node id.
	 */
	if (cputype != ST_DEC_21000)
		assert(hwrpb->rpb_primary_cpu_id == 0);
#endif

	/* NO MORE FIRMWARE ACCESS ALLOWED */

#ifndef SMALL_KERNEL
	/*
	 * If we run on a BWX-capable processor, override cpu_switch
	 * with a faster version.
	 * We do this now because the kernel text might be mapped
	 * read-only eventually (although this is not the case at the moment).
	 */
	if (alpha_implver() >= ALPHA_IMPLVER_EV5) {
		if ((~alpha_amask(ALPHA_AMASK_BWX) & ALPHA_AMASK_BWX) != 0) {
			extern vaddr_t __bwx_switch0, __bwx_switch1,
			    __bwx_switch2, __bwx_switch3;
			u_int32_t *dst, *src, *end;

			/*
			 * Copy the BWX code (switch2..switch3) over the
			 * stub at switch0, then the remainder
			 * (switch1..switch2) right behind it.
			 */
			src = (u_int32_t *)&__bwx_switch2;
			end = (u_int32_t *)&__bwx_switch3;
			dst = (u_int32_t *)&__bwx_switch0;
			while (src != end)
				*dst++ = *src++;
			src = (u_int32_t *)&__bwx_switch1;
			end = (u_int32_t *)&__bwx_switch2;
			while (src != end)
				*dst++ = *src++;
		}
	}
#endif

	/*
	 * find out this system's page size
	 */
	if ((uvmexp.pagesize = hwrpb->rpb_page_size) != 8192)
		panic("page size %d != 8192?!", uvmexp.pagesize);

	uvm_setpagesize();

	/*
	 * Find the beginning and end of the kernel (and leave a
	 * bit of space before the beginning for the bootstrap
	 * stack).
	 */
	kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE;
	kernend = (vaddr_t)round_page((vaddr_t)bootinfo.esym);

	kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart));
	kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend));

	/*
	 * Find out how much memory is available, by looking at
	 * the memory cluster descriptors.  This also tries to do
	 * its best to detect things that have never been seen
	 * before...
	 */
	mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);

	/* MDDT SANITY CHECKING */
	mddtweird = 0;
	if (mddtp->mddt_cluster_cnt < 2) {
		mddtweird = 1;
		printf("WARNING: weird number of mem clusters: %lu\n",
		    (unsigned long)mddtp->mddt_cluster_cnt);
	}

#if 0
	printf("Memory cluster count: %d\n", mddtp->mddt_cluster_cnt);
#endif

	for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
		memc = &mddtp->mddt_clusters[i];
#if 0
		printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i,
		    memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage);
#endif
		totalphysmem += memc->mddt_pg_cnt;
		if (mem_cluster_cnt < VM_PHYSSEG_MAX) {	/* XXX */
			mem_clusters[mem_cluster_cnt].start =
			    ptoa(memc->mddt_pfn);
			mem_clusters[mem_cluster_cnt].size =
			    ptoa(memc->mddt_pg_cnt);
			/*
			 * Overload the low bits of the size with the
			 * access protection for this cluster (see the
			 * mem_clusters declaration above).
			 */
			if (memc->mddt_usage & MDDT_mbz ||
			    memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */
			    memc->mddt_usage & MDDT_PALCODE)
				mem_clusters[mem_cluster_cnt].size |=
				    PROT_READ;
			else
				mem_clusters[mem_cluster_cnt].size |=
				    PROT_READ | PROT_WRITE | PROT_EXEC;
			mem_cluster_cnt++;
		} /* XXX else print something! */

		if (memc->mddt_usage & MDDT_mbz) {
			mddtweird = 1;
			printf("WARNING: mem cluster %d has weird "
			    "usage 0x%lx\n", i, (long)memc->mddt_usage);
			unknownmem += memc->mddt_pg_cnt;
			continue;
		}
		if (memc->mddt_usage & MDDT_NONVOLATILE) {
			/* XXX should handle these... */
			printf("WARNING: skipping non-volatile mem "
			    "cluster %d\n", i);
			unusedmem += memc->mddt_pg_cnt;
			continue;
		}
		if (memc->mddt_usage & MDDT_PALCODE) {
			resvmem += memc->mddt_pg_cnt;
			continue;
		}

		/*
		 * We have a memory cluster available for system
		 * software use.  We must determine if this cluster
		 * holds the kernel.
		 */
		physmem += memc->mddt_pg_cnt;
		pfn0 = memc->mddt_pfn;
		pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt;
		if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) {
			/*
			 * Must compute the location of the kernel
			 * within the segment.
			 */
#if 0
			printf("Cluster %d contains kernel\n", i);
#endif
			if (pfn0 < kernstartpfn) {
				/*
				 * There is a chunk before the kernel.
				 */
#if 0
				printf("Loading chunk before kernel: "
				    "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
				uvm_page_physload(pfn0, kernstartpfn,
				    pfn0, kernstartpfn, 0);
			}
			if (kernendpfn < pfn1) {
				/*
				 * There is a chunk after the kernel.
				 */
#if 0
				printf("Loading chunk after kernel: "
				    "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
				uvm_page_physload(kernendpfn, pfn1,
				    kernendpfn, pfn1, 0);
			}
		} else {
			/*
			 * Just load this cluster as one chunk.
			 */
#if 0
			printf("Loading cluster %d: 0x%lx / 0x%lx\n", i,
			    pfn0, pfn1);
#endif
			uvm_page_physload(pfn0, pfn1, pfn0, pfn1, 0);
		}
	}

#ifdef DEBUG
	/*
	 * Dump out the MDDT if it looks odd...
	 */
	if (mddtweird) {
		printf("\n");
		printf("complete memory cluster information:\n");
		for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
			printf("mddt %d:\n", i);
			printf("\tpfn %lx\n",
			    mddtp->mddt_clusters[i].mddt_pfn);
			printf("\tcnt %lx\n",
			    mddtp->mddt_clusters[i].mddt_pg_cnt);
			printf("\ttest %lx\n",
			    mddtp->mddt_clusters[i].mddt_pg_test);
			printf("\tbva %lx\n",
			    mddtp->mddt_clusters[i].mddt_v_bitaddr);
			printf("\tbpa %lx\n",
			    mddtp->mddt_clusters[i].mddt_p_bitaddr);
			printf("\tbcksum %lx\n",
			    mddtp->mddt_clusters[i].mddt_bit_cksum);
			printf("\tusage %lx\n",
			    mddtp->mddt_clusters[i].mddt_usage);
		}
		printf("\n");
	}
#endif

	if (totalphysmem == 0)
		panic("can't happen: system seems to have no memory!");
#if 0
	printf("totalphysmem = %u\n", totalphysmem);
	printf("physmem = %u\n", physmem);
	printf("resvmem = %d\n", resvmem);
	printf("unusedmem = %d\n", unusedmem);
	printf("unknownmem = %d\n", unknownmem);
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 */
	{
		vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
		vsize_t reqsz = sz;

		vps = &vm_physmem[vm_nphysseg - 1];

		/* shrink so that it'll fit in the last segment */
		if ((vps->avail_end - vps->avail_start) < atop(sz))
			sz = ptoa(vps->avail_end - vps->avail_start);

		/* steal the pages off the top of the last segment */
		vps->end -= atop(sz);
		vps->avail_end -= atop(sz);
		initmsgbuf((caddr_t) ALPHA_PHYS_TO_K0SEG(ptoa(vps->end)), sz);

		/* Remove the last segment if it now has no pages. */
		if (vps->start == vps->end)
			vm_nphysseg--;

		/* warn if the message buffer had to be shrunk */
		if (sz != reqsz)
			printf("WARNING: %ld bytes not available for msgbuf "
			    "in last cluster (%ld used)\n", reqsz, sz);

	}

	/*
	 * Init mapping for u page(s) for proc 0
	 */
	proc0.p_addr = proc0paddr =
	    (struct user *)pmap_steal_memory(UPAGES * PAGE_SIZE, NULL, NULL);

	/*
	 * Initialize the virtual memory system, and set the
	 * page table base register in proc 0's PCB.
	 */
	pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
	    hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt);

	/*
	 * Initialize the rest of proc 0's PCB, and cache its physical
	 * address.
	 */
	proc0.p_md.md_pcbpaddr =
	    (struct pcb *)ALPHA_K0SEG_TO_PHYS((vaddr_t)&proc0paddr->u_pcb);

	/*
	 * Set the kernel sp, reserving space for an (empty) trapframe,
	 * and make proc0's trapframe pointer point to it for sanity.
	 */
	proc0paddr->u_pcb.pcb_hw.apcb_ksp =
	    (u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe);
	proc0.p_md.md_tf =
	    (struct trapframe *)proc0paddr->u_pcb.pcb_hw.apcb_ksp;

	/*
	 * Initialize the primary CPU's idle PCB to proc0's.  In a
	 * MULTIPROCESSOR configuration, each CPU will later get
	 * its own idle PCB when autoconfiguration runs.
	 */
	ci->ci_idle_pcb = &proc0paddr->u_pcb;
	ci->ci_idle_pcb_paddr = (u_long)proc0.p_md.md_pcbpaddr;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */

	for (p = bootinfo.boot_flags; p && *p != '\0'; p++) {
		/*
		 * Note that we'd really like to differentiate case here,
		 * but the Alpha AXP Architecture Reference Manual
		 * says that we shouldn't.
		 */
		switch (*p) {
		case 'a': /* Ignore */
		case 'A':
			break;

		case 'b': /* Enter DDB as soon as the console is initialised */
		case 'B':
			boothowto |= RB_KDB;
			break;

		case 'c': /* enter user kernel configuration */
		case 'C':
			boothowto |= RB_CONFIG;
			break;

#ifdef DEBUG
		case 'd': /* crash dump immediately after autoconfig */
		case 'D':
			boothowto |= RB_DUMP;
			break;
#endif

		case 'h': /* always halt, never reboot */
		case 'H':
			boothowto |= RB_HALT;
			break;


		case 'n': /* askname */
		case 'N':
			boothowto |= RB_ASKNAME;
			break;

		case 's': /* single-user */
		case 'S':
			boothowto |= RB_SINGLE;
			break;

		case '-':
			/*
			 * Just ignore this.  It's not required, but it's
			 * common for it to be passed regardless.
			 */
			break;

		default:
			printf("Unrecognized boot flag '%c'.\n", *p);
			break;
		}
	}


	/*
	 * Figure out the number of cpus in the box, from RPB fields.
	 * Really.  We mean it.
	 */
	for (ncpusfound = 0, i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
		struct pcs *pcsp;

		pcsp = LOCATE_PCS(hwrpb, i);
		if ((pcsp->pcs_flags & PCS_PP) != 0)
			ncpusfound++;
	}

	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
#ifdef DDB
	db_machine_init();
	ddb_init();

	if (boothowto & RB_KDB)
		db_enter();
#endif
	/*
	 * Figure out our clock frequency, from RPB fields.
	 */
	hz = hwrpb->rpb_intr_freq >> 12;
	if (!(60 <= hz && hz <= 10240)) {
#ifdef DIAGNOSTIC
		printf("WARNING: unbelievable rpb_intr_freq: %lu (%d hz)\n",
		    (unsigned long)hwrpb->rpb_intr_freq, hz);
#endif
		/* fall back to a sane default clock rate */
		hz = 1024;
	}
	tick = 1000000 / hz;
	tick_nsec = 1000000000 / hz;
}
749
void
consinit()
{
	/*
	 * Nothing to do here: both the bootstrap console and the
	 * real console are fully set up in alpha_init(), long before
	 * this function is ever reached.
	 */
}
758
/*
 * cpu_startup --
 *	Announce the system (version, CPU, memory statistics), allocate
 *	the exec and physio submaps from kernel_map, initialize the
 *	buffer cache, optionally enter the user configuration prompt,
 *	and prepare the HWRPB for secondary CPU startup.
 */
void
cpu_startup()
{
	vaddr_t minaddr, maxaddr;
#if defined(DEBUG)
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	/* quiet pmap tracing while we churn through the maps below */
	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s", version);
	identifycpu();
	printf("real mem = %lu (%luMB)\n", ptoa((psize_t)totalphysmem),
	    ptoa((psize_t)totalphysmem) / 1024 / 1024);
	printf("rsvd mem = %lu (%luMB)\n", ptoa((psize_t)resvmem),
	    ptoa((psize_t)resvmem) / 1024 / 1024);
	if (unusedmem) {
		printf("WARNING: unused memory = %lu (%luMB)\n",
		    ptoa((psize_t)unusedmem),
		    ptoa((psize_t)unusedmem) / 1024 / 1024);
	}
	if (unknownmem) {
		printf("WARNING: %lu (%luMB) of memory with unknown purpose\n",
		    ptoa((psize_t)unknownmem),
		    ptoa((psize_t)unknownmem) / 1024 / 1024);
	}

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

#if defined(DEBUG)
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa((psize_t)uvmexp.free),
	    ptoa((psize_t)uvmexp.free) / 1024 / 1024);
#if 0
	{
		extern u_long pmap_pages_stolen;

		printf("stolen memory for VM structures = %d\n", pmap_pages_stolen * PAGE_SIZE);
	}
#endif

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}

	/*
	 * Set up the HWPCB so that it's safe to configure secondary
	 * CPUs.
	 */
	hwrpb_primary_init();
}
839
840 /*
841 * Retrieve the platform name from the DSR.
842 */
843 const char *
alpha_dsr_sysname()844 alpha_dsr_sysname()
845 {
846 struct dsrdb *dsr;
847 const char *sysname;
848
849 /*
850 * DSR does not exist on early HWRPB versions.
851 */
852 if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS)
853 return (NULL);
854
855 dsr = (struct dsrdb *)(((caddr_t)hwrpb) + hwrpb->rpb_dsrdb_off);
856 sysname = (const char *)((caddr_t)dsr + (dsr->dsr_sysname_off +
857 sizeof(u_int64_t)));
858 return (sysname);
859 }
860
861 /*
862 * Lookup the system specified system variation in the provided table,
863 * returning the model string on match.
864 */
865 const char *
alpha_variation_name(variation,avtp)866 alpha_variation_name(variation, avtp)
867 u_int64_t variation;
868 const struct alpha_variation_table *avtp;
869 {
870 int i;
871
872 for (i = 0; avtp[i].avt_model != NULL; i++)
873 if (avtp[i].avt_variation == variation)
874 return (avtp[i].avt_model);
875 return (NULL);
876 }
877
878 /*
879 * Generate a default platform name based for unknown system variations.
880 */
881 const char *
alpha_unknown_sysname()882 alpha_unknown_sysname()
883 {
884 static char s[128]; /* safe size */
885
886 snprintf(s, sizeof s, "%s family, unknown model variation 0x%lx",
887 platform.family, (unsigned long)hwrpb->rpb_variation & SV_ST_MASK);
888 return ((const char *)s);
889 }
890
/*
 * identifycpu --
 *	Print CPU identification (model, clock rate, page size, CPU
 *	count) and record the system serial number for sysctl.
 */
void
identifycpu()
{
	char *s;
	int slen;

	/*
	 * print out CPU identification information.
	 */
	printf("%s", cpu_model);
	/* skip the MHz printout if the model string already contains one */
	for(s = cpu_model; *s; ++s)
		if(strncasecmp(s, "MHz", 3) == 0)
			goto skipMHz;
	printf(", %luMHz", (unsigned long)hwrpb->rpb_cc_freq / 1000000);
skipMHz:
	/* fill in hw_serial if a serial number is known */
	slen = strlen(hwrpb->rpb_ssn) + 1;
	if (slen > 1) {
		/* M_NOWAIT: losing hw_serial is acceptable if we're low */
		hw_serial = malloc(slen, M_SYSCTL, M_NOWAIT);
		if (hw_serial)
			strlcpy(hw_serial, (char *)hwrpb->rpb_ssn, slen);
	}

	printf("\n");
	printf("%lu byte page size, %d processor%s.\n",
	    (unsigned long)hwrpb->rpb_page_size, ncpusfound,
	    ncpusfound == 1 ? "" : "s");
#if 0
	/* this is not particularly useful! */
	printf("variation: 0x%lx, revision 0x%lx\n",
	    hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision);
#endif
}
924
int	waittime = -1;		/* -1 until boot() has started a filesystem sync */
struct pcb dumppcb;		/* register state saved by dumpsys() via savectx() */
927
/*
 * boot --
 *	Halt, reboot, or power down the system according to the RB_*
 *	bits in howto: sync filesystems (unless RB_NOSYNC), update the
 *	battery clock, halt secondary CPUs, take a crash dump if asked,
 *	power down drivers, and finally hand control back to the PROM.
 *	Never returns.
 */
__dead void
boot(int howto)
{
#if defined(MULTIPROCESSOR)
	u_long wait_mask;
	int i;
#endif

	if ((howto & RB_RESET) != 0)
		goto doreset;

	/* If we panicked before anything was up, skip the sync entirely. */
	if (cold) {
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	if ((boothowto & RB_HALT) != 0)
		howto |= RB_HALT;

	boothowto = howto;
	/* waittime guards against recursive syncs if we panic while syncing */
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown(curproc);

		if ((howto & RB_TIMEBAD) == 0) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}
	if_downall();

	uvm_shutdown();
	splhigh();
	cold = 1;

#if defined(MULTIPROCESSOR)
	/*
	 * Halt all other CPUs.
	 */
	wait_mask = (1UL << hwrpb->rpb_primary_cpu_id);
	alpha_broadcast_ipi(ALPHA_IPI_HALT);

	/* Ensure any CPUs paused by DDB resume execution so they can halt */
	cpus_paused = 0;

	/* wait up to ~10s for only the primary to remain running */
	for (i = 0; i < 10000; i++) {
		alpha_mb();
		if (cpus_running == wait_mask)
			break;
		delay(1000);
	}
	alpha_mb();
	if (cpus_running != wait_mask)
		printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n",
		    cpus_running);
#endif

	if ((howto & RB_DUMP) != 0)
		dumpsys();

haltsys:
	config_suspend_all(DVACT_POWERDOWN);

#ifdef BOOTKEY
	printf("hit any key to %s...\n",
	    (howto & RB_HALT) != 0 ? "halt" : "reboot");
	cnpollc(1);	/* for proper keyboard command handling */
	cngetc();
	cnpollc(0);
	printf("\n");
#endif

	/* Finally, powerdown/halt/reboot the system. */
	if ((howto & RB_POWERDOWN) != 0 &&
	    platform.powerdown != NULL) {
		(*platform.powerdown)();
		/* only reached if the platform hook failed to power off */
		printf("WARNING: powerdown failed!\n");
	}
doreset:
	printf("%s\n\n",
	    (howto & RB_HALT) != 0 ? "halted." : "rebooting...");
	prom_halt((howto & RB_HALT) != 0);
	for (;;)
		continue;
	/* NOTREACHED */
}
1016
/*
 * These variables are needed by /sbin/savecore
 */
u_long	dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */
1023
1024 /*
1025 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1026 */
1027 int
cpu_dumpsize()1028 cpu_dumpsize()
1029 {
1030 int size;
1031
1032 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
1033 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
1034 if (roundup(size, dbtob(1)) != dbtob(1))
1035 return -1;
1036
1037 return (1);
1038 }
1039
1040 /*
1041 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
1042 */
1043 u_long
cpu_dump_mempagecnt()1044 cpu_dump_mempagecnt()
1045 {
1046 u_long i, n;
1047
1048 n = 0;
1049 for (i = 0; i < mem_cluster_cnt; i++)
1050 n += atop(mem_clusters[i].size);
1051 return (n);
1052 }
1053
/*
 * cpu_dump: dump machine-dependent kernel core dump headers.
 *
 * Builds one disk block containing the kcore segment header, the
 * alpha cpu_kcore_hdr (level-1 page table PA, page size, segment
 * count) and the per-cluster memory descriptors, then writes it to
 * dumpdev at dumplo.  Returns the dump routine's error code.
 */
int
cpu_dump()
{
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	char buf[dbtob(1)];
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
	int i;

	dump = bdevsw[major(dumpdev)].d_dump;

	/* lay out the three headers back-to-back in one block */
	bzero(buf, sizeof buf);
	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
	memsegp = (phys_ram_seg_t *)&buf[ALIGN(sizeof(*segp)) +
	    ALIGN(sizeof(*cpuhdrp))];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map);
	cpuhdrp->page_size = PAGE_SIZE;
	cpuhdrp->nmemsegs = mem_cluster_cnt;

	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		memsegp[i].start = mem_clusters[i].start;
		/* mask off the protection bits overloaded into the size */
		memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK;
	}

	return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
}
1098
/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
dumpconf(void)
{
	int nblks, dumpblks;	/* size of dump area */

	/* bail if there is no dump device or it has no usable size */
	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		return;
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		return;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();
}
1132
1133 /*
1134 * Dump the kernel's image to the swap partition.
1135 */
1136 #define BYTES_PER_DUMP PAGE_SIZE
1137
1138 void
dumpsys()1139 dumpsys()
1140 {
1141 u_long totalbytesleft, bytes, i, n, memcl;
1142 u_long maddr;
1143 int psize;
1144 daddr_t blkno;
1145 int (*dump)(dev_t, daddr_t, caddr_t, size_t);
1146 int error;
1147 extern int msgbufmapped;
1148
1149 /* Save registers. */
1150 savectx(&dumppcb);
1151
1152 msgbufmapped = 0; /* don't record dump msgs in msgbuf */
1153 if (dumpdev == NODEV)
1154 return;
1155
1156 /*
1157 * For dumps during autoconfiguration,
1158 * if dump device has already configured...
1159 */
1160 if (dumpsize == 0)
1161 dumpconf();
1162 if (dumplo <= 0) {
1163 printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
1164 minor(dumpdev));
1165 return;
1166 }
1167 printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
1168 minor(dumpdev), dumplo);
1169
1170 psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1171 printf("dump ");
1172 if (psize == -1) {
1173 printf("area unavailable\n");
1174 return;
1175 }
1176
1177 /* XXX should purge all outstanding keystrokes. */
1178
1179 if ((error = cpu_dump()) != 0)
1180 goto err;
1181
1182 totalbytesleft = ptoa(cpu_dump_mempagecnt());
1183 blkno = dumplo + cpu_dumpsize();
1184 dump = bdevsw[major(dumpdev)].d_dump;
1185 error = 0;
1186
1187 for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
1188 maddr = mem_clusters[memcl].start;
1189 bytes = mem_clusters[memcl].size & ~PAGE_MASK;
1190
1191 for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
1192
1193 /* Print out how many MBs we to go. */
1194 if ((totalbytesleft % (1024*1024)) == 0)
1195 printf("%ld ", totalbytesleft / (1024 * 1024));
1196
1197 /* Limit size for next transfer. */
1198 n = bytes - i;
1199 if (n > BYTES_PER_DUMP)
1200 n = BYTES_PER_DUMP;
1201
1202 error = (*dump)(dumpdev, blkno,
1203 (caddr_t)ALPHA_PHYS_TO_K0SEG(maddr), n);
1204 if (error)
1205 goto err;
1206 maddr += n;
1207 blkno += btodb(n); /* XXX? */
1208
1209 /* XXX should look for keystrokes, to cancel. */
1210 }
1211 }
1212
1213 err:
1214 switch (error) {
1215 #ifdef DEBUG
1216 case ENXIO:
1217 printf("device bad\n");
1218 break;
1219
1220 case EFAULT:
1221 printf("device not ready\n");
1222 break;
1223
1224 case EINVAL:
1225 printf("area improper\n");
1226 break;
1227
1228 case EIO:
1229 printf("i/o error\n");
1230 break;
1231
1232 case EINTR:
1233 printf("aborted from console\n");
1234 break;
1235 #endif /* DEBUG */
1236 case 0:
1237 printf("succeeded\n");
1238 break;
1239
1240 default:
1241 printf("error %d\n", error);
1242 break;
1243 }
1244 printf("\n\n");
1245 delay(1000);
1246 }
1247
1248 void
frametoreg(framep,regp)1249 frametoreg(framep, regp)
1250 struct trapframe *framep;
1251 struct reg *regp;
1252 {
1253
1254 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
1255 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
1256 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
1257 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
1258 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
1259 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
1260 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
1261 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
1262 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
1263 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
1264 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
1265 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
1266 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
1267 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
1268 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
1269 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
1270 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0];
1271 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1];
1272 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2];
1273 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
1274 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
1275 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
1276 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
1277 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
1278 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
1279 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
1280 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
1281 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
1282 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
1283 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP];
1284 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */
1285 regp->r_regs[R_ZERO] = 0;
1286 }
1287
1288 void
regtoframe(regp,framep)1289 regtoframe(regp, framep)
1290 struct reg *regp;
1291 struct trapframe *framep;
1292 {
1293
1294 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
1295 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
1296 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
1297 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
1298 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
1299 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
1300 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
1301 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
1302 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
1303 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
1304 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
1305 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
1306 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
1307 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
1308 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
1309 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
1310 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0];
1311 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1];
1312 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2];
1313 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
1314 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
1315 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
1316 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
1317 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
1318 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
1319 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
1320 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
1321 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
1322 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
1323 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP];
1324 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */
1325 /* ??? = regp->r_regs[R_ZERO]; */
1326 }
1327
1328 void
printregs(regp)1329 printregs(regp)
1330 struct reg *regp;
1331 {
1332 int i;
1333
1334 for (i = 0; i < 32; i++)
1335 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
1336 i & 1 ? "\n" : "\t");
1337 }
1338
1339 void
regdump(framep)1340 regdump(framep)
1341 struct trapframe *framep;
1342 {
1343 struct reg reg;
1344
1345 frametoreg(framep, ®);
1346 reg.r_regs[R_SP] = alpha_pal_rdusp();
1347
1348 printf("REGISTERS:\n");
1349 printregs(®);
1350 }
1351
/*
 * Send an interrupt to process.
 *
 * Build a signal context (and, if requested, a siginfo) on the user
 * stack or the alternate signal stack, then rewrite the trap frame so
 * that return to userland enters the signal trampoline with the
 * handler's arguments in a0-a2 and the handler address in t12 (pv).
 * Returns 0 on success, 1 if the context could not be copied out.
 */
int
sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip,
    int info, int onstack)
{
	struct proc *p = curproc;
	struct sigcontext ksc, *scp;
	struct fpreg *fpregs = (struct fpreg *)&ksc.sc_fpregs;
	struct trapframe *frame;
	unsigned long oldsp;
	int fsize, rndfsize, kscsize;
	siginfo_t *sip;

	/* Current user stack pointer lives in the PALcode USP. */
	oldsp = alpha_pal_rdusp();
	frame = p->p_md.md_tf;
	fsize = sizeof ksc;
	/* Round up to 16 bytes: the Alpha ABI stack alignment. */
	rndfsize = ((fsize + 15) / 16) * 16;
	kscsize = rndfsize;

	if (info) {
		/* Reserve additional (aligned) room for the siginfo. */
		fsize += sizeof *ksip;
		rndfsize = ((fsize + 15) / 16) * 16;
	}

	/*
	 * Allocate space for the signal handler context.
	 */
	if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
	    !sigonstack(oldsp) && onstack)
		scp = (struct sigcontext *)
		    (trunc_page((vaddr_t)p->p_sigstk.ss_sp + p->p_sigstk.ss_size)
		    - rndfsize);
	else
		scp = (struct sigcontext *)(oldsp - rndfsize);

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	bzero(&ksc, sizeof(ksc));
	ksc.sc_mask = mask;
	ksc.sc_pc = frame->tf_regs[FRAME_PC];
	ksc.sc_ps = frame->tf_regs[FRAME_PS];

	/* copy the registers. */
	frametoreg(frame, (struct reg *)ksc.sc_regs);
	ksc.sc_regs[R_SP] = oldsp;

	/* save the floating-point state, if necessary, then copy it. */
	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p, 1);
	ksc.sc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
	memcpy(/*ksc.sc_*/fpregs, &p->p_addr->u_pcb.pcb_fp,
	    sizeof(struct fpreg));
#ifndef NO_IEEE
	ksc.sc_fp_control = alpha_read_fp_c(p);
#else
	ksc.sc_fp_control = 0;
#endif
	memset(ksc.sc_reserved, 0, sizeof ksc.sc_reserved);	/* XXX */
	memset(ksc.sc_xxx, 0, sizeof ksc.sc_xxx);		/* XXX */

	if (info) {
		/* The siginfo is placed directly above the sigcontext. */
		sip = (void *)scp + kscsize;
		if (copyout(ksip, (caddr_t)sip, fsize - kscsize) != 0)
			return 1;
	} else
		sip = NULL;

	/* Cookie lets sigreturn verify the context wasn't forged/moved. */
	ksc.sc_cookie = (long)scp ^ p->p_p->ps_sigcookie;
	if (copyout((caddr_t)&ksc, (caddr_t)scp, kscsize) != 0)
		return 1;

	/*
	 * Set up the registers to return to sigcode.
	 */
	frame->tf_regs[FRAME_PC] = p->p_p->ps_sigcode;
	frame->tf_regs[FRAME_A0] = sig;
	frame->tf_regs[FRAME_A1] = (u_int64_t)sip;
	frame->tf_regs[FRAME_A2] = (u_int64_t)scp;
	frame->tf_regs[FRAME_T12] = (u_int64_t)catcher;		/* t12 is pv */
	alpha_pal_wrusp((unsigned long)scp);

	return 0;
}
1438
/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sys_sigreturn(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext ksc, *scp = SCARG(uap, sigcntxp);
	struct fpreg *fpregs = (struct fpreg *)&ksc.sc_fpregs;
	int error;

	/* Only the signal trampoline is allowed to call sigreturn. */
	if (PROC_PC(p) != p->p_p->ps_sigcoderet) {
		sigexit(p, SIGILL);
		return (EPERM);
	}

	if ((error = copyin(scp, &ksc, sizeof(ksc))) != 0)
		return (error);

	/* The cookie must match the one sendsig() stored in the context. */
	if (ksc.sc_cookie != ((long)scp ^ p->p_p->ps_sigcookie)) {
		sigexit(p, SIGILL);
		return (EFAULT);
	}

	/* Prevent reuse of the sigcontext cookie */
	ksc.sc_cookie = 0;
	(void)copyout(&ksc.sc_cookie, (caddr_t)scp +
	    offsetof(struct sigcontext, sc_cookie), sizeof (ksc.sc_cookie));

	/*
	 * Restore the user-supplied information
	 */
	p->p_sigmask = ksc.sc_mask &~ sigcantmask;

	p->p_md.md_tf->tf_regs[FRAME_PC] = ksc.sc_pc;
	/* Force user-mode PSL bits so the user cannot raise privilege. */
	p->p_md.md_tf->tf_regs[FRAME_PS] =
	    (ksc.sc_ps | ALPHA_PSL_USERSET) & ~ALPHA_PSL_USERCLR;

	regtoframe((struct reg *)ksc.sc_regs, p->p_md.md_tf);
	alpha_pal_wrusp(ksc.sc_regs[R_SP]);

	/* XXX ksc.sc_ownedfp ? */
	/* Discard any live FPU state before overwriting the PCB copy. */
	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p, 0);
	memcpy(&p->p_addr->u_pcb.pcb_fp, /*ksc.sc_*/fpregs,
	    sizeof(struct fpreg));
#ifndef NO_IEEE
	p->p_addr->u_pcb.pcb_fp.fpr_cr = ksc.sc_fpcr;
	p->p_md.md_flags = ksc.sc_fp_control & MDP_FP_C;
#endif
	return (EJUSTRETURN);
}
1500
/*
 * machine dependent system variables.
 *
 * Handles the hw-independent CPU_* sysctl nodes for alpha; returns
 * ENOTDIR for over-long names, EOPNOTSUPP for unknown nodes.
 */
int
cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	dev_t consdev;
#if NIOASIC > 0
	int oldval, ret;
#endif

	/*
	 * All nodes except CPU_CHIPSET are terminal, so a multi-level
	 * name is only acceptable when walking the chipset subtree.
	 */
	if (name[0] != CPU_CHIPSET && namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		/* Report the console device in use, or NODEV if unknown. */
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));

#ifndef SMALL_KERNEL
	case CPU_BOOTED_KERNEL:
		/* Read-only: the kernel path the firmware booted. */
		return (sysctl_rdstring(oldp, oldlenp, newp,
		    bootinfo.booted_kernel));

	case CPU_CHIPSET:
		/* Hand the remaining name components to the chipset code. */
		return (alpha_sysctl_chipset(name + 1, namelen - 1, oldp,
		    oldlenp));
#endif /* SMALL_KERNEL */

#ifndef NO_IEEE
	case CPU_FP_SYNC_COMPLETE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &alpha_fp_sync_complete));
#endif
	case CPU_ALLOWAPERTURE:
#ifdef APERTURE
		/* With securelevel raised, the value may only be lowered. */
		if (securelevel > 0)
			return (sysctl_int_lower(oldp, oldlenp, newp, newlen,
			    &allowaperture));
		else
			return (sysctl_int(oldp, oldlenp, newp, newlen,
			    &allowaperture));
#else
		return (sysctl_rdint(oldp, oldlenp, newp, 0));
#endif
#if NIOASIC > 0
	case CPU_LED_BLINK:
		oldval = alpha_led_blink;
		ret = sysctl_int(oldp, oldlenp, newp, newlen, &alpha_led_blink);
		/* Apply a changed blink setting to the hardware right away. */
		if (oldval != alpha_led_blink)
			ioasic_led_blink(NULL);
		return (ret);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
1564
/*
 * Set registers on exec.
 *
 * Initialize the trap frame and FP state for a freshly exec'd image:
 * clear (or poison, under DEBUG) all integer registers, point PC at
 * the image entry point, and hand the stack pointer to the image in
 * both USP and a0.
 */
void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    struct ps_strings *arginfo)
{
	struct trapframe *tfp = p->p_md.md_tf;
#ifdef DEBUG
	int i;
#endif

#ifdef DEBUG
	/*
	 * Crash and dump, if the user requested it.
	 */
	if (boothowto & RB_DUMP)
		panic("crash requested by boot flags");
#endif

#ifdef DEBUG
	/* Poison the frame so stray reads of it are recognizable. */
	for (i = 0; i < FRAME_SIZE; i++)
		tfp->tf_regs[i] = 0xbabefacedeadbeef;
	tfp->tf_regs[FRAME_A1] = 0;
#else
	memset(tfp->tf_regs, 0, FRAME_SIZE * sizeof tfp->tf_regs[0]);
#endif
	memset(&p->p_addr->u_pcb.pcb_fp, 0, sizeof p->p_addr->u_pcb.pcb_fp);
	alpha_pal_wrusp(stack);
	tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET;
	/* Entry point must be 4-byte aligned; mask off the low bits. */
	tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3;

	tfp->tf_regs[FRAME_A0] = stack;
	/* a1 and a2 already zeroed */
	tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC];	/* a.k.a. PV */

	p->p_md.md_flags &= ~MDP_FPUSED;
#ifndef NO_IEEE
	/* Reset FP control to default round-to-nearest unless inherited. */
	if (__predict_true((p->p_md.md_flags & IEEE_INHERIT) == 0)) {
		p->p_md.md_flags &= ~MDP_FP_C;
		p->p_addr->u_pcb.pcb_fp.fpr_cr = FPCR_DYN(FP_RN);
	}
#endif
	/* Discard any FPU state left over from the old image. */
	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p, 0);
}
1611
/*
 * Release the FPU.
 *
 * Save (if "save" is set) and then discard the FPU state currently
 * loaded on CPU "ci", breaking the proc <-> cpu ownership link.
 * Must be called on the CPU that owns the state (ci == curcpu()).
 */
void
fpusave_cpu(struct cpu_info *ci, int save)
{
	struct proc *p;
#if defined(MULTIPROCESSOR)
	int s;
#endif

	KDASSERT(ci == curcpu());

#if defined(MULTIPROCESSOR)
	/* Need to block IPIs */
	s = splipi();
	/* Advertise that a save is in progress on this CPU. */
	atomic_setbits_ulong(&ci->ci_flags, CPUF_FPUSAVE);
#endif

	p = ci->ci_fpcurproc;
	if (p == NULL)
		goto out;

	if (save) {
		/* FP access must be enabled before the state can be read. */
		alpha_pal_wrfen(1);
		savefpstate(&p->p_addr->u_pcb.pcb_fp);
	}

	alpha_pal_wrfen(0);

	/* Break the ownership link in both directions. */
	p->p_addr->u_pcb.pcb_fpcpu = NULL;
	ci->ci_fpcurproc = NULL;

out:
#if defined(MULTIPROCESSOR)
	atomic_clearbits_ulong(&ci->ci_flags, CPUF_FPUSAVE);
	alpha_pal_swpipl(s);
#endif
	return;
}
1652
/*
 * Synchronize FP state for this process.
 *
 * Ensure process "p" no longer has live FPU state on any CPU,
 * saving it into the PCB first if "save" is set (discarding it
 * otherwise).  On MP, a remote owner is asked via IPI and we spin
 * until it releases the state.
 */
void
fpusave_proc(struct proc *p, int save)
{
	struct cpu_info *ci = curcpu();
	struct cpu_info *oci;
#if defined(MULTIPROCESSOR)
	u_long ipi = save ? ALPHA_IPI_SYNCH_FPU : ALPHA_IPI_DISCARD_FPU;
	int s;
#endif

	KDASSERT(p->p_addr != NULL);

	/* Retry until the state is confirmed saved or discarded. */
	for (;;) {
#if defined(MULTIPROCESSOR)
		/* Need to block IPIs */
		s = splipi();
#endif

		oci = p->p_addr->u_pcb.pcb_fpcpu;
		if (oci == NULL) {
			/* No CPU holds this process's FPU state: done. */
#if defined(MULTIPROCESSOR)
			alpha_pal_swpipl(s);
#endif
			return;
		}

#if defined(MULTIPROCESSOR)
		if (oci == ci) {
			/* We own the state ourselves; release it directly. */
			KASSERT(ci->ci_fpcurproc == p);
			alpha_pal_swpipl(s);
			fpusave_cpu(ci, save);
			return;
		}

		/*
		 * The other cpu may still be running and could have
		 * discarded the fpu context on its own.
		 */
		if (oci->ci_fpcurproc != p) {
			alpha_pal_swpipl(s);
			continue;
		}

		/* Ask the owning CPU to release, then wait for it. */
		alpha_send_ipi(oci->ci_cpuid, ipi);
		alpha_pal_swpipl(s);

		while (p->p_addr->u_pcb.pcb_fpcpu != NULL)
			CPU_BUSY_CYCLE();
#else
		KASSERT(ci->ci_fpcurproc == p);
		fpusave_cpu(ci, save);
#endif /* MULTIPROCESSOR */

		break;
	}
}
1712
1713 int
spl0()1714 spl0()
1715 {
1716
1717 if (ssir) {
1718 (void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT);
1719 softintr_dispatch();
1720 }
1721
1722 return (alpha_pal_swpipl(ALPHA_PSL_IPL_0));
1723 }
1724
1725 /*
1726 * Wait "n" microseconds.
1727 */
1728 void
delay(n)1729 delay(n)
1730 unsigned long n;
1731 {
1732 unsigned long pcc0, pcc1, curcycle, cycles, usec;
1733
1734 if (n == 0)
1735 return;
1736
1737 pcc0 = alpha_rpcc() & 0xffffffffUL;
1738 cycles = 0;
1739 usec = 0;
1740
1741 while (usec <= n) {
1742 /*
1743 * Get the next CPU cycle count - assumes that we can not
1744 * have had more than one 32 bit overflow.
1745 */
1746 pcc1 = alpha_rpcc() & 0xffffffffUL;
1747 if (pcc1 < pcc0)
1748 curcycle = (pcc1 + 0x100000000UL) - pcc0;
1749 else
1750 curcycle = pcc1 - pcc0;
1751
1752 /*
1753 * We now have the number of processor cycles since we
1754 * last checked. Add the current cycle count to the
1755 * running total. If it's over cycles_per_usec, increment
1756 * the usec counter.
1757 */
1758 cycles += curcycle;
1759 while (cycles >= cycles_per_usec) {
1760 usec++;
1761 cycles -= cycles_per_usec;
1762 }
1763 pcc0 = pcc1;
1764 }
1765 }
1766
1767 int
alpha_pa_access(pa)1768 alpha_pa_access(pa)
1769 u_long pa;
1770 {
1771 int i;
1772
1773 for (i = 0; i < mem_cluster_cnt; i++) {
1774 if (pa < mem_clusters[i].start)
1775 continue;
1776 if ((pa - mem_clusters[i].start) >=
1777 (mem_clusters[i].size & ~PAGE_MASK))
1778 continue;
1779 return (mem_clusters[i].size & PAGE_MASK); /* prot */
1780 }
1781
1782 /*
1783 * Address is not a memory address. If we're secure, disallow
1784 * access. Otherwise, grant read/write.
1785 */
1786 if (securelevel > 0)
1787 return (PROT_NONE);
1788 else
1789 return (PROT_READ | PROT_WRITE);
1790 }
1791
1792 /* XXX XXX BEGIN XXX XXX */
1793 paddr_t alpha_XXX_dmamap_or; /* XXX */
1794 /* XXX */
1795 paddr_t /* XXX */
alpha_XXX_dmamap(v)1796 alpha_XXX_dmamap(v) /* XXX */
1797 vaddr_t v; /* XXX */
1798 { /* XXX */
1799 /* XXX */
1800 return (vtophys(v) | alpha_XXX_dmamap_or); /* XXX */
1801 } /* XXX */
1802 /* XXX XXX END XXX XXX */
1803