1 /* $OpenBSD: machdep.c,v 1.196 2021/10/06 15:46:03 claudio Exp $ */ 2 /* $NetBSD: machdep.c,v 1.210 2000/06/01 17:12:38 thorpej Exp $ */ 3 4 /*- 5 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 10 * NASA Ames Research Center and by Chris G. Demetriou. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 36 * All rights reserved. 37 * 38 * Author: Chris G. Demetriou 39 * 40 * Permission to use, copy, modify and distribute this software and 41 * its documentation is hereby granted, provided that both the copyright 42 * notice and this permission notice appear in all copies of the 43 * software, derivative works or modified versions, and any portions 44 * thereof, and that both notices appear in supporting documentation. 45 * 46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 49 * 50 * Carnegie Mellon requests users of this software to return to 51 * 52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 53 * School of Computer Science 54 * Carnegie Mellon University 55 * Pittsburgh PA 15213-3890 56 * 57 * any improvements or extensions that they make and grant Carnegie the 58 * rights to redistribute these changes. 59 */ 60 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/signalvar.h> 64 #include <sys/kernel.h> 65 #include <sys/proc.h> 66 #include <sys/socket.h> 67 #include <sys/sched.h> 68 #include <sys/buf.h> 69 #include <sys/reboot.h> 70 #include <sys/device.h> 71 #include <sys/conf.h> 72 #include <sys/timeout.h> 73 #include <sys/malloc.h> 74 #include <sys/mbuf.h> 75 #include <sys/msgbuf.h> 76 #include <sys/ioctl.h> 77 #include <sys/tty.h> 78 #include <sys/user.h> 79 #include <sys/exec.h> 80 #include <sys/sysctl.h> 81 #include <sys/core.h> 82 #include <sys/kcore.h> 83 84 #include <net/if.h> 85 #include <uvm/uvm.h> 86 87 #include <machine/kcore.h> 88 #ifndef NO_IEEE 89 #include <machine/fpu.h> 90 #endif 91 #include <sys/timetc.h> 92 93 #include <sys/mount.h> 94 #include <sys/syscallargs.h> 95 96 #include <dev/cons.h> 97 98 #include <machine/autoconf.h> 99 #include <machine/cpu.h> 100 #include <machine/reg.h> 101 #include <machine/rpb.h> 102 #include <machine/prom.h> 103 #include <machine/cpuconf.h> 104 #ifndef NO_IEEE 105 #include <machine/ieeefp.h> 106 #endif 107 108 #include <dev/pci/pcivar.h> 109 110 #ifdef DDB 111 #include <machine/db_machdep.h> 112 #include <ddb/db_extern.h> 113 #include <ddb/db_interface.h> 114 #endif 115 116 #include "ioasic.h" 117 118 #if NIOASIC > 0 119 #include <machine/tc_machdep.h> 120 #include <dev/tc/tcreg.h> 121 #include <dev/tc/ioasicvar.h> 122 #endif 123 124 int cpu_dump(void); 125 int cpu_dumpsize(void); 126 u_long cpu_dump_mempagecnt(void); 127 void dumpsys(void); 128 void identifycpu(void); 129 void regdump(struct trapframe *framep); 130 void printregs(struct reg *); 131 132 struct uvm_constraint_range isa_constraint = { 0x0, 0x00ffffffUL }; 133 struct uvm_constraint_range dma_constraint = { 0x0, (paddr_t)-1 }; 134 struct uvm_constraint_range *uvm_md_constraints[] = { 135 &isa_constraint, 136 NULL 137 }; 138 139 struct vm_map *exec_map = NULL; 140 struct vm_map *phys_map = NULL; 141 142 /* 143 * safepri is a safe priority for sleep to set for a spin-wait 144 * during autoconfiguration or after a panic. 145 */ 146 int safepri = 0; 147 148 #ifdef APERTURE 149 int allowaperture = 0; 150 #endif 151 152 int totalphysmem; /* total amount of physical memory in system */ 153 int physmem; /* physical mem used by OpenBSD + some rsvd */ 154 int resvmem; /* amount of memory reserved for PROM */ 155 int unusedmem; /* amount of memory for OS that we don't use */ 156 int unknownmem; /* amount of memory with an unknown use */ 157 158 int cputype; /* system type, from the RPB */ 159 160 int bootdev_debug = 0; /* patchable, or from DDB */ 161 162 /* the following is used externally (sysctl_hw) */ 163 char machine[] = MACHINE; /* from <machine/param.h> */ 164 char cpu_model[128]; 165 166 struct user *proc0paddr; 167 168 /* Number of machine cycles per microsecond */ 169 u_int64_t cycles_per_usec; 170 171 struct bootinfo_kernel bootinfo; 172 173 struct consdev *cn_tab; 174 175 /* For built-in TCDS */ 176 #if defined(DEC_3000_300) || defined(DEC_3000_500) 177 u_int8_t dec_3000_scsiid[2], dec_3000_scsifast[2]; 178 #endif 179 180 struct platform platform; 181 182 /* for cpu_sysctl() */ 183 int alpha_unaligned_print = 1; /* warn about unaligned accesses */ 184 int alpha_unaligned_fix = 1; /* fix up unaligned accesses */ 185 int alpha_unaligned_sigbus = 1; /* SIGBUS on fixed-up accesses */ 186 #ifndef NO_IEEE 187 int alpha_fp_sync_complete = 0; /* fp fixup if sync even without /s */ 188 #endif 189 #if NIOASIC > 0 190 int alpha_led_blink = 1; 191 #endif 192 193 /* 194 * XXX This should be dynamically sized, but we have the chicken-egg problem! 195 * XXX it should also be larger than it is, because not all of the mddt 196 * XXX clusters end up being used for VM. 197 */ 198 phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; /* low size bits overloaded */ 199 int mem_cluster_cnt; 200 201 void 202 alpha_init(unused, ptb, bim, bip, biv) 203 u_long unused; 204 u_long ptb; /* PFN of current level 1 page table */ 205 u_long bim; /* bootinfo magic */ 206 u_long bip; /* bootinfo pointer */ 207 u_long biv; /* bootinfo version */ 208 { 209 extern char kernel_text[], _end[]; 210 struct mddt *mddtp; 211 struct mddt_cluster *memc; 212 int i, mddtweird; 213 struct vm_physseg *vps; 214 vaddr_t kernstart, kernend; 215 paddr_t kernstartpfn, kernendpfn, pfn0, pfn1; 216 char *p; 217 const char *bootinfo_msg; 218 const struct cpuinit *c; 219 extern caddr_t esym; 220 struct cpu_info *ci; 221 cpuid_t cpu_id; 222 223 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */ 224 225 /* 226 * Turn off interrupts (not mchecks) and floating point. 227 * Make sure the instruction and data streams are consistent. 228 */ 229 (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); 230 alpha_pal_wrfen(0); 231 ALPHA_TBIA(); 232 alpha_pal_imb(); 233 234 /* Initialize the SCB. */ 235 scb_init(); 236 237 cpu_id = cpu_number(); 238 239 #if defined(MULTIPROCESSOR) 240 /* 241 * Set our SysValue to the address of our cpu_info structure. 242 * Secondary processors do this in their spinup trampoline. 243 */ 244 alpha_pal_wrval((u_long)&cpu_info_primary); 245 cpu_info[cpu_id] = &cpu_info_primary; 246 #endif 247 248 ci = curcpu(); 249 ci->ci_cpuid = cpu_id; 250 251 /* 252 * Get critical system information (if possible, from the 253 * information provided by the boot program). 254 */ 255 bootinfo_msg = NULL; 256 if (bim == BOOTINFO_MAGIC) { 257 if (biv == 0) { /* backward compat */ 258 biv = *(u_long *)bip; 259 bip += 8; 260 } 261 switch (biv) { 262 case 1: { 263 struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip; 264 265 bootinfo.ssym = v1p->ssym; 266 bootinfo.esym = v1p->esym; 267 /* hwrpb may not be provided by boot block in v1 */ 268 if (v1p->hwrpb != NULL) { 269 bootinfo.hwrpb_phys = 270 ((struct rpb *)v1p->hwrpb)->rpb_phys; 271 bootinfo.hwrpb_size = v1p->hwrpbsize; 272 } else { 273 bootinfo.hwrpb_phys = 274 ((struct rpb *)HWRPB_ADDR)->rpb_phys; 275 bootinfo.hwrpb_size = 276 ((struct rpb *)HWRPB_ADDR)->rpb_size; 277 } 278 bcopy(v1p->boot_flags, bootinfo.boot_flags, 279 min(sizeof v1p->boot_flags, 280 sizeof bootinfo.boot_flags)); 281 bcopy(v1p->booted_kernel, bootinfo.booted_kernel, 282 min(sizeof v1p->booted_kernel, 283 sizeof bootinfo.booted_kernel)); 284 boothowto = v1p->howto; 285 /* booted dev not provided in bootinfo */ 286 init_prom_interface((struct rpb *) 287 ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys)); 288 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev, 289 sizeof bootinfo.booted_dev); 290 break; 291 } 292 default: 293 bootinfo_msg = "unknown bootinfo version"; 294 goto nobootinfo; 295 } 296 } else { 297 bootinfo_msg = "boot program did not pass bootinfo"; 298 nobootinfo: 299 bootinfo.ssym = (u_long)_end; 300 bootinfo.esym = (u_long)_end; 301 bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys; 302 bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size; 303 init_prom_interface((struct rpb *)HWRPB_ADDR); 304 prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags, 305 sizeof bootinfo.boot_flags); 306 prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel, 307 sizeof bootinfo.booted_kernel); 308 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev, 309 sizeof bootinfo.booted_dev); 310 } 311 312 esym = (caddr_t)bootinfo.esym; 313 /* 314 * Initialize the kernel's mapping of the RPB. It's needed for 315 * lots of things. 316 */ 317 hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys); 318 319 #if defined(DEC_3000_300) || defined(DEC_3000_500) 320 if (hwrpb->rpb_type == ST_DEC_3000_300 || 321 hwrpb->rpb_type == ST_DEC_3000_500) { 322 prom_getenv(PROM_E_SCSIID, dec_3000_scsiid, 323 sizeof(dec_3000_scsiid)); 324 prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast, 325 sizeof(dec_3000_scsifast)); 326 } 327 #endif 328 329 /* 330 * Remember how many cycles there are per microsecond, 331 * so that we can use delay(). Round up, for safety. 332 */ 333 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000; 334 335 /* 336 * Initialize the (temporary) bootstrap console interface, so 337 * we can use printf until the VM system starts being setup. 338 * The real console is initialized before then. 339 */ 340 init_bootstrap_console(); 341 342 /* OUTPUT NOW ALLOWED */ 343 344 /* delayed from above */ 345 if (bootinfo_msg) 346 printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n", 347 bootinfo_msg, bim, bip, biv); 348 349 /* Initialize the trap vectors on the primary processor. */ 350 trap_init(); 351 352 /* 353 * Find out what hardware we're on, and do basic initialization. 354 */ 355 cputype = hwrpb->rpb_type; 356 if (cputype < 0) { 357 /* 358 * At least some white-box systems have SRM which 359 * reports a systype that's the negative of their 360 * blue-box counterpart. 361 */ 362 cputype = -cputype; 363 } 364 c = platform_lookup(cputype); 365 if (c == NULL) { 366 platform_not_supported(); 367 /* NOTREACHED */ 368 } 369 (*c->init)(); 370 strlcpy(cpu_model, platform.model, sizeof cpu_model); 371 372 /* 373 * Initialize the real console, so that the bootstrap console is 374 * no longer necessary. 375 */ 376 (*platform.cons_init)(); 377 378 #if 0 379 /* Paranoid sanity checking */ 380 381 assert(hwrpb->rpb_primary_cpu_id == alpha_pal_whami()); 382 383 /* 384 * On single-CPU systypes, the primary should always be CPU 0, 385 * except on Alpha 8200 systems where the CPU id is related 386 * to the VID, which is related to the Turbo Laser node id. 387 */ 388 if (cputype != ST_DEC_21000) 389 assert(hwrpb->rpb_primary_cpu_id == 0); 390 #endif 391 392 /* NO MORE FIRMWARE ACCESS ALLOWED */ 393 #ifdef _PMAP_MAY_USE_PROM_CONSOLE 394 /* 395 * XXX (unless _PMAP_MAY_USE_PROM_CONSOLE is defined and 396 * XXX pmap_uses_prom_console() evaluates to non-zero.) 397 */ 398 #endif 399 400 #ifndef SMALL_KERNEL 401 /* 402 * If we run on a BWX-capable processor, override cpu_switch 403 * with a faster version. 404 * We do this now because the kernel text might be mapped 405 * read-only eventually (although this is not the case at the moment). 406 */ 407 if (alpha_implver() >= ALPHA_IMPLVER_EV5) { 408 if (~alpha_amask(ALPHA_AMASK_BWX) != 0) { 409 extern vaddr_t __bwx_switch0, __bwx_switch1, 410 __bwx_switch2, __bwx_switch3; 411 u_int32_t *dst, *src, *end; 412 413 src = (u_int32_t *)&__bwx_switch2; 414 end = (u_int32_t *)&__bwx_switch3; 415 dst = (u_int32_t *)&__bwx_switch0; 416 while (src != end) 417 *dst++ = *src++; 418 src = (u_int32_t *)&__bwx_switch1; 419 end = (u_int32_t *)&__bwx_switch2; 420 while (src != end) 421 *dst++ = *src++; 422 } 423 } 424 #endif 425 426 /* 427 * find out this system's page size 428 */ 429 if ((uvmexp.pagesize = hwrpb->rpb_page_size) != 8192) 430 panic("page size %d != 8192?!", uvmexp.pagesize); 431 432 uvm_setpagesize(); 433 434 /* 435 * Find the beginning and end of the kernel (and leave a 436 * bit of space before the beginning for the bootstrap 437 * stack). 438 */ 439 kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE; 440 kernend = (vaddr_t)round_page((vaddr_t)bootinfo.esym); 441 442 kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart)); 443 kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend)); 444 445 /* 446 * Find out how much memory is available, by looking at 447 * the memory cluster descriptors. This also tries to do 448 * its best to detect things things that have never been seen 449 * before... 450 */ 451 mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off); 452 453 /* MDDT SANITY CHECKING */ 454 mddtweird = 0; 455 if (mddtp->mddt_cluster_cnt < 2) { 456 mddtweird = 1; 457 printf("WARNING: weird number of mem clusters: %lu\n", 458 (unsigned long)mddtp->mddt_cluster_cnt); 459 } 460 461 #if 0 462 printf("Memory cluster count: %d\n", mddtp->mddt_cluster_cnt); 463 #endif 464 465 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { 466 memc = &mddtp->mddt_clusters[i]; 467 #if 0 468 printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i, 469 memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage); 470 #endif 471 totalphysmem += memc->mddt_pg_cnt; 472 if (mem_cluster_cnt < VM_PHYSSEG_MAX) { /* XXX */ 473 mem_clusters[mem_cluster_cnt].start = 474 ptoa(memc->mddt_pfn); 475 mem_clusters[mem_cluster_cnt].size = 476 ptoa(memc->mddt_pg_cnt); 477 if (memc->mddt_usage & MDDT_mbz || 478 memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */ 479 memc->mddt_usage & MDDT_PALCODE) 480 mem_clusters[mem_cluster_cnt].size |= 481 PROT_READ; 482 else 483 mem_clusters[mem_cluster_cnt].size |= 484 PROT_READ | PROT_WRITE | PROT_EXEC; 485 mem_cluster_cnt++; 486 } /* XXX else print something! */ 487 488 if (memc->mddt_usage & MDDT_mbz) { 489 mddtweird = 1; 490 printf("WARNING: mem cluster %d has weird " 491 "usage 0x%lx\n", i, (long)memc->mddt_usage); 492 unknownmem += memc->mddt_pg_cnt; 493 continue; 494 } 495 if (memc->mddt_usage & MDDT_NONVOLATILE) { 496 /* XXX should handle these... */ 497 printf("WARNING: skipping non-volatile mem " 498 "cluster %d\n", i); 499 unusedmem += memc->mddt_pg_cnt; 500 continue; 501 } 502 if (memc->mddt_usage & MDDT_PALCODE) { 503 resvmem += memc->mddt_pg_cnt; 504 continue; 505 } 506 507 /* 508 * We have a memory cluster available for system 509 * software use. We must determine if this cluster 510 * holds the kernel. 511 */ 512 #ifdef _PMAP_MAY_USE_PROM_CONSOLE 513 /* 514 * XXX If the kernel uses the PROM console, we only use the 515 * XXX memory after the kernel in the first system segment, 516 * XXX to avoid clobbering prom mapping, data, etc. 517 */ 518 if (!pmap_uses_prom_console() || physmem == 0) { 519 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 520 physmem += memc->mddt_pg_cnt; 521 pfn0 = memc->mddt_pfn; 522 pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt; 523 if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) { 524 /* 525 * Must compute the location of the kernel 526 * within the segment. 527 */ 528 #if 0 529 printf("Cluster %d contains kernel\n", i); 530 #endif 531 #ifdef _PMAP_MAY_USE_PROM_CONSOLE 532 if (!pmap_uses_prom_console()) { 533 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 534 if (pfn0 < kernstartpfn) { 535 /* 536 * There is a chunk before the kernel. 537 */ 538 #if 0 539 printf("Loading chunk before kernel: " 540 "0x%lx / 0x%lx\n", pfn0, kernstartpfn); 541 #endif 542 uvm_page_physload(pfn0, kernstartpfn, 543 pfn0, kernstartpfn, 0); 544 } 545 #ifdef _PMAP_MAY_USE_PROM_CONSOLE 546 } 547 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 548 if (kernendpfn < pfn1) { 549 /* 550 * There is a chunk after the kernel. 551 */ 552 #if 0 553 printf("Loading chunk after kernel: " 554 "0x%lx / 0x%lx\n", kernendpfn, pfn1); 555 #endif 556 uvm_page_physload(kernendpfn, pfn1, 557 kernendpfn, pfn1, 0); 558 } 559 } else { 560 /* 561 * Just load this cluster as one chunk. 562 */ 563 #if 0 564 printf("Loading cluster %d: 0x%lx / 0x%lx\n", i, 565 pfn0, pfn1); 566 #endif 567 uvm_page_physload(pfn0, pfn1, pfn0, pfn1, 0); 568 } 569 #ifdef _PMAP_MAY_USE_PROM_CONSOLE 570 } 571 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 572 } 573 574 #ifdef DEBUG 575 /* 576 * Dump out the MDDT if it looks odd... 577 */ 578 if (mddtweird) { 579 printf("\n"); 580 printf("complete memory cluster information:\n"); 581 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { 582 printf("mddt %d:\n", i); 583 printf("\tpfn %lx\n", 584 mddtp->mddt_clusters[i].mddt_pfn); 585 printf("\tcnt %lx\n", 586 mddtp->mddt_clusters[i].mddt_pg_cnt); 587 printf("\ttest %lx\n", 588 mddtp->mddt_clusters[i].mddt_pg_test); 589 printf("\tbva %lx\n", 590 mddtp->mddt_clusters[i].mddt_v_bitaddr); 591 printf("\tbpa %lx\n", 592 mddtp->mddt_clusters[i].mddt_p_bitaddr); 593 printf("\tbcksum %lx\n", 594 mddtp->mddt_clusters[i].mddt_bit_cksum); 595 printf("\tusage %lx\n", 596 mddtp->mddt_clusters[i].mddt_usage); 597 } 598 printf("\n"); 599 } 600 #endif 601 602 if (totalphysmem == 0) 603 panic("can't happen: system seems to have no memory!"); 604 #if 0 605 printf("totalphysmem = %u\n", totalphysmem); 606 printf("physmem = %u\n", physmem); 607 printf("resvmem = %d\n", resvmem); 608 printf("unusedmem = %d\n", unusedmem); 609 printf("unknownmem = %d\n", unknownmem); 610 #endif 611 612 /* 613 * Initialize error message buffer (at end of core). 614 */ 615 { 616 vsize_t sz = (vsize_t)round_page(MSGBUFSIZE); 617 vsize_t reqsz = sz; 618 619 vps = &vm_physmem[vm_nphysseg - 1]; 620 621 /* shrink so that it'll fit in the last segment */ 622 if ((vps->avail_end - vps->avail_start) < atop(sz)) 623 sz = ptoa(vps->avail_end - vps->avail_start); 624 625 vps->end -= atop(sz); 626 vps->avail_end -= atop(sz); 627 initmsgbuf((caddr_t) ALPHA_PHYS_TO_K0SEG(ptoa(vps->end)), sz); 628 629 /* Remove the last segment if it now has no pages. */ 630 if (vps->start == vps->end) 631 vm_nphysseg--; 632 633 /* warn if the message buffer had to be shrunk */ 634 if (sz != reqsz) 635 printf("WARNING: %ld bytes not available for msgbuf " 636 "in last cluster (%ld used)\n", reqsz, sz); 637 638 } 639 640 /* 641 * Init mapping for u page(s) for proc 0 642 */ 643 proc0.p_addr = proc0paddr = 644 (struct user *)pmap_steal_memory(UPAGES * PAGE_SIZE, NULL, NULL); 645 646 /* 647 * Initialize the virtual memory system, and set the 648 * page table base register in proc 0's PCB. 649 */ 650 pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT), 651 hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt); 652 653 /* 654 * Initialize the rest of proc 0's PCB, and cache its physical 655 * address. 656 */ 657 proc0.p_md.md_pcbpaddr = 658 (struct pcb *)ALPHA_K0SEG_TO_PHYS((vaddr_t)&proc0paddr->u_pcb); 659 660 /* 661 * Set the kernel sp, reserving space for an (empty) trapframe, 662 * and make proc0's trapframe pointer point to it for sanity. 663 */ 664 proc0paddr->u_pcb.pcb_hw.apcb_ksp = 665 (u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe); 666 proc0.p_md.md_tf = 667 (struct trapframe *)proc0paddr->u_pcb.pcb_hw.apcb_ksp; 668 669 /* 670 * Initialize the primary CPU's idle PCB to proc0's. In a 671 * MULTIPROCESSOR configuration, each CPU will later get 672 * its own idle PCB when autoconfiguration runs. 673 */ 674 ci->ci_idle_pcb = &proc0paddr->u_pcb; 675 ci->ci_idle_pcb_paddr = (u_long)proc0.p_md.md_pcbpaddr; 676 677 /* 678 * Look at arguments passed to us and compute boothowto. 679 */ 680 681 for (p = bootinfo.boot_flags; p && *p != '\0'; p++) { 682 /* 683 * Note that we'd really like to differentiate case here, 684 * but the Alpha AXP Architecture Reference Manual 685 * says that we shouldn't. 686 */ 687 switch (*p) { 688 case 'a': /* Ignore */ 689 case 'A': 690 break; 691 692 case 'b': /* Enter DDB as soon as the console is initialised */ 693 case 'B': 694 boothowto |= RB_KDB; 695 break; 696 697 case 'c': /* enter user kernel configuration */ 698 case 'C': 699 boothowto |= RB_CONFIG; 700 break; 701 702 #ifdef DEBUG 703 case 'd': /* crash dump immediately after autoconfig */ 704 case 'D': 705 boothowto |= RB_DUMP; 706 break; 707 #endif 708 709 case 'h': /* always halt, never reboot */ 710 case 'H': 711 boothowto |= RB_HALT; 712 break; 713 714 715 case 'n': /* askname */ 716 case 'N': 717 boothowto |= RB_ASKNAME; 718 break; 719 720 case 's': /* single-user */ 721 case 'S': 722 boothowto |= RB_SINGLE; 723 break; 724 725 case '-': 726 /* 727 * Just ignore this. It's not required, but it's 728 * common for it to be passed regardless. 729 */ 730 break; 731 732 default: 733 printf("Unrecognized boot flag '%c'.\n", *p); 734 break; 735 } 736 } 737 738 739 /* 740 * Figure out the number of cpus in the box, from RPB fields. 741 * Really. We mean it. 742 */ 743 for (ncpusfound = 0, i = 0; i < hwrpb->rpb_pcs_cnt; i++) { 744 struct pcs *pcsp; 745 746 pcsp = LOCATE_PCS(hwrpb, i); 747 if ((pcsp->pcs_flags & PCS_PP) != 0) 748 ncpusfound++; 749 } 750 751 /* 752 * Initialize debuggers, and break into them if appropriate. 753 */ 754 #ifdef DDB 755 db_machine_init(); 756 ddb_init(); 757 758 if (boothowto & RB_KDB) 759 db_enter(); 760 #endif 761 /* 762 * Figure out our clock frequency, from RPB fields. 763 */ 764 hz = hwrpb->rpb_intr_freq >> 12; 765 if (!(60 <= hz && hz <= 10240)) { 766 #ifdef DIAGNOSTIC 767 printf("WARNING: unbelievable rpb_intr_freq: %lu (%d hz)\n", 768 (unsigned long)hwrpb->rpb_intr_freq, hz); 769 #endif 770 hz = 1024; 771 } 772 tick = 1000000 / hz; 773 tick_nsec = 1000000000 / hz; 774 } 775 776 void 777 consinit() 778 { 779 780 /* 781 * Everything related to console initialization is done 782 * in alpha_init(). 783 */ 784 #if defined(DIAGNOSTIC) && defined(_PMAP_MAY_USE_PROM_CONSOLE) 785 printf("consinit: %susing prom console\n", 786 pmap_uses_prom_console() ? "" : "not "); 787 #endif 788 } 789 790 void 791 cpu_startup() 792 { 793 vaddr_t minaddr, maxaddr; 794 #if defined(DEBUG) 795 extern int pmapdebug; 796 int opmapdebug = pmapdebug; 797 798 pmapdebug = 0; 799 #endif 800 801 /* 802 * Good {morning,afternoon,evening,night}. 803 */ 804 printf(version); 805 identifycpu(); 806 printf("real mem = %lu (%luMB)\n", ptoa((psize_t)totalphysmem), 807 ptoa((psize_t)totalphysmem) / 1024 / 1024); 808 printf("rsvd mem = %lu (%luMB)\n", ptoa((psize_t)resvmem), 809 ptoa((psize_t)resvmem) / 1024 / 1024); 810 if (unusedmem) { 811 printf("WARNING: unused memory = %lu (%luMB)\n", 812 ptoa((psize_t)unusedmem), 813 ptoa((psize_t)unusedmem) / 1024 / 1024); 814 } 815 if (unknownmem) { 816 printf("WARNING: %lu (%luMB) of memory with unknown purpose\n", 817 ptoa((psize_t)unknownmem), 818 ptoa((psize_t)unknownmem) / 1024 / 1024); 819 } 820 821 /* 822 * Allocate a submap for exec arguments. This map effectively 823 * limits the number of processes exec'ing at any time. 824 */ 825 minaddr = vm_map_min(kernel_map); 826 exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 827 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL); 828 829 /* 830 * Allocate a submap for physio 831 */ 832 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 833 VM_PHYS_SIZE, 0, FALSE, NULL); 834 835 #if defined(DEBUG) 836 pmapdebug = opmapdebug; 837 #endif 838 printf("avail mem = %lu (%luMB)\n", ptoa((psize_t)uvmexp.free), 839 ptoa((psize_t)uvmexp.free) / 1024 / 1024); 840 #if 0 841 { 842 extern u_long pmap_pages_stolen; 843 844 printf("stolen memory for VM structures = %d\n", pmap_pages_stolen * PAGE_SIZE); 845 } 846 #endif 847 848 /* 849 * Set up buffers, so they can be used to read disk labels. 850 */ 851 bufinit(); 852 853 /* 854 * Configure the system. 855 */ 856 if (boothowto & RB_CONFIG) { 857 #ifdef BOOT_CONFIG 858 user_config(); 859 #else 860 printf("kernel does not support -c; continuing..\n"); 861 #endif 862 } 863 864 /* 865 * Set up the HWPCB so that it's safe to configure secondary 866 * CPUs. 867 */ 868 hwrpb_primary_init(); 869 } 870 871 /* 872 * Retrieve the platform name from the DSR. 873 */ 874 const char * 875 alpha_dsr_sysname() 876 { 877 struct dsrdb *dsr; 878 const char *sysname; 879 880 /* 881 * DSR does not exist on early HWRPB versions. 882 */ 883 if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS) 884 return (NULL); 885 886 dsr = (struct dsrdb *)(((caddr_t)hwrpb) + hwrpb->rpb_dsrdb_off); 887 sysname = (const char *)((caddr_t)dsr + (dsr->dsr_sysname_off + 888 sizeof(u_int64_t))); 889 return (sysname); 890 } 891 892 /* 893 * Lookup the system specified system variation in the provided table, 894 * returning the model string on match. 895 */ 896 const char * 897 alpha_variation_name(variation, avtp) 898 u_int64_t variation; 899 const struct alpha_variation_table *avtp; 900 { 901 int i; 902 903 for (i = 0; avtp[i].avt_model != NULL; i++) 904 if (avtp[i].avt_variation == variation) 905 return (avtp[i].avt_model); 906 return (NULL); 907 } 908 909 /* 910 * Generate a default platform name based for unknown system variations. 911 */ 912 const char * 913 alpha_unknown_sysname() 914 { 915 static char s[128]; /* safe size */ 916 917 snprintf(s, sizeof s, "%s family, unknown model variation 0x%lx", 918 platform.family, (unsigned long)hwrpb->rpb_variation & SV_ST_MASK); 919 return ((const char *)s); 920 } 921 922 void 923 identifycpu() 924 { 925 char *s; 926 int slen; 927 928 /* 929 * print out CPU identification information. 930 */ 931 printf("%s", cpu_model); 932 for(s = cpu_model; *s; ++s) 933 if(strncasecmp(s, "MHz", 3) == 0) 934 goto skipMHz; 935 printf(", %luMHz", (unsigned long)hwrpb->rpb_cc_freq / 1000000); 936 skipMHz: 937 /* fill in hw_serial if a serial number is known */ 938 slen = strlen(hwrpb->rpb_ssn) + 1; 939 if (slen > 1) { 940 hw_serial = malloc(slen, M_SYSCTL, M_NOWAIT); 941 if (hw_serial) 942 strlcpy(hw_serial, (char *)hwrpb->rpb_ssn, slen); 943 } 944 945 printf("\n"); 946 printf("%lu byte page size, %d processor%s.\n", 947 (unsigned long)hwrpb->rpb_page_size, ncpusfound, 948 ncpusfound == 1 ? "" : "s"); 949 #if 0 950 /* this is not particularly useful! */ 951 printf("variation: 0x%lx, revision 0x%lx\n", 952 hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision); 953 #endif 954 } 955 956 int waittime = -1; 957 struct pcb dumppcb; 958 959 __dead void 960 boot(int howto) 961 { 962 #if defined(MULTIPROCESSOR) 963 u_long wait_mask; 964 int i; 965 #endif 966 967 if ((howto & RB_RESET) != 0) 968 goto doreset; 969 970 if (cold) { 971 if ((howto & RB_USERREQ) == 0) 972 howto |= RB_HALT; 973 goto haltsys; 974 } 975 976 if ((boothowto & RB_HALT) != 0) 977 howto |= RB_HALT; 978 979 boothowto = howto; 980 if ((howto & RB_NOSYNC) == 0 && waittime < 0) { 981 waittime = 0; 982 vfs_shutdown(curproc); 983 984 if ((howto & RB_TIMEBAD) == 0) { 985 resettodr(); 986 } else { 987 printf("WARNING: not updating battery clock\n"); 988 } 989 } 990 if_downall(); 991 992 uvm_shutdown(); 993 splhigh(); 994 cold = 1; 995 996 #if defined(MULTIPROCESSOR) 997 /* 998 * Halt all other CPUs. 999 */ 1000 wait_mask = (1UL << hwrpb->rpb_primary_cpu_id); 1001 alpha_broadcast_ipi(ALPHA_IPI_HALT); 1002 1003 /* Ensure any CPUs paused by DDB resume execution so they can halt */ 1004 cpus_paused = 0; 1005 1006 for (i = 0; i < 10000; i++) { 1007 alpha_mb(); 1008 if (cpus_running == wait_mask) 1009 break; 1010 delay(1000); 1011 } 1012 alpha_mb(); 1013 if (cpus_running != wait_mask) 1014 printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n", 1015 cpus_running); 1016 #endif 1017 1018 if ((howto & RB_DUMP) != 0) 1019 dumpsys(); 1020 1021 haltsys: 1022 config_suspend_all(DVACT_POWERDOWN); 1023 1024 #ifdef BOOTKEY 1025 printf("hit any key to %s...\n", 1026 (howto & RB_HALT) != 0 ? "halt" : "reboot"); 1027 cnpollc(1); /* for proper keyboard command handling */ 1028 cngetc(); 1029 cnpollc(0); 1030 printf("\n"); 1031 #endif 1032 1033 /* Finally, powerdown/halt/reboot the system. */ 1034 if ((howto & RB_POWERDOWN) != 0 && 1035 platform.powerdown != NULL) { 1036 (*platform.powerdown)(); 1037 printf("WARNING: powerdown failed!\n"); 1038 } 1039 doreset: 1040 printf("%s\n\n", 1041 (howto & RB_HALT) != 0 ? "halted." : "rebooting..."); 1042 prom_halt((howto & RB_HALT) != 0); 1043 for (;;) 1044 continue; 1045 /* NOTREACHED */ 1046 } 1047 1048 /* 1049 * These variables are needed by /sbin/savecore 1050 */ 1051 u_long dumpmag = 0x8fca0101; /* magic number */ 1052 int dumpsize = 0; /* pages */ 1053 long dumplo = 0; /* blocks */ 1054 1055 /* 1056 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers. 1057 */ 1058 int 1059 cpu_dumpsize() 1060 { 1061 int size; 1062 1063 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) + 1064 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t)); 1065 if (roundup(size, dbtob(1)) != dbtob(1)) 1066 return -1; 1067 1068 return (1); 1069 } 1070 1071 /* 1072 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped. 1073 */ 1074 u_long 1075 cpu_dump_mempagecnt() 1076 { 1077 u_long i, n; 1078 1079 n = 0; 1080 for (i = 0; i < mem_cluster_cnt; i++) 1081 n += atop(mem_clusters[i].size); 1082 return (n); 1083 } 1084 1085 /* 1086 * cpu_dump: dump machine-dependent kernel core dump headers. 1087 */ 1088 int 1089 cpu_dump() 1090 { 1091 int (*dump)(dev_t, daddr_t, caddr_t, size_t); 1092 char buf[dbtob(1)]; 1093 kcore_seg_t *segp; 1094 cpu_kcore_hdr_t *cpuhdrp; 1095 phys_ram_seg_t *memsegp; 1096 int i; 1097 1098 dump = bdevsw[major(dumpdev)].d_dump; 1099 1100 bzero(buf, sizeof buf); 1101 segp = (kcore_seg_t *)buf; 1102 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))]; 1103 memsegp = (phys_ram_seg_t *)&buf[ALIGN(sizeof(*segp)) + 1104 ALIGN(sizeof(*cpuhdrp))]; 1105 1106 /* 1107 * Generate a segment header. 1108 */ 1109 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 1110 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); 1111 1112 /* 1113 * Add the machine-dependent header info. 1114 */ 1115 cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map); 1116 cpuhdrp->page_size = PAGE_SIZE; 1117 cpuhdrp->nmemsegs = mem_cluster_cnt; 1118 1119 /* 1120 * Fill in the memory segment descriptors. 1121 */ 1122 for (i = 0; i < mem_cluster_cnt; i++) { 1123 memsegp[i].start = mem_clusters[i].start; 1124 memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK; 1125 } 1126 1127 return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1))); 1128 } 1129 1130 /* 1131 * This is called by main to set dumplo and dumpsize. 1132 * Dumps always skip the first PAGE_SIZE of disk space 1133 * in case there might be a disk label stored there. 1134 * If there is extra space, put dump at the end to 1135 * reduce the chance that swapping trashes it. 1136 */ 1137 void 1138 dumpconf(void) 1139 { 1140 int nblks, dumpblks; /* size of dump area */ 1141 1142 if (dumpdev == NODEV || 1143 (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0) 1144 return; 1145 if (nblks <= ctod(1)) 1146 return; 1147 1148 dumpblks = cpu_dumpsize(); 1149 if (dumpblks < 0) 1150 return; 1151 dumpblks += ctod(cpu_dump_mempagecnt()); 1152 1153 /* If dump won't fit (incl. room for possible label), punt. */ 1154 if (dumpblks > (nblks - ctod(1))) 1155 return; 1156 1157 /* Put dump at end of partition */ 1158 dumplo = nblks - dumpblks; 1159 1160 /* dumpsize is in page units, and doesn't include headers. */ 1161 dumpsize = cpu_dump_mempagecnt(); 1162 } 1163 1164 /* 1165 * Dump the kernel's image to the swap partition. 1166 */ 1167 #define BYTES_PER_DUMP PAGE_SIZE 1168 1169 void 1170 dumpsys() 1171 { 1172 u_long totalbytesleft, bytes, i, n, memcl; 1173 u_long maddr; 1174 int psize; 1175 daddr_t blkno; 1176 int (*dump)(dev_t, daddr_t, caddr_t, size_t); 1177 int error; 1178 extern int msgbufmapped; 1179 1180 /* Save registers. */ 1181 savectx(&dumppcb); 1182 1183 msgbufmapped = 0; /* don't record dump msgs in msgbuf */ 1184 if (dumpdev == NODEV) 1185 return; 1186 1187 /* 1188 * For dumps during autoconfiguration, 1189 * if dump device has already configured... 1190 */ 1191 if (dumpsize == 0) 1192 dumpconf(); 1193 if (dumplo <= 0) { 1194 printf("\ndump to dev %u,%u not possible\n", major(dumpdev), 1195 minor(dumpdev)); 1196 return; 1197 } 1198 printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev), 1199 minor(dumpdev), dumplo); 1200 1201 psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev); 1202 printf("dump "); 1203 if (psize == -1) { 1204 printf("area unavailable\n"); 1205 return; 1206 } 1207 1208 /* XXX should purge all outstanding keystrokes. */ 1209 1210 if ((error = cpu_dump()) != 0) 1211 goto err; 1212 1213 totalbytesleft = ptoa(cpu_dump_mempagecnt()); 1214 blkno = dumplo + cpu_dumpsize(); 1215 dump = bdevsw[major(dumpdev)].d_dump; 1216 error = 0; 1217 1218 for (memcl = 0; memcl < mem_cluster_cnt; memcl++) { 1219 maddr = mem_clusters[memcl].start; 1220 bytes = mem_clusters[memcl].size & ~PAGE_MASK; 1221 1222 for (i = 0; i < bytes; i += n, totalbytesleft -= n) { 1223 1224 /* Print out how many MBs we to go. */ 1225 if ((totalbytesleft % (1024*1024)) == 0) 1226 printf("%ld ", totalbytesleft / (1024 * 1024)); 1227 1228 /* Limit size for next transfer. */ 1229 n = bytes - i; 1230 if (n > BYTES_PER_DUMP) 1231 n = BYTES_PER_DUMP; 1232 1233 error = (*dump)(dumpdev, blkno, 1234 (caddr_t)ALPHA_PHYS_TO_K0SEG(maddr), n); 1235 if (error) 1236 goto err; 1237 maddr += n; 1238 blkno += btodb(n); /* XXX? */ 1239 1240 /* XXX should look for keystrokes, to cancel. */ 1241 } 1242 } 1243 1244 err: 1245 switch (error) { 1246 #ifdef DEBUG 1247 case ENXIO: 1248 printf("device bad\n"); 1249 break; 1250 1251 case EFAULT: 1252 printf("device not ready\n"); 1253 break; 1254 1255 case EINVAL: 1256 printf("area improper\n"); 1257 break; 1258 1259 case EIO: 1260 printf("i/o error\n"); 1261 break; 1262 1263 case EINTR: 1264 printf("aborted from console\n"); 1265 break; 1266 #endif /* DEBUG */ 1267 case 0: 1268 printf("succeeded\n"); 1269 break; 1270 1271 default: 1272 printf("error %d\n", error); 1273 break; 1274 } 1275 printf("\n\n"); 1276 delay(1000); 1277 } 1278 1279 void 1280 frametoreg(framep, regp) 1281 struct trapframe *framep; 1282 struct reg *regp; 1283 { 1284 1285 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0]; 1286 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0]; 1287 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1]; 1288 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2]; 1289 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3]; 1290 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4]; 1291 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5]; 1292 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6]; 1293 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7]; 1294 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0]; 1295 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1]; 1296 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2]; 1297 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3]; 1298 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4]; 1299 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5]; 1300 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6]; 1301 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0]; 1302 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1]; 1303 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2]; 1304 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3]; 1305 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4]; 1306 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5]; 1307 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8]; 1308 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9]; 1309 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10]; 1310 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11]; 1311 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA]; 1312 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12]; 1313 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT]; 1314 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP]; 1315 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */ 1316 regp->r_regs[R_ZERO] = 0; 1317 } 1318 1319 void 1320 regtoframe(regp, framep) 1321 struct reg *regp; 1322 struct trapframe *framep; 1323 { 1324 1325 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0]; 1326 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0]; 1327 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1]; 1328 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2]; 1329 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3]; 1330 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4]; 1331 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5]; 1332 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6]; 1333 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7]; 1334 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0]; 1335 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1]; 1336 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2]; 1337 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3]; 1338 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4]; 1339 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5]; 1340 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6]; 1341 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0]; 1342 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1]; 1343 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2]; 1344 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3]; 1345 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4]; 1346 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5]; 1347 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8]; 1348 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9]; 1349 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10]; 1350 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11]; 1351 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA]; 1352 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12]; 1353 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT]; 1354 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP]; 1355 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */ 1356 /* ??? = regp->r_regs[R_ZERO]; */ 1357 } 1358 1359 void 1360 printregs(regp) 1361 struct reg *regp; 1362 { 1363 int i; 1364 1365 for (i = 0; i < 32; i++) 1366 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i], 1367 i & 1 ? "\n" : "\t"); 1368 } 1369 1370 void 1371 regdump(framep) 1372 struct trapframe *framep; 1373 { 1374 struct reg reg; 1375 1376 frametoreg(framep, ®); 1377 reg.r_regs[R_SP] = alpha_pal_rdusp(); 1378 1379 printf("REGISTERS:\n"); 1380 printregs(®); 1381 } 1382 1383 /* 1384 * Send an interrupt to process. 1385 */ 1386 int 1387 sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip, 1388 int info, int onstack) 1389 { 1390 struct proc *p = curproc; 1391 struct sigcontext ksc, *scp; 1392 struct fpreg *fpregs = (struct fpreg *)&ksc.sc_fpregs; 1393 struct trapframe *frame; 1394 unsigned long oldsp; 1395 int fsize, rndfsize, kscsize; 1396 siginfo_t *sip; 1397 1398 oldsp = alpha_pal_rdusp(); 1399 frame = p->p_md.md_tf; 1400 fsize = sizeof ksc; 1401 rndfsize = ((fsize + 15) / 16) * 16; 1402 kscsize = rndfsize; 1403 1404 if (info) { 1405 fsize += sizeof *ksip; 1406 rndfsize = ((fsize + 15) / 16) * 16; 1407 } 1408 1409 /* 1410 * Allocate space for the signal handler context. 1411 */ 1412 if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 && 1413 !sigonstack(oldsp) && onstack) 1414 scp = (struct sigcontext *) 1415 (trunc_page((vaddr_t)p->p_sigstk.ss_sp + p->p_sigstk.ss_size) 1416 - rndfsize); 1417 else 1418 scp = (struct sigcontext *)(oldsp - rndfsize); 1419 1420 /* 1421 * Build the signal context to be used by sigreturn. 1422 */ 1423 bzero(&ksc, sizeof(ksc)); 1424 ksc.sc_mask = mask; 1425 ksc.sc_pc = frame->tf_regs[FRAME_PC]; 1426 ksc.sc_ps = frame->tf_regs[FRAME_PS]; 1427 1428 /* copy the registers. */ 1429 frametoreg(frame, (struct reg *)ksc.sc_regs); 1430 ksc.sc_regs[R_SP] = oldsp; 1431 1432 /* save the floating-point state, if necessary, then copy it. */ 1433 if (p->p_addr->u_pcb.pcb_fpcpu != NULL) 1434 fpusave_proc(p, 1); 1435 ksc.sc_ownedfp = p->p_md.md_flags & MDP_FPUSED; 1436 memcpy(/*ksc.sc_*/fpregs, &p->p_addr->u_pcb.pcb_fp, 1437 sizeof(struct fpreg)); 1438 #ifndef NO_IEEE 1439 ksc.sc_fp_control = alpha_read_fp_c(p); 1440 #else 1441 ksc.sc_fp_control = 0; 1442 #endif 1443 memset(ksc.sc_reserved, 0, sizeof ksc.sc_reserved); /* XXX */ 1444 memset(ksc.sc_xxx, 0, sizeof ksc.sc_xxx); /* XXX */ 1445 1446 if (info) { 1447 sip = (void *)scp + kscsize; 1448 if (copyout(ksip, (caddr_t)sip, fsize - kscsize) != 0) 1449 return 1; 1450 } else 1451 sip = NULL; 1452 1453 ksc.sc_cookie = (long)scp ^ p->p_p->ps_sigcookie; 1454 if (copyout((caddr_t)&ksc, (caddr_t)scp, kscsize) != 0) 1455 return 1; 1456 1457 /* 1458 * Set up the registers to return to sigcode. 1459 */ 1460 frame->tf_regs[FRAME_PC] = p->p_p->ps_sigcode; 1461 frame->tf_regs[FRAME_A0] = sig; 1462 frame->tf_regs[FRAME_A1] = (u_int64_t)sip; 1463 frame->tf_regs[FRAME_A2] = (u_int64_t)scp; 1464 frame->tf_regs[FRAME_T12] = (u_int64_t)catcher; /* t12 is pv */ 1465 alpha_pal_wrusp((unsigned long)scp); 1466 1467 return 0; 1468 } 1469 1470 /* 1471 * System call to cleanup state after a signal 1472 * has been taken. Reset signal mask and 1473 * stack state from context left by sendsig (above). 1474 * Return to previous pc and psl as specified by 1475 * context left by sendsig. Check carefully to 1476 * make sure that the user has not modified the 1477 * psl to gain improper privileges or to cause 1478 * a machine fault. 1479 */ 1480 /* ARGSUSED */ 1481 int 1482 sys_sigreturn(struct proc *p, void *v, register_t *retval) 1483 { 1484 struct sys_sigreturn_args /* { 1485 syscallarg(struct sigcontext *) sigcntxp; 1486 } */ *uap = v; 1487 struct sigcontext ksc, *scp = SCARG(uap, sigcntxp); 1488 struct fpreg *fpregs = (struct fpreg *)&ksc.sc_fpregs; 1489 int error; 1490 1491 if (PROC_PC(p) != p->p_p->ps_sigcoderet) { 1492 sigexit(p, SIGILL); 1493 return (EPERM); 1494 } 1495 1496 if ((error = copyin(scp, &ksc, sizeof(ksc))) != 0) 1497 return (error); 1498 1499 if (ksc.sc_cookie != ((long)scp ^ p->p_p->ps_sigcookie)) { 1500 sigexit(p, SIGILL); 1501 return (EFAULT); 1502 } 1503 1504 /* Prevent reuse of the sigcontext cookie */ 1505 ksc.sc_cookie = 0; 1506 (void)copyout(&ksc.sc_cookie, (caddr_t)scp + 1507 offsetof(struct sigcontext, sc_cookie), sizeof (ksc.sc_cookie)); 1508 1509 /* 1510 * Restore the user-supplied information 1511 */ 1512 p->p_sigmask = ksc.sc_mask &~ sigcantmask; 1513 1514 p->p_md.md_tf->tf_regs[FRAME_PC] = ksc.sc_pc; 1515 p->p_md.md_tf->tf_regs[FRAME_PS] = 1516 (ksc.sc_ps | ALPHA_PSL_USERSET) & ~ALPHA_PSL_USERCLR; 1517 1518 regtoframe((struct reg *)ksc.sc_regs, p->p_md.md_tf); 1519 alpha_pal_wrusp(ksc.sc_regs[R_SP]); 1520 1521 /* XXX ksc.sc_ownedfp ? */ 1522 if (p->p_addr->u_pcb.pcb_fpcpu != NULL) 1523 fpusave_proc(p, 0); 1524 memcpy(&p->p_addr->u_pcb.pcb_fp, /*ksc.sc_*/fpregs, 1525 sizeof(struct fpreg)); 1526 #ifndef NO_IEEE 1527 p->p_addr->u_pcb.pcb_fp.fpr_cr = ksc.sc_fpcr; 1528 p->p_md.md_flags = ksc.sc_fp_control & MDP_FP_C; 1529 #endif 1530 return (EJUSTRETURN); 1531 } 1532 1533 /* 1534 * machine dependent system variables. 1535 */ 1536 int 1537 cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 1538 size_t newlen, struct proc *p) 1539 { 1540 dev_t consdev; 1541 #if NIOASIC > 0 1542 int oldval, ret; 1543 #endif 1544 1545 if (name[0] != CPU_CHIPSET && namelen != 1) 1546 return (ENOTDIR); /* overloaded */ 1547 1548 switch (name[0]) { 1549 case CPU_CONSDEV: 1550 if (cn_tab != NULL) 1551 consdev = cn_tab->cn_dev; 1552 else 1553 consdev = NODEV; 1554 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev, 1555 sizeof consdev)); 1556 1557 #ifndef SMALL_KERNEL 1558 case CPU_UNALIGNED_PRINT: 1559 return (sysctl_int(oldp, oldlenp, newp, newlen, 1560 &alpha_unaligned_print)); 1561 1562 case CPU_UNALIGNED_FIX: 1563 return (sysctl_int(oldp, oldlenp, newp, newlen, 1564 &alpha_unaligned_fix)); 1565 1566 case CPU_UNALIGNED_SIGBUS: 1567 return (sysctl_int(oldp, oldlenp, newp, newlen, 1568 &alpha_unaligned_sigbus)); 1569 1570 case CPU_BOOTED_KERNEL: 1571 return (sysctl_rdstring(oldp, oldlenp, newp, 1572 bootinfo.booted_kernel)); 1573 1574 case CPU_CHIPSET: 1575 return (alpha_sysctl_chipset(name + 1, namelen - 1, oldp, 1576 oldlenp)); 1577 #endif /* SMALL_KERNEL */ 1578 1579 #ifndef NO_IEEE 1580 case CPU_FP_SYNC_COMPLETE: 1581 return (sysctl_int(oldp, oldlenp, newp, newlen, 1582 &alpha_fp_sync_complete)); 1583 #endif 1584 case CPU_ALLOWAPERTURE: 1585 #ifdef APERTURE 1586 if (securelevel > 0) 1587 return (sysctl_int_lower(oldp, oldlenp, newp, newlen, 1588 &allowaperture)); 1589 else 1590 return (sysctl_int(oldp, oldlenp, newp, newlen, 1591 &allowaperture)); 1592 #else 1593 return (sysctl_rdint(oldp, oldlenp, newp, 0)); 1594 #endif 1595 #if NIOASIC > 0 1596 case CPU_LED_BLINK: 1597 oldval = alpha_led_blink; 1598 ret = sysctl_int(oldp, oldlenp, newp, newlen, &alpha_led_blink); 1599 if (oldval != alpha_led_blink) 1600 ioasic_led_blink(NULL); 1601 return (ret); 1602 #endif 1603 default: 1604 return (EOPNOTSUPP); 1605 } 1606 /* NOTREACHED */ 1607 } 1608 1609 /* 1610 * Set registers on exec. 1611 */ 1612 void 1613 setregs(p, pack, stack, retval) 1614 register struct proc *p; 1615 struct exec_package *pack; 1616 u_long stack; 1617 register_t *retval; 1618 { 1619 struct trapframe *tfp = p->p_md.md_tf; 1620 #ifdef DEBUG 1621 int i; 1622 #endif 1623 1624 #ifdef DEBUG 1625 /* 1626 * Crash and dump, if the user requested it. 1627 */ 1628 if (boothowto & RB_DUMP) 1629 panic("crash requested by boot flags"); 1630 #endif 1631 1632 #ifdef DEBUG 1633 for (i = 0; i < FRAME_SIZE; i++) 1634 tfp->tf_regs[i] = 0xbabefacedeadbeef; 1635 tfp->tf_regs[FRAME_A1] = 0; 1636 #else 1637 bzero(tfp->tf_regs, FRAME_SIZE * sizeof tfp->tf_regs[0]); 1638 #endif 1639 bzero(&p->p_addr->u_pcb.pcb_fp, sizeof p->p_addr->u_pcb.pcb_fp); 1640 alpha_pal_wrusp(stack); 1641 tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET; 1642 tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3; 1643 1644 tfp->tf_regs[FRAME_A0] = stack; 1645 /* a1 and a2 already zeroed */ 1646 tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC]; /* a.k.a. PV */ 1647 1648 p->p_md.md_flags &= ~MDP_FPUSED; 1649 #ifndef NO_IEEE 1650 if (__predict_true((p->p_md.md_flags & IEEE_INHERIT) == 0)) { 1651 p->p_md.md_flags &= ~MDP_FP_C; 1652 p->p_addr->u_pcb.pcb_fp.fpr_cr = FPCR_DYN(FP_RN); 1653 } 1654 #endif 1655 if (p->p_addr->u_pcb.pcb_fpcpu != NULL) 1656 fpusave_proc(p, 0); 1657 1658 retval[1] = 0; 1659 } 1660 1661 /* 1662 * Release the FPU. 1663 */ 1664 void 1665 fpusave_cpu(struct cpu_info *ci, int save) 1666 { 1667 struct proc *p; 1668 #if defined(MULTIPROCESSOR) 1669 int s; 1670 #endif 1671 1672 KDASSERT(ci == curcpu()); 1673 1674 #if defined(MULTIPROCESSOR) 1675 /* Need to block IPIs */ 1676 s = splipi(); 1677 atomic_setbits_ulong(&ci->ci_flags, CPUF_FPUSAVE); 1678 #endif 1679 1680 p = ci->ci_fpcurproc; 1681 if (p == NULL) 1682 goto out; 1683 1684 if (save) { 1685 alpha_pal_wrfen(1); 1686 savefpstate(&p->p_addr->u_pcb.pcb_fp); 1687 } 1688 1689 alpha_pal_wrfen(0); 1690 1691 p->p_addr->u_pcb.pcb_fpcpu = NULL; 1692 ci->ci_fpcurproc = NULL; 1693 1694 out: 1695 #if defined(MULTIPROCESSOR) 1696 atomic_clearbits_ulong(&ci->ci_flags, CPUF_FPUSAVE); 1697 alpha_pal_swpipl(s); 1698 #endif 1699 return; 1700 } 1701 1702 /* 1703 * Synchronize FP state for this process. 1704 */ 1705 void 1706 fpusave_proc(struct proc *p, int save) 1707 { 1708 struct cpu_info *ci = curcpu(); 1709 struct cpu_info *oci; 1710 #if defined(MULTIPROCESSOR) 1711 u_long ipi = save ? ALPHA_IPI_SYNCH_FPU : ALPHA_IPI_DISCARD_FPU; 1712 int s; 1713 #endif 1714 1715 KDASSERT(p->p_addr != NULL); 1716 1717 for (;;) { 1718 #if defined(MULTIPROCESSOR) 1719 /* Need to block IPIs */ 1720 s = splipi(); 1721 #endif 1722 1723 oci = p->p_addr->u_pcb.pcb_fpcpu; 1724 if (oci == NULL) { 1725 #if defined(MULTIPROCESSOR) 1726 alpha_pal_swpipl(s); 1727 #endif 1728 return; 1729 } 1730 1731 #if defined(MULTIPROCESSOR) 1732 if (oci == ci) { 1733 KASSERT(ci->ci_fpcurproc == p); 1734 alpha_pal_swpipl(s); 1735 fpusave_cpu(ci, save); 1736 return; 1737 } 1738 1739 /* 1740 * The other cpu may still be running and could have 1741 * discarded the fpu context on its own. 1742 */ 1743 if (oci->ci_fpcurproc != p) { 1744 alpha_pal_swpipl(s); 1745 continue; 1746 } 1747 1748 alpha_send_ipi(oci->ci_cpuid, ipi); 1749 alpha_pal_swpipl(s); 1750 1751 while (p->p_addr->u_pcb.pcb_fpcpu != NULL) 1752 CPU_BUSY_CYCLE(); 1753 #else 1754 KASSERT(ci->ci_fpcurproc == p); 1755 fpusave_cpu(ci, save); 1756 #endif /* MULTIPROCESSOR */ 1757 1758 break; 1759 } 1760 } 1761 1762 int 1763 spl0() 1764 { 1765 1766 if (ssir) { 1767 (void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT); 1768 softintr_dispatch(); 1769 } 1770 1771 return (alpha_pal_swpipl(ALPHA_PSL_IPL_0)); 1772 } 1773 1774 /* 1775 * Wait "n" microseconds. 1776 */ 1777 void 1778 delay(n) 1779 unsigned long n; 1780 { 1781 unsigned long pcc0, pcc1, curcycle, cycles, usec; 1782 1783 if (n == 0) 1784 return; 1785 1786 pcc0 = alpha_rpcc() & 0xffffffffUL; 1787 cycles = 0; 1788 usec = 0; 1789 1790 while (usec <= n) { 1791 /* 1792 * Get the next CPU cycle count - assumes that we can not 1793 * have had more than one 32 bit overflow. 1794 */ 1795 pcc1 = alpha_rpcc() & 0xffffffffUL; 1796 if (pcc1 < pcc0) 1797 curcycle = (pcc1 + 0x100000000UL) - pcc0; 1798 else 1799 curcycle = pcc1 - pcc0; 1800 1801 /* 1802 * We now have the number of processor cycles since we 1803 * last checked. Add the current cycle count to the 1804 * running total. If it's over cycles_per_usec, increment 1805 * the usec counter. 1806 */ 1807 cycles += curcycle; 1808 while (cycles >= cycles_per_usec) { 1809 usec++; 1810 cycles -= cycles_per_usec; 1811 } 1812 pcc0 = pcc1; 1813 } 1814 } 1815 1816 int 1817 alpha_pa_access(pa) 1818 u_long pa; 1819 { 1820 int i; 1821 1822 for (i = 0; i < mem_cluster_cnt; i++) { 1823 if (pa < mem_clusters[i].start) 1824 continue; 1825 if ((pa - mem_clusters[i].start) >= 1826 (mem_clusters[i].size & ~PAGE_MASK)) 1827 continue; 1828 return (mem_clusters[i].size & PAGE_MASK); /* prot */ 1829 } 1830 1831 /* 1832 * Address is not a memory address. If we're secure, disallow 1833 * access. Otherwise, grant read/write. 1834 */ 1835 if (securelevel > 0) 1836 return (PROT_NONE); 1837 else 1838 return (PROT_READ | PROT_WRITE); 1839 } 1840 1841 /* XXX XXX BEGIN XXX XXX */ 1842 paddr_t alpha_XXX_dmamap_or; /* XXX */ 1843 /* XXX */ 1844 paddr_t /* XXX */ 1845 alpha_XXX_dmamap(v) /* XXX */ 1846 vaddr_t v; /* XXX */ 1847 { /* XXX */ 1848 /* XXX */ 1849 return (vtophys(v) | alpha_XXX_dmamap_or); /* XXX */ 1850 } /* XXX */ 1851 /* XXX XXX END XXX XXX */ 1852