1 /* $NetBSD: machdep.c,v 1.155 2002/05/14 02:58:34 matt Exp $ */ 2 3 /* 4 * Copyright (c) 1994, 1995 Gordon W. Ross 5 * Copyright (c) 1993 Adam Glass 6 * Copyright (c) 1988 University of Utah. 7 * Copyright (c) 1982, 1986, 1990, 1993 8 * The Regents of the University of California. All rights reserved. 9 * 10 * This code is derived from software contributed to Berkeley by 11 * the Systems Programming Group of the University of Utah Computer 12 * Science Department. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 3. All advertising materials mentioning features or use of this software 23 * must display the following acknowledgement: 24 * This product includes software developed by the University of 25 * California, Berkeley and its contributors. 26 * 4. Neither the name of the University nor the names of its contributors 27 * may be used to endorse or promote products derived from this software 28 * without specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 33 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 40 * SUCH DAMAGE. 41 * 42 * from: Utah Hdr: machdep.c 1.74 92/12/20 43 * from: @(#)machdep.c 8.10 (Berkeley) 4/20/94 44 */ 45 46 #include "opt_ddb.h" 47 #include "opt_kgdb.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/kernel.h> 52 #include <sys/map.h> 53 #include <sys/proc.h> 54 #include <sys/buf.h> 55 #include <sys/reboot.h> 56 #include <sys/conf.h> 57 #include <sys/file.h> 58 #include <sys/clist.h> 59 #include <sys/device.h> 60 #include <sys/malloc.h> 61 #include <sys/mbuf.h> 62 #include <sys/msgbuf.h> 63 #include <sys/ioctl.h> 64 #include <sys/tty.h> 65 #include <sys/mount.h> 66 #include <sys/user.h> 67 #include <sys/exec.h> 68 #include <sys/core.h> 69 #include <sys/kcore.h> 70 #include <sys/vnode.h> 71 #include <sys/syscallargs.h> 72 #ifdef KGDB 73 #include <sys/kgdb.h> 74 #endif 75 76 #include <uvm/uvm.h> /* XXX: not _extern ... 
need vm_map_create */

#include <sys/sysctl.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/idprom.h>
#include <machine/kcore.h>
#include <machine/reg.h>
#include <machine/psl.h>
#include <machine/pte.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#include <sun3/sun3/machdep.h>

/* Defined in locore.s */
extern char kernel_text[];
/* Defined by the linker */
extern char etext[];

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

/* Submaps carved out of kernel_map by cpu_startup() below. */
struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

int physmem;		/* size of physical memory, in pages (see cpu_dumpconf) */
int fputype;		/* FPU type; non-zero means an FPU is present (see setregs) */
caddr_t msgbufaddr;	/* kernel message buffer VA, set in cpu_startup() */

/* Virtual page frame for /dev/mem (see mem.c) */
vaddr_t vmmap;

union sun3sir sun3sir;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int safepri = PSL_LOWIPL;

/* Our private scratch page for dumping the MMU. */
static vaddr_t dumppage;

static void identifycpu __P((void));
static void initcpu __P((void));

/*
 * Console initialization: called early on from main,
 * before vm init or cpu_startup.  This system is able
 * to use the console for output immediately (via PROM)
 * but can not use it for input until after this point.
 *
 * Also initializes the DDB symbol table (when configured)
 * and optionally drops into the debugger if RB_KDB is set.
 */
void
consinit()
{

	/*
	 * Switch from the PROM console (output only)
	 * to our own console driver.
	 */
	cninit();

#ifdef DDB
	{
		/* Symbol table bounds are patched in by the boot loader. */
		extern int nsym;
		extern char *ssym, *esym;

		ddb_init(nsym, ssym, esym);
	}
#endif /* DDB */

	/*
	 * Now that the console can do input as well as
	 * output, consider stopping for a debugger.
	 */
	if (boothowto & RB_KDB) {
#ifdef KGDB
		/* XXX - Ask on console for kgdb_dev? */
		/* Note: this will just return if kgdb_dev==NODEV */
		kgdb_connect(1);
#else	/* KGDB */
		/* Either DDB or no debugger (just PROM). */
		Debugger();
#endif	/* KGDB */
	}
}

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 */
void
cpu_startup()
{
	caddr_t v;
	int sz, i;
	vsize_t size;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* scratch buffer for format_bytes() output */

	/*
	 * Initialize message buffer (for kernel printf).
	 * This is put in physical page zero so it will
	 * always be in the same place after a reboot.
	 * Its mapping was prepared in pmap_bootstrap().
	 * Also, offset some to avoid PROM scribbles.
	 */
	v = (caddr_t) KERNBASE;
	msgbufaddr = (caddr_t)(v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	initfpu();	/* also prints FPU type */

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Get scratch page for dumpsys().
	 * Allocated here (rather than lazily in dumpsys) so a
	 * crash dump does not depend on a working allocator.
	 */
	if ((dumppage = uvm_km_alloc(kernel_map, NBPG)) == 0)
		panic("startup: alloc dumppage");

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * allocsys(NULL) sizes the tables; allocsys(v) carves them out.
	 */
	sz = (int)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Reserve VA only here (PROT_NONE); pages are faulted in below.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	/* One pmap_update() after the whole batch of kenter calls. */
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * We don't use a submap for physio, and use a separate map
	 * for DVMA allocations.  Our vmapbuf just maps pages into
	 * the kernel map (any kernel mapping is OK) and then the
	 * device drivers clone the kernel mappings into DVMA space.
	 */

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Allocate a virtual page (for use by /dev/mem)
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 * (Also borrowed by dumpsys() to map physical pages.)
	 */
	vmmap = uvm_km_valloc_wait(kernel_map, NBPG);

	/*
	 * Create the DVMA maps.
	 */
	dvma_init();

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}

/*
 * Set registers on exec.
 * Builds the initial user register state in the process's trap
 * frame: all data/address registers cleared except a2 (ps_strings
 * pointer) and the stack pointer, PC at the program entry point.
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct trapframe *tf = (struct trapframe *)p->p_md.md_regs;

	tf->tf_sr = PSL_USERSET;
	tf->tf_pc = pack->ep_entry & ~1;	/* keep the PC even */
	tf->tf_regs[D0] = 0;
	tf->tf_regs[D1] = 0;
	tf->tf_regs[D2] = 0;
	tf->tf_regs[D3] = 0;
	tf->tf_regs[D4] = 0;
	tf->tf_regs[D5] = 0;
	tf->tf_regs[D6] = 0;
	tf->tf_regs[D7] = 0;
	tf->tf_regs[A0] = 0;
	tf->tf_regs[A1] = 0;
	tf->tf_regs[A2] = (int)p->p_psstr;	/* ps_strings, for C startup code */
	tf->tf_regs[A3] = 0;
	tf->tf_regs[A4] = 0;
	tf->tf_regs[A5] = 0;
	tf->tf_regs[A6] = 0;
	tf->tf_regs[SP] = stack;

	/* restore a null state frame */
	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
	if (fputype)
		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);

	p->p_md.md_flags = 0;
}

/*
 * Info for CTL_HW
 */
char	machine[16] = MACHINE;		/* from <machine/param.h> */
char	kernel_arch[16] = "sun3";	/* XXX needs a sysctl node */
char	cpu_model[120];

/*
 * Determine which Sun3 model we are running on.
367 * We have to do this very early on the Sun3 because 368 * pmap_bootstrap() needs to know if it should avoid 369 * the video memory on the Sun3/50. Therefore, this 370 * function just prints out what we already know. 371 */ 372 void 373 identifycpu() 374 { 375 extern char *cpu_string; /* XXX */ 376 377 /* Other stuff? (VAC, mc6888x version, etc.) */ 378 /* Note: miniroot cares about the kernel_arch part. */ 379 sprintf(cpu_model, "%s %s", kernel_arch, cpu_string); 380 381 printf("Model: %s\n", cpu_model); 382 } 383 384 /* 385 * machine dependent system variables. 386 */ 387 int 388 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) 389 int *name; 390 u_int namelen; 391 void *oldp; 392 size_t *oldlenp; 393 void *newp; 394 size_t newlen; 395 struct proc *p; 396 { 397 int error; 398 dev_t consdev; 399 400 /* all sysctl names at this level are terminal */ 401 if (namelen != 1) 402 return (ENOTDIR); /* overloaded */ 403 404 switch (name[0]) { 405 case CPU_CONSDEV: 406 if (cn_tab != NULL) 407 consdev = cn_tab->cn_dev; 408 else 409 consdev = NODEV; 410 error = sysctl_rdstruct(oldp, oldlenp, newp, 411 &consdev, sizeof consdev); 412 break; 413 414 #if 0 /* XXX - Not yet... */ 415 case CPU_ROOT_DEVICE: 416 error = sysctl_rdstring(oldp, oldlenp, newp, root_device); 417 break; 418 419 case CPU_BOOTED_KERNEL: 420 error = sysctl_rdstring(oldp, oldlenp, newp, booted_kernel); 421 break; 422 #endif 423 424 default: 425 error = EOPNOTSUPP; 426 } 427 return (error); 428 } 429 430 /* See: sig_machdep.c */ 431 432 /* 433 * Do a sync in preparation for a reboot. 434 * XXX - This could probably be common code. 435 * XXX - And now, most of it is in vfs_shutdown() 436 * XXX - Put waittime checks in there too? 437 */ 438 int waittime = -1; /* XXX - Who else looks at this? -gwr */ 439 static void 440 reboot_sync __P((void)) 441 { 442 443 /* Check waittime here to localize its use to this function. 
*/ 444 if (waittime >= 0) 445 return; 446 waittime = 0; 447 vfs_shutdown(); 448 } 449 450 /* 451 * Common part of the BSD and SunOS reboot system calls. 452 */ 453 __dead void 454 cpu_reboot(howto, user_boot_string) 455 int howto; 456 char *user_boot_string; 457 { 458 char *bs, *p; 459 char default_boot_string[8]; 460 461 /* If system is cold, just halt. (early panic?) */ 462 if (cold) 463 goto haltsys; 464 465 /* Un-blank the screen if appropriate. */ 466 cnpollc(1); 467 468 if ((howto & RB_NOSYNC) == 0) { 469 reboot_sync(); 470 /* 471 * If we've been adjusting the clock, the todr 472 * will be out of synch; adjust it now. 473 * 474 * XXX - However, if the kernel has been sitting in ddb, 475 * the time will be way off, so don't set the HW clock! 476 * XXX - Should do sanity check against HW clock. -gwr 477 */ 478 /* resettodr(); */ 479 } 480 481 /* Disable interrupts. */ 482 splhigh(); 483 484 /* Write out a crash dump if asked. */ 485 if (howto & RB_DUMP) 486 dumpsys(); 487 488 /* run any shutdown hooks */ 489 doshutdownhooks(); 490 491 if (howto & RB_HALT) { 492 haltsys: 493 printf("halted.\n"); 494 sunmon_halt(); 495 } 496 497 /* 498 * Automatic reboot. 499 */ 500 bs = user_boot_string; 501 if (bs == NULL) { 502 /* 503 * Build our own boot string with an empty 504 * boot device/file and (maybe) some flags. 505 * The PROM will supply the device/file name. 506 */ 507 bs = default_boot_string; 508 *bs = '\0'; 509 if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) { 510 /* Append the boot flags. 
*/ 511 p = bs; 512 *p++ = ' '; 513 *p++ = '-'; 514 if (howto & RB_KDB) 515 *p++ = 'd'; 516 if (howto & RB_ASKNAME) 517 *p++ = 'a'; 518 if (howto & RB_SINGLE) 519 *p++ = 's'; 520 *p = '\0'; 521 } 522 } 523 printf("rebooting...\n"); 524 sunmon_reboot(bs); 525 for (;;) ; 526 /*NOTREACHED*/ 527 } 528 529 /* 530 * These variables are needed by /sbin/savecore 531 */ 532 u_int32_t dumpmag = 0x8fca0101; /* magic number */ 533 int dumpsize = 0; /* pages */ 534 long dumplo = 0; /* blocks */ 535 536 #define DUMP_EXTRA 3 /* CPU-dependent extra pages */ 537 538 /* 539 * This is called by main to set dumplo, dumpsize. 540 * Dumps always skip the first NBPG of disk space 541 * in case there might be a disk label stored there. 542 * If there is extra space, put dump at the end to 543 * reduce the chance that swapping trashes it. 544 */ 545 void 546 cpu_dumpconf() 547 { 548 int devblks; /* size of dump device in blocks */ 549 int dumpblks; /* size of dump image in blocks */ 550 int maj; 551 int (*getsize)__P((dev_t)); 552 553 if (dumpdev == NODEV) 554 return; 555 556 maj = major(dumpdev); 557 if (maj < 0 || maj >= nblkdev) 558 panic("dumpconf: bad dumpdev=0x%x", dumpdev); 559 getsize = bdevsw[maj].d_psize; 560 if (getsize == NULL) 561 return; 562 devblks = (*getsize)(dumpdev); 563 if (devblks <= ctod(1)) 564 return; 565 devblks &= ~(ctod(1)-1); 566 567 /* 568 * Note: savecore expects dumpsize to be the 569 * number of pages AFTER the dump header. 570 */ 571 dumpsize = physmem; 572 573 /* Position dump image near end of space, page aligned. */ 574 dumpblks = ctod(physmem + DUMP_EXTRA); 575 dumplo = devblks - dumpblks; 576 577 /* If it does not fit, truncate it by moving dumplo. */ 578 /* Note: Must force signed comparison. */ 579 if (dumplo < ((long)ctod(1))) { 580 dumplo = ctod(1); 581 dumpsize = dtoc(devblks - dumplo) - DUMP_EXTRA; 582 } 583 } 584 585 /* Note: gdb looks for "dumppcb" in a kernel crash dump. 
 */
struct pcb dumppcb;
extern paddr_t avail_start;

/*
 * Write a crash dump.  The format while in swap is:
 *   kcore_seg_t cpu_hdr;
 *   cpu_kcore_hdr_t cpu_data;
 *   padding (NBPG-sizeof(kcore_seg_t))
 *   pagemap (2*NBPG)
 *   physical memory...
 *
 * Uses the pre-allocated "dumppage" scratch page to build the
 * header, and borrows the /dev/mem page (vmmap) to map physical
 * pages in the managed range.  Called with interrupts disabled
 * from cpu_reboot().
 */
void
dumpsys()
{
	struct bdevsw *dsw;
	kcore_seg_t	*kseg_p;
	cpu_kcore_hdr_t *chdr_p;
	struct sun3_kcore_hdr *sh;
	char *vaddr;
	paddr_t paddr;
	int psize, todo, chunk;
	daddr_t blkno;
	int error = 0;

	if (dumpdev == NODEV)
		return;
	if (dumppage == 0)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	/* Save our own context so the dump includes this call chain. */
	savectx(&dumppcb);

	dsw = &bdevsw[major(dumpdev)];
	psize = (*(dsw->d_psize))(dumpdev);
	if (psize == -1) {
		printf("dump area unavailable\n");
		return;
	}

	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	/*
	 * Prepare the dump header, including MMU state.
	 */
	blkno = dumplo;
	todo = dumpsize;	/* pages */
	vaddr = (char*)dumppage;
	memset(vaddr, 0, NBPG);

	/* Set pointers to all three parts. */
	kseg_p = (kcore_seg_t *)vaddr;
	chdr_p = (cpu_kcore_hdr_t *) (kseg_p + 1);
	sh = &chdr_p->un._sun3;

	/* Fill in kcore_seg_t part. */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = (ctob(DUMP_EXTRA) - sizeof(*kseg_p));

	/* Fill in cpu_kcore_hdr_t part. */
	strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
	chdr_p->page_size = NBPG;
	chdr_p->kernbase = KERNBASE;

	/* Fill in the sun3_kcore_hdr part (MMU state). */
	pmap_kcore_hdr(sh);

	/* Write out the dump header. */
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
	if (error)
		goto fail;
	blkno += btodb(NBPG);

	/* translation RAM (page zero) */
	pmap_get_pagemap((int*)vaddr, 0);
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
	if (error)
		goto fail;
	blkno += btodb(NBPG);

	/* translation RAM (page one) */
	pmap_get_pagemap((int*)vaddr, NBPG);
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
	if (error)
		goto fail;
	blkno += btodb(NBPG);

	/*
	 * Now dump physical memory.  Have to do it in two chunks.
	 * The first chunk is "unmanaged" (by the VM code) and its
	 * range of physical addresses is not allow in pmap_enter.
	 * However, that segment is mapped linearly, so we can just
	 * use the virtual mappings already in place.  The second
	 * chunk is done the normal way, using pmap_enter.
	 *
	 * Note that vaddr==(paddr+KERNBASE) for paddr=0 through etext.
	 */

	/* Do the first chunk (0 <= PA < avail_start) */
	paddr = 0;
	chunk = btoc(avail_start);
	if (chunk > todo)
		chunk = todo;
	do {
		/* Progress countdown, printed every 16 pages. */
		if ((todo & 0xf) == 0)
			printf("\r%4d", todo);
		vaddr = (char*)(paddr + KERNBASE);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
		if (error)
			goto fail;
		paddr += NBPG;
		blkno += btodb(NBPG);
		--todo;
	} while (--chunk > 0);

	/*
	 * Do the second chunk (avail_start <= PA, remaining "todo" pages),
	 * mapping each page non-cached through the borrowed /dev/mem VA.
	 *
	 * NOTE(review): this do/while body runs at least once even if
	 * todo is already zero -- presumably dumpsize always exceeds
	 * btoc(avail_start) on real configurations; verify.
	 */
	vaddr = (char*)vmmap;	/* Borrow /dev/mem VA */
	do {
		if ((todo & 0xf) == 0)
			printf("\r%4d", todo);
		pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
		pmap_update(pmap_kernel());
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
		pmap_kremove(vmmap, NBPG);
		pmap_update(pmap_kernel());
		if (error)
			goto fail;
		paddr += NBPG;
		blkno += btodb(NBPG);
	} while (--todo > 0);

	printf("\rdump succeeded\n");
	return;
fail:
	printf(" dump error=%d\n", error);
}

/*
 * Set up CPU-specific registers, cache, etc.
 * Called from cpu_startup().
 */
static void
initcpu()
{
	/* XXX: Enable RAM parity/ECC checking? */
	/* XXX: parityenable(); */

#ifdef	HAVECACHE
	cache_enable();
#endif
}

/* straptrap() in trap.c */

/* from hp300: badaddr() */
/* peek_byte(), peek_word() moved to bus_subr.c */

/* XXX: parityenable() ? */
/* regdump() moved to regdump.c */

/*
 * cpu_exec_aout_makecmds():
 *	cpu-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * This port handles no CPU-dependent a.out variants, so always
 * reject with ENOEXEC (letting other exec format hooks try).
 */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	return ENOEXEC;
}