/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratories.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)machdep.c	7.3 (Berkeley) 08/10/92
 *
 * from: $Header: machdep.c,v 1.33 92/08/05 04:20:03 torek Exp $
 */

#include "param.h"
#include "proc.h"
#include "user.h"
#include "map.h"
#include "buf.h"
#include "device.h"
#include "reboot.h"
#include "systm.h"
#include "conf.h"
#include "file.h"
#include "clist.h"
#include "callout.h"
#include "malloc.h"
#include "mbuf.h"
#include "mount.h"
#include "msgbuf.h"
#ifdef SYSVSHM
#include "shm.h"
#endif
#include "exec.h"

#include "machine/autoconf.h"
#include "machine/frame.h"
#include "machine/cpu.h"

#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "asm.h"
#include "cache.h"
#include "vaddrs.h"

/* Submap of kernel_map used for file system buffers (set in cpu_startup). */
vm_map_t buffer_map;
extern vm_offset_t avail_end;

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;
#ifdef NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif

/* Total physical memory, in pages (set from avail_end in cpu_startup). */
int	physmem;

extern struct msgbuf msgbuf;
struct	msgbuf *msgbufp = &msgbuf;
int	msgbufmapped = 1;	/* message buffer is always mapped */

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = 0;

caddr_t	allocsys();

/*
 * Machine-dependent startup code: size and allocate the kernel tables
 * (via allocsys), set up the buffer cache and the various VM submaps
 * (exec args, physio/DVMA, mbufs), thread the callout free list,
 * configure attached devices, and finally enable the cache.
 */
cpu_startup()
{
	register unsigned i;
	register caddr_t v;
	register int sz;
	int base, residual;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	physmem = btoc(avail_end);
	printf("real mem = %d\n", avail_end);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * allocsys(0) is a dry run that only computes the size;
	 * the second call lays the tables out inside v.
	 */
	sz = (int)allocsys((caddr_t)0);
	if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
	    &maxaddr, size, FALSE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
	    &minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
	}
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, TRUE);
	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.
	 */
	phys_map = vm_map_create(kernel_pmap, DVMA_BASE, DVMA_END, 1);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 *
	 * NOTE(review): the M_NOWAIT malloc result is not checked before
	 * bzero; a failure here would dereference NULL at boot.  Other
	 * ports of this era panic on this allocation failing -- confirm.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
	    M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
	    VM_MBUF_SIZE, FALSE);
	/*
	 * Initialize callouts: link the callout array into a free list.
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Turn on the cache (do after configuration due to a bug in
	 * some versions of the SPARC chips -- this info from Gilmore).
	 */
	cache_enable();
}

/*
 * Allocate space for system data structures.  We are given
 * a starting virtual address and we return a final virtual
 * address; along the way we set each data structure pointer.
 *
 * You call allocsys() with 0 to find out how much space we want,
 * allocate that much and fill it with zeroes, and then call
 * allocsys() again with the correct base virtual address.
 */
caddr_t
allocsys(v)
	register caddr_t v;
{

/* Carve space for `num' objects of `type' out of v, setting `name'. */
#define	valloc(name, type, num) \
	    v = (caddr_t)(((name) = (type *)v) + (num))
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate (enough to
	 * hold 5% of total physical memory, but at least 16).
	 * Allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		bufpages = (physmem / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	return (v);
}

/*
 * Set up registers on exec.
 *
 * XXX this entire mess must be fixed
 */
/* ARGSUSED */
setregs(p, entry, retval)
	register struct proc *p;
	u_long entry;
	int retval[2];
{
	register struct trapframe *tf = p->p_md.md_tf;
	register struct fpstate *fs;
	register int psr, sp;

	/*
	 * The syscall will ``return'' to npc or %g7 or %g2; set them all.
	 * Set the rest of the registers to 0 except for %o6 (stack pointer,
	 * built in exec()) and psr (retain CWP and PSR_S bits).
	 */
	psr = tf->tf_psr & (PSR_S | PSR_CWP);
	sp = tf->tf_out[6];
	if ((fs = p->p_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		if (p == fpproc) {
			savefpstate(fs);
			fpproc = NULL;
		}
		free((void *)fs, M_SUBPROC);
		p->p_md.md_fpstate = NULL;
	}
	/* Clear the whole frame, then restore the bits kept above. */
	bzero((caddr_t)tf, sizeof *tf);
	tf->tf_psr = psr;
	tf->tf_global[2] = tf->tf_global[7] = tf->tf_npc = entry & ~3;
	tf->tf_out[6] = sp;
	retval[1] = 0;
}

#ifdef DEBUG
/* Signal-delivery debugging knobs: enable bits in sigdebug, filter by pid. */
int sigdebug = 0;
int sigpid = 0;
#define SDB_FOLLOW	0x01
#define SDB_KSTACK	0x02
#define SDB_FPSTATE	0x04
#endif

/*
 * Signal frame, built in kernel space by sendsig() and copied out
 * to the user stack.
 */
struct sigframe {
	int	sf_signo;		/* signal number */
	int	sf_code;		/* code */
#ifdef COMPAT_SUNOS
	struct	sigcontext *sf_scp;	/* points to user addr of sigcontext */
#else
	int	sf_xxx;			/* placeholder */
#endif
	int	sf_addr;		/* SunOS compat, always 0 for now */
	struct	sigcontext sf_sc;	/* actual sigcontext */
};

/*
 * Send an interrupt to process.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct proc *p = curproc;
	register struct sigacts *psp = p->p_sigacts;
	register struct sigframe *fp;
	register struct trapframe *tf;
	register int addr, oonstack, oldsp, newsp;
	struct sigframe sf;
	extern char sigcode[], esigcode[];
#define	szsigcode	(esigcode - sigcode)

	tf = p->p_md.md_tf;
	oldsp = tf->tf_out[6];
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
		    psp->ps_sigstk.ss_size);
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else
		fp = (struct sigframe *)oldsp;
	/* Drop below the frame and 8-byte align (SPARC stack alignment). */
	fp = (struct sigframe *)((int)(fp - 1) & ~7);

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: %s[%d] sig %d newusp %x scp %x\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc);
#endif
	/*
	 * Now set up the signal frame.  We build it in kernel space
	 * and then copy it out.  We probably ought to just build it
	 * directly in user space....
	 */
	sf.sf_signo = sig;
	sf.sf_code = code;
#ifdef COMPAT_SUNOS
	sf.sf_scp = &fp->sf_sc;
#endif
	sf.sf_addr = 0;			/* XXX */

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = oonstack;
	sf.sf_sc.sc_mask = mask;
	sf.sf_sc.sc_sp = oldsp;
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
	sf.sf_sc.sc_psr = tf->tf_psr;
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
	newsp = (int)fp - sizeof(struct rwindow);
	write_user_windows();
	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
	    suword(&((struct rwindow *)newsp)->rw_in[6], oldsp)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
#endif
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig: %s[%d] sig %d scp %x\n",
		    p->p_comm, p->p_pid, sig, &fp->sf_sc);
#endif
	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
#ifdef COMPAT_SUNOS
	if (psp->ps_usertramp & sigmask(sig)) {
		addr = (int)catcher;	/* user does his own trampolining */
	} else
#endif
	{
		/* Jump to the signal trampoline copied out above the args. */
		addr = USRSTACK - sizeof(struct ps_strings) - szsigcode;
		tf->tf_global[1] = (int)catcher;
	}
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = newsp;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: about to return to catcher\n");
#endif
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above),
 * and return to the given trap frame (if there is one).
 * Check carefully to make sure that the user has not
 * modified the state to gain improper privileges or to cause
 * a machine fault.
 */
/* ARGSUSED */
struct sigreturn_args {
	struct sigcontext *scp;
};
sigreturn(p, uap, retval)
	register struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct trapframe *tf;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(p))
		sigexit(p, SIGILL);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: %s[%d], scp %x\n",
		    p->p_comm, p->p_pid, uap->scp);
#endif
	scp = uap->scp;
	/* The context must be word-aligned and user-writable. */
	if ((int)scp & 3 || useracc((caddr_t)scp, sizeof *scp, B_WRITE) == 0)
		return (EINVAL);
	tf = p->p_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
	if (((scp->sc_pc | scp->sc_npc) & 3) != 0)
		return (EINVAL);
	/* take only psr ICC field */
	tf->tf_psr = (tf->tf_psr & ~PSR_ICC) | (scp->sc_psr & PSR_ICC);
	tf->tf_pc = scp->sc_pc;
	tf->tf_npc = scp->sc_npc;
	tf->tf_global[1] = scp->sc_g1;
	tf->tf_out[0] = scp->sc_o0;
	tf->tf_out[6] = scp->sc_sp;
	if (scp->sc_onstack & 1)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	/* SIGKILL/SIGSTOP may never be masked. */
	p->p_sigmask = scp->sc_mask & ~sigcantmask;
	return (EJUSTRETURN);
}

int	waittime = -1;

/*
 * Halt or reboot the machine per `howto' (see reboot.h): optionally
 * sync and settle the disks, dump core if requested, then hand a
 * boot-flags string to the PROM.
 */
boot(howto)
	register int howto;
{
	int i;
	static char str[4];	/* room for "-sd\0" */
	extern volatile void romhalt(void);
	extern volatile void romboot(char *);

	fb_unblank();
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0 && rootfs) {
		register struct buf *bp;
		int iter, nbusy;
#if 1
		extern struct proc proc0;

		/* protect against curproc->p_stats.foo refs in sync() XXX */
		if (curproc == NULL)
			curproc = &proc0;
#endif
		waittime = 0;
		(void) spl0();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			vnode_pager_umount((struct mount *)NULL);
#include "fd.h"
#if NFD > 0
		fdshutdown();
#endif
		sync((struct proc *)NULL, (void *)NULL, (int *)NULL);

		/* Wait (bounded) for outstanding buffer I/O to drain. */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	(void) splhigh();		/* ??? */
	if (howto & RB_HALT) {
		printf("halted\n\n");
		romhalt();
	}
	if (howto & RB_DUMP)
		dumpsys();
	printf("rebooting\n\n");
	/* Build a "-sd"-style flag string for the PROM. */
	i = 1;
	if (howto & RB_SINGLE)
		str[i++] = 's';
	if (howto & RB_KDB)
		str[i++] = 'd';
	if (i > 1) {
		str[0] = '-';
		str[i] = 0;
	} else
		str[0] = 0;
	romboot(str);
	/*NOTREACHED*/
}

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;

/*
 * Size the crash dump (pages of physical memory plus, with DUMPMMU,
 * the MMU state appended by dumpmmu) and place it within the dump
 * partition.
 */
dumpconf()
{
	int nblks;

	dumpsize = physmem;
#define	DUMPMMU
#ifdef DUMPMMU
#define	NPMEG 128
	/*
	 * savecore views the image in units of pages (i.e., dumpsize is in
	 * pages) so we round the two mmu entities into page-sized chunks.
	 * The PMEGs (32kB) and the segment table (512 bytes plus padding)
	 * are appending to the end of the crash dump.
	 */
	dumpsize += btoc(sizeof(((struct kpmap *)0)->pm_rsegmap)) +
		btoc(NPMEG * NPTESG * sizeof(int));
#endif
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		/*
		 * Don't dump on the first CLBYTES (why CLBYTES?)
		 * in case the dump device includes a disk label.
		 */
		if (dumplo < btodb(CLBYTES))
			dumplo = btodb(CLBYTES);

		/*
		 * If dumpsize is too big for the partition, truncate it.
		 * Otherwise, put the dump at the end of the partition
		 * by making dumplo as large as possible.
		 */
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo + ctod(dumpsize) > nblks)
			dumplo = nblks - ctod(dumpsize);
	}
}

#ifdef DUMPMMU
/* XXX */
#include "ctlreg.h"
/* Raw MMU access via alternate address spaces (sun4c ASIs). */
#define	getpte(va)		lda(va, ASI_PTE)
#define	setsegmap(va, pmeg)	stba(va, ASI_SEGMAP, pmeg)

/*
 * Write the mmu contents to the dump device.
 * This gets appended to the end of a crash dump since
 * there is no in-core copy of kernel memory mappings.
 */
int
dumpmmu(blkno)
	register daddr_t blkno;
{
	register int (*dump)(/*dev_t, daddr_t, caddr_t, int*/);
	register int pmeg;
	register int addr;	/* unused kernel virtual address */
	register int i;
	register int *pte, *ptend;
	register int error;
	register struct kpmap *kpmap = &kernel_pmap_store;
	int buffer[dbtob(1) / sizeof(int)];	/* one disk block of PTEs */
	extern int seginval;	/* from pmap.c */


	dump = bdevsw[major(dumpdev)].d_dump;

	/*
	 * dump page table entries
	 *
	 * We dump each pmeg in order (by segment number).  Since the MMU
	 * automatically maps the given virtual segment to a pmeg we must
	 * iterate over the segments by incrementing an unused segment slot
	 * in the MMU.  This fixed segment number is used in the virtual
	 * address argument to getpte().
	 */

	/* First find an unused virtual segment. */
	i = NKSEG;
	while (kpmap->pm_rsegmap[--i] != seginval)
		if (i <= 0)
			return (-1);
	/*
	 * Compute the base address corresponding to the unused segment.
	 * Note that the kernel segments start after all the user segments
	 * so we must account for this offset.
	 */
	addr = VSTOVA(i + NUSEG);
	/*
	 * Go through the pmegs and dump each one.
	 */
	pte = buffer;
	ptend = &buffer[sizeof(buffer) / sizeof(buffer[0])];
	for (pmeg = 0; pmeg < NPMEG; ++pmeg) {
		register int va = addr;

		/* Point the spare segment slot at this pmeg. */
		setsegmap(addr, pmeg);
		i = NPTESG;
		do {
			*pte++ = getpte(va);
			if (pte >= ptend) {
				/*
				 * Note that we'll dump the last block
				 * the last time through the loops because
				 * all the PMEGs occupy 32KB which is
				 * a multiple of the block size.
				 */
				error = (*dump)(dumpdev, blkno,
				    (caddr_t)buffer,
				    dbtob(1));
				if (error != 0)
					return (error);
				++blkno;
				pte = buffer;
			}
			va += NBPG;
		} while (--i > 0);
	}
	/* Restore the spare slot to the invalid pmeg. */
	setsegmap(addr, seginval);

	/*
	 * dump (512 byte) segment map
	 * XXX	assume it's a multiple of the block size
	 *
	 * NOTE(review): this call passes five arguments while the
	 * commented prototype above shows four; the trailing 0 looks
	 * spurious (harmless under K&R calling conventions) -- confirm
	 * against the d_dump entry points in the drivers.
	 */
	error = (*dump)(dumpdev, blkno, (caddr_t)kpmap->pm_rsegmap,
	    sizeof(kpmap->pm_rsegmap), 0);
	return (error);
}
#endif

#define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
static vm_offset_t dumpspace;

/*
 * Record the kernel virtual window (set up by the pmap bootstrap code)
 * through which dumpsys() maps physical memory; returns the address
 * just past the reserved region.
 */
caddr_t
reserve_dumppages(p)
	caddr_t p;
{

	dumpspace = (vm_offset_t)p;
	return (p + BYTES_PER_DUMP);
}

/*
 * Write a crash dump.
 */
dumpsys()
{
	register unsigned bytes, i, n;
	register int maddr, psize;
	register daddr_t blkno;
	register int (*dump)(/*dev_t, daddr_t, caddr_t, int, int*/);
	int error = 0;

	if (dumpdev == NODEV)
		return;
	/* copy registers to memory */
	snapshot(cpcb);
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	bytes = physmem << PGSHIFT;
	maddr = 0;
	blkno = dumplo;
	dump = bdevsw[major(dumpdev)].d_dump;
	/*
	 * Dump physical memory a BYTES_PER_DUMP window at a time,
	 * remapping dumpspace onto each successive physical chunk.
	 */
	for (i = 0; i < bytes; i += n) {
		n = bytes - i;
		if (n > BYTES_PER_DUMP)
			n = BYTES_PER_DUMP;
#ifdef DEBUG
		/* print out how many MBs we have dumped */
		if (i && (i % (1024*1024)) == 0)
			printf("%d ", i / (1024*1024));
#endif
		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
		error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, (int)n);
		if (error)
			break;
		maddr += n;
		blkno += btodb(n);
	}
#ifdef DUMPMMU
	if (!error)
		error = dumpmmu(blkno);
#endif
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}

/*
 * Map an I/O device given physical address and size in bytes, e.g.,
 *
 *	mydev = (struct mydev *)mapdev(myioaddr, 0, sizeof(struct mydev));
 *
 * See also machine/autoconf.h.
 */
void *
mapdev(phys, virt, size)
	register void *phys;
	register int virt, size;
{
	register vm_offset_t v;
	register void *ret;
	static vm_offset_t iobase = IODEV_BASE;

	size = round_page(size);
	if (virt)
		v = virt;
	else {
		/* Carve the next chunk out of the IODEV window. */
		v = iobase;
		iobase += size;
		if (iobase > IODEV_END)	/* unlikely */
			panic("mapiodev");
	}
	ret = (void *)v;
	/*
	 * Enter uncached (PMAP_NC) OBIO mappings one page at a time.
	 * NOTE(review): arithmetic on `phys' (a void *) relies on the
	 * gcc extension treating sizeof(void) as 1 -- presumably fine
	 * for the compiler this tree targets; confirm if porting.
	 */
	do {
		pmap_enter(kernel_pmap, v,
		    (vm_offset_t)phys | PMAP_OBIO | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 1);
		v += PAGE_SIZE;
		phys += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);
	return (ret);
}