/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)machdep.c	8.8 (Berkeley) 05/09/95
 *
 * from: $Header: machdep.c,v 1.44 93/10/31 05:28:36 torek Exp $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/map.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/clist.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#include <sys/exec.h>
#include <sys/sysctl.h>

#include <machine/autoconf.h>
#include <machine/frame.h>
#include <machine/cpu.h>

#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/cache.h>
#include <sparc/sparc/vaddrs.h>

/* Submap of kernel_map holding the filesystem buffer cache (see cpu_startup). */
vm_map_t buffer_map;
extern vm_offset_t avail_end;

/*
 * Declare these as initialized data so we can patch them.
 */
int nswbuf = 0;
#ifdef NBUF
int nbuf = NBUF;
#else
int nbuf = 0;
#endif
#ifdef BUFPAGES
int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif

/* Total physical memory, in pages; set from avail_end in cpu_startup(). */
int physmem;

extern struct msgbuf msgbuf;
struct msgbuf *msgbufp = &msgbuf;
int msgbufmapped = 1;	/* message buffer is always mapped */

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int safepri = 0;

caddr_t allocsys();

/*
 * Machine-dependent startup code
 *
 * Sizes and allocates the kernel data structures (via allocsys), carves
 * out the buffer cache, exec-argument, DVMA, and mbuf submaps, links the
 * callout free list, initializes the buffer cache, probes/attaches
 * devices, and finally enables the cache.
 */
cpu_startup()
{
	register unsigned i;
	register caddr_t v;
	register int sz;
	int base, residual;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	physmem = btoc(avail_end);
	printf("real mem = %d\n", avail_end);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * allocsys(0) is a dry run that only computes the size.
	 */
	sz = (int)allocsys((caddr_t)0);
	if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
	    &maxaddr, size, TRUE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
	    &minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
	}
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, TRUE);
	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.
	 */
	phys_map = vm_map_create(kernel_pmap, DVMA_BASE, DVMA_END, 1);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 *
	 * NOTE(review): the malloc() result is not checked before bzero();
	 * with M_NOWAIT this can return NULL — confirm whether an
	 * early-boot failure here is considered impossible.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
	    M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
	    VM_MBUF_SIZE, FALSE);
	/*
	 * Initialize callouts: chain all entries onto the free list.
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system, then turn on the cache.  Should be able
	 * to do this earlier, but then esp.c fails on SS1+ boxes (??).
	 */
	configure();
	cache_enable();
}

/*
 * Allocate space for system data structures.  We are given
 * a starting virtual address and we return a final virtual
 * address; along the way we set each data structure pointer.
 *
 * You call allocsys() with 0 to find out how much space we want,
 * allocate that much and fill it with zeroes, and then call
 * allocsys() again with the correct base virtual address.
 */
caddr_t
allocsys(v)
	register caddr_t v;
{

/* Carve space for `num` objects of `type` out of v and advance v. */
#define	valloc(name, type, num) \
	    v = (caddr_t)(((name) = (type *)v) + (num))
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate (enough to
	 * hold 5% of total physical memory, but at least 16).
	 * Allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		bufpages = (physmem / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	return (v);
}

/*
 * Set up registers on exec.
 *
 * XXX this entire mess must be fixed
 */
/* ARGSUSED */
setregs(p, entry, retval)
	register struct proc *p;
	u_long entry;
	int retval[2];
{
	register struct trapframe *tf = p->p_md.md_tf;
	register struct fpstate *fs;
	register int psr, sp;

	/*
	 * The syscall will ``return'' to npc or %g7 or %g2; set them all.
	 * Set the rest of the registers to 0 except for %o6 (stack pointer,
	 * built in exec()) and psr (retain CWP and PSR_S bits).
	 */
	psr = tf->tf_psr & (PSR_S | PSR_CWP);
	sp = tf->tf_out[6];
	if ((fs = p->p_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		if (p == fpproc) {
			savefpstate(fs);
			fpproc = NULL;
		}
		free((void *)fs, M_SUBPROC);
		p->p_md.md_fpstate = NULL;
	}
	bzero((caddr_t)tf, sizeof *tf);
	tf->tf_psr = psr;
	/* Entry point is forced to 4-byte alignment. */
	tf->tf_global[2] = tf->tf_global[7] = tf->tf_npc = entry & ~3;
	tf->tf_out[6] = sp;
	retval[1] = 0;
}

#ifdef DEBUG
/* Signal-delivery debug knobs: set sigdebug/sigpid to trace one process. */
int sigdebug = 0;
int sigpid = 0;
#define SDB_FOLLOW	0x01
#define SDB_KSTACK	0x02
#define SDB_FPSTATE	0x04
#endif

/* User-stack frame built by sendsig() and consumed by sigreturn(). */
struct sigframe {
	int	sf_signo;		/* signal number */
	int	sf_code;		/* code */
#ifdef COMPAT_SUNOS
	struct	sigcontext *sf_scp;	/* points to user addr of sigcontext */
#else
	int	sf_xxx;			/* placeholder */
#endif
	int	sf_addr;		/* SunOS compat, always 0 for now */
	struct	sigcontext sf_sc;	/* actual sigcontext */
};

/*
 * machine dependent system variables.
 */
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{

	/* all sysctl names are this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */

	/* No machine-dependent variables are exported yet. */
	switch (name[0]) {
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Send an interrupt to process.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	u_long code;
{
	register struct proc *p = curproc;
	register struct sigacts *psp = p->p_sigacts;
	register struct sigframe *fp;
	register struct trapframe *tf;
	register int addr, oonstack, oldsp, newsp;
	struct sigframe sf;
	extern char sigcode[], esigcode[];
#define	szsigcode	(esigcode - sigcode)

	tf = p->p_md.md_tf;
	oldsp = tf->tf_out[6];
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
		    psp->ps_sigstk.ss_size);
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else
		fp = (struct sigframe *)oldsp;
	/* SPARC stacks must be 8-byte aligned. */
	fp = (struct sigframe *)((int)(fp - 1) & ~7);

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: %s[%d] sig %d newusp %x scp %x\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc);
#endif
	/*
	 * Now set up the signal frame.  We build it in kernel space
	 * and then copy it out.  We probably ought to just build it
	 * directly in user space....
	 */
	sf.sf_signo = sig;
	sf.sf_code = code;
#ifdef COMPAT_SUNOS
	sf.sf_scp = &fp->sf_sc;
#endif
	sf.sf_addr = 0;			/* XXX */

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = oonstack;
	sf.sf_sc.sc_mask = mask;
	sf.sf_sc.sc_sp = oldsp;
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
	sf.sf_sc.sc_psr = tf->tf_psr;
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
	newsp = (int)fp - sizeof(struct rwindow);
	write_user_windows();
	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
	    suword(&((struct rwindow *)newsp)->rw_in[6], oldsp)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
#endif
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig: %s[%d] sig %d scp %x\n",
		    p->p_comm, p->p_pid, sig, &fp->sf_sc);
#endif
	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
#ifdef COMPAT_SUNOS
	if (psp->ps_usertramp & sigmask(sig)) {
		addr = (int)catcher;	/* user does his own trampolining */
	} else
#endif
	{
		/* Trampoline lives just below ps_strings at top of stack. */
		addr = USRSTACK - sizeof(struct ps_strings) - szsigcode;
		tf->tf_global[1] = (int)catcher;
	}
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = newsp;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: about to return to catcher\n");
#endif
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above),
 * and return to the given trap frame (if there is one).
 * Check carefully to make sure that the user has not
 * modified the state to gain improper privileges or to cause
 * a machine fault.
 */
/* ARGSUSED */
struct sigreturn_args {
	struct sigcontext *scp;
};
sigreturn(p, uap, retval)
	register struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct trapframe *tf;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(p))
		sigexit(p, SIGILL);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: %s[%d], scp %x\n",
		    p->p_comm, p->p_pid, uap->scp);
#endif
	scp = uap->scp;
	/* Context must be word-aligned and writable by the user. */
	if ((int)scp & 3 || useracc((caddr_t)scp, sizeof *scp, B_WRITE) == 0)
		return (EINVAL);
	tf = p->p_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
	if (((scp->sc_pc | scp->sc_npc) & 3) != 0)
		return (EINVAL);
	/* take only psr ICC field */
	tf->tf_psr = (tf->tf_psr & ~PSR_ICC) | (scp->sc_psr & PSR_ICC);
	tf->tf_pc = scp->sc_pc;
	tf->tf_npc = scp->sc_npc;
	tf->tf_global[1] = scp->sc_g1;
	tf->tf_out[0] = scp->sc_o0;
	tf->tf_out[6] = scp->sc_sp;
	if (scp->sc_onstack & 1)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	p->p_sigmask = scp->sc_mask & ~sigcantmask;
	return (EJUSTRETURN);
}

int	waittime = -1;

/*
 * Machine-dependent reboot/halt: sync and unmount filesystems (unless
 * RB_NOSYNC or we already tried), optionally dump, then call the PROM
 * to halt or reboot with the flag string built from howto.
 */
boot(howto)
	register int howto;
{
	int i;
	static char str[4];	/* room for "-sd\0" */
	extern volatile void romhalt(void);
	extern volatile void romboot(char *);

	fb_unblank();
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy;
#if 1
		extern struct proc proc0;

		/* protect against curproc->p_stats.foo refs in sync()   XXX */
		if (curproc == NULL)
			curproc = &proc0;
#endif
		waittime = 0;
		(void) spl0();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			vnode_pager_umount((struct mount *)NULL);
		sync(&proc0, (void *)NULL, (int *)NULL);
		/*
		 * Unmount filesystems
		 */
		if (panicstr == 0)
			vfs_unmountall();

		/* Wait (with backoff) for buffers to drain, up to 20 tries. */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	(void) splhigh();		/* ??? */
	if (howto & RB_HALT) {
		printf("halted\n\n");
		romhalt();
	}
	if (howto & RB_DUMP)
		dumpsys();
	printf("rebooting\n\n");
	/* Build PROM boot flag string, e.g. "-s", "-d", or "-sd". */
	i = 1;
	if (howto & RB_SINGLE)
		str[i++] = 's';
	if (howto & RB_KDB)
		str[i++] = 'd';
	if (i > 1) {
		str[0] = '-';
		str[i] = 0;
	} else
		str[0] = 0;
	romboot(str);
	/*NOTREACHED*/
}

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;

/*
 * Compute dump size and device offset for a crash dump, leaving room
 * for the disk label and (when DUMPMMU) the appended MMU state.
 */
dumpconf()
{
	int nblks;

	dumpsize = physmem;
#define	DUMPMMU
#ifdef DUMPMMU
#define	NPMEG 128
	/*
	 * savecore views the image in units of pages (i.e., dumpsize is in
	 * pages) so we round the two mmu entities into page-sized chunks.
	 * The PMEGs (32kB) and the segment table (512 bytes plus padding)
	 * are appended to the end of the crash dump.
	 */
	dumpsize += btoc(sizeof(((struct kpmap *)0)->pm_rsegmap)) +
	    btoc(NPMEG * NPTESG * sizeof(int));
#endif
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		/*
		 * Don't dump on the first CLBYTES (why CLBYTES?)
		 * in case the dump device includes a disk label.
		 */
		if (dumplo < btodb(CLBYTES))
			dumplo = btodb(CLBYTES);

		/*
		 * If dumpsize is too big for the partition, truncate it.
		 * Otherwise, put the dump at the end of the partition
		 * by making dumplo as large as possible.
		 */
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo + ctod(dumpsize) > nblks)
			dumplo = nblks - ctod(dumpsize);
	}
}

#ifdef DUMPMMU
/* XXX */
#include <machine/ctlreg.h>
#define	getpte(va)		lda(va, ASI_PTE)
#define	setsegmap(va, pmeg)	stba(va, ASI_SEGMAP, pmeg)

/*
 * Write the mmu contents to the dump device.
 * This gets appended to the end of a crash dump since
 * there is no in-core copy of kernel memory mappings.
 */
int
dumpmmu(blkno)
	register daddr_t blkno;
{
	register int (*dump)(/*dev_t, daddr_t, caddr_t, int*/);
	register int pmeg;
	register int addr;	/* unused kernel virtual address */
	register int i;
	register int *pte, *ptend;
	register int error;
	register struct kpmap *kpmap = &kernel_pmap_store;
	int buffer[dbtob(1) / sizeof(int)];
	extern int seginval;	/* from pmap.c */


	dump = bdevsw[major(dumpdev)].d_dump;

	/*
	 * dump page table entries
	 *
	 * We dump each pmeg in order (by segment number).  Since the MMU
	 * automatically maps the given virtual segment to a pmeg we must
	 * iterate over the segments by incrementing an unused segment slot
	 * in the MMU.  This fixed segment number is used in the virtual
	 * address argument to getpte().
	 */

	/* First find an unused virtual segment. */
	i = NKSEG;
	while (kpmap->pm_rsegmap[--i] != seginval)
		if (i <= 0)
			return (-1);
	/*
	 * Compute the base address corresponding to the unused segment.
	 * Note that the kernel segments start after all the user segments
	 * so we must account for this offset.
	 */
	addr = VSTOVA(i + NUSEG);
	/*
	 * Go through the pmegs and dump each one.
	 */
	pte = buffer;
	ptend = &buffer[sizeof(buffer) / sizeof(buffer[0])];
	for (pmeg = 0; pmeg < NPMEG; ++pmeg) {
		register int va = addr;

		/* Point the spare segment at this pmeg, then read its PTEs. */
		setsegmap(addr, pmeg);
		i = NPTESG;
		do {
			*pte++ = getpte(va);
			if (pte >= ptend) {
				/*
				 * Note that we'll dump the last block
				 * the last time through the loops because
				 * all the PMEGs occupy 32KB which is
				 * a multiple of the block size.
				 */
				error = (*dump)(dumpdev, blkno,
				    (caddr_t)buffer,
				    dbtob(1));
				if (error != 0)
					return (error);
				++blkno;
				pte = buffer;
			}
			va += NBPG;
		} while (--i > 0);
	}
	/* Restore the spare segment to "invalid" before returning. */
	setsegmap(addr, seginval);

	/*
	 * dump (512 byte) segment map
	 * XXX assume it's a multiple of the block size
	 */
	error = (*dump)(dumpdev, blkno, (caddr_t)kpmap->pm_rsegmap,
	    sizeof(kpmap->pm_rsegmap));
	return (error);
}
#endif

#define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
static vm_offset_t dumpspace;

/*
 * Reserve BYTES_PER_DUMP bytes of kernel VA at p for dumpsys() to map
 * physical memory through; returns the address past the reservation.
 */
caddr_t
reserve_dumppages(p)
	caddr_t p;
{

	dumpspace = (vm_offset_t)p;
	return (p + BYTES_PER_DUMP);
}

/*
 * Write a crash dump.
 */
dumpsys()
{
	register unsigned bytes, i, n;
	register int maddr, psize;
	register daddr_t blkno;
	/*
	 * NOTE(review): prototype comment here shows five args vs. four in
	 * dumpmmu() — confirm the actual d_dump signature.
	 */
	register int (*dump)(/*dev_t, daddr_t, caddr_t, int, int*/);
	int error = 0;

	if (dumpdev == NODEV)
		return;
	/* copy registers to memory */
	snapshot(cpcb);
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	bytes = physmem << PGSHIFT;
	maddr = 0;
	blkno = dumplo;
	dump = bdevsw[major(dumpdev)].d_dump;
	/* Dump physical memory in BYTES_PER_DUMP windows via dumpspace. */
	for (i = 0; i < bytes; i += n) {
		n = bytes - i;
		if (n > BYTES_PER_DUMP)
			n = BYTES_PER_DUMP;
#ifdef DEBUG
		/* print out how many MBs we have dumped */
		if (i && (i % (1024*1024)) == 0)
			printf("%d ", i / (1024*1024));
#endif
		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
		error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, (int)n);
		if (error)
			break;
		maddr += n;
		blkno += btodb(n);
	}
#ifdef DUMPMMU
	if (!error)
		error = dumpmmu(blkno);
#endif
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}

/*
 * Map an I/O device given physical address and size in bytes, e.g.,
 *
 *	mydev = (struct mydev *)mapdev(myioaddr, 0,
 *				       sizeof(struct mydev));
 *
 * See also machine/autoconf.h.
 */
void *
mapdev(phys, virt, size)
	register void *phys;
	register int virt, size;
{
	register vm_offset_t v;
	register void *ret;
	static vm_offset_t iobase = IODEV_BASE;

	size = round_page(size);
	if (virt)
		v = trunc_page(virt);
	else {
		/* No address requested: carve the next chunk of IODEV space. */
		v = iobase;
		iobase += size;
		if (iobase > IODEV_END)	/* unlikely */
			panic("mapiodev");
	}
	ret = (void *)v;
	phys = (void *)trunc_page(phys);
	do {
		/* Map uncached (PMAP_NC) OBIO space, read/write, wired. */
		pmap_enter(kernel_pmap, v,
		    (vm_offset_t)phys | PMAP_OBIO | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 1);
		v += PAGE_SIZE;
		phys += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);
	return (ret);
}