1 /* 2 * Copyright (c) 1988 University of Utah. 3 * Copyright (c) 1992, 1993 4 * The Regents of the University of California. All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * the Systems Programming Group of the University of Utah Computer 8 * Science Department, The Mach Operating System project at 9 * Carnegie-Mellon University, Ralph Campbell, Sony Corp. and Kazumasa 10 * Utashiro of Software Research Associates, Inc. 11 * 12 * %sccs.include.redist.c% 13 * 14 * @(#)machdep.c 8.4 (Berkeley) 05/09/95 15 */ 16 17 /* from: Utah $Hdr: machdep.c 1.63 91/04/24$ */ 18 19 #include <sys/param.h> 20 #include <sys/systm.h> 21 #include <sys/signalvar.h> 22 #include <sys/kernel.h> 23 #include <sys/map.h> 24 #include <sys/proc.h> 25 #include <sys/buf.h> 26 #include <sys/reboot.h> 27 #include <sys/conf.h> 28 #include <sys/file.h> 29 #include <sys/clist.h> 30 #include <sys/callout.h> 31 #include <sys/malloc.h> 32 #include <sys/mbuf.h> 33 #include <sys/msgbuf.h> 34 #include <sys/user.h> 35 #include <sys/exec.h> 36 #include <sys/sysctl.h> 37 #ifdef SYSVSHM 38 #include <sys/shm.h> 39 #endif 40 41 #include <vm/vm_kern.h> 42 43 #include <machine/cpu.h> 44 #include <machine/reg.h> 45 #include <machine/psl.h> 46 #include <machine/pte.h> 47 48 #include <machine/adrsmap.h> 49 50 vm_map_t buffer_map; 51 52 /* the following is used externally (sysctl_hw) */ 53 char machine[] = "SONY"; /* cpu "architecture" */ 54 char cpu_model[30]; 55 56 /* 57 * Declare these as initialized data so we can patch them. 
 */
int	nswbuf = 0;
#ifdef NBUF
int	nbuf = NBUF;		/* number of file i/o buffer headers */
#else
int	nbuf = 0;		/* 0 => sized in mach_init() below */
#endif
#ifdef BUFPAGES
int	bufpages = BUFPAGES;	/* pages of buffer-cache memory */
#else
int	bufpages = 0;		/* 0 => sized in mach_init() below */
#endif
int	msgbufmapped = 0;	/* set when safe to use msgbuf */
int	maxmem;			/* max memory per process */
int	physmem;		/* max supported memory, changes to actual */
/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

struct	user *proc0paddr;	/* u. area of proc 0, mapped below */
struct	proc nullproc;		/* for use by switch_exit() */

/*
 * Do all the stuff that locore normally does before calling main().
 * Process arguments passed to us by the prom monitor.
 * Return the first page address following the system.
 *
 * x_boothowto	RB_* reboot flags (copied to global boothowto)
 * x_unkown	unused here; presumably a second prom argument -- TODO confirm
 * x_bootdev	boot device cookie (copied to global bootdev)
 * x_maxmem	memory size in bytes as reported by the prom
 */
mach_init(x_boothowto, x_unkown, x_bootdev, x_maxmem)
	int x_boothowto;
	int x_unkown;
	int x_bootdev;
	int x_maxmem;
{
	register char *cp;		/* NOTE(review): unused in this function */
	register int i;
	register unsigned firstaddr;	/* physical address being mapped */
	register caddr_t v;		/* first free kernel virtual address */
	caddr_t start;			/* start of span to clear with bzero */
	extern u_long bootdev;
	extern char edata[], end[];
	extern char MachUTLBMiss[], MachUTLBMissEnd[];
	extern char MachException[], MachExceptionEnd[];

	/*
	 * Save parameters into kernel work area.
	 */
	*(int *)(MACH_CACHED_TO_UNCACHED(MACH_MAXMEMSIZE_ADDR)) = x_maxmem;
	*(int *)(MACH_CACHED_TO_UNCACHED(MACH_BOOTDEV_ADDR)) = x_bootdev;
	*(int *)(MACH_CACHED_TO_UNCACHED(MACH_BOOTSW_ADDR)) = x_boothowto;

	/* clear the BSS segment */
	/* NOTE(review): pmax_* names look inherited from the pmax port */
	v = (caddr_t)pmax_round_page(end);
	bzero(edata, v - edata);

	boothowto = x_boothowto;
	bootdev = x_bootdev;
	maxmem = physmem = pmax_btop(x_maxmem);	/* bytes -> pages */

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
#ifdef GENERIC
	boothowto |= RB_SINGLE | RB_ASKNAME;
#endif
#ifdef KADB
	boothowto |= RB_KDB;
#endif

#ifdef MFS
	/*
	 * Check to see if a mini-root was loaded into memory. It resides
	 * at the start of the next page just after the end of BSS.
	 */
	if (boothowto & RB_MINIROOT) {
		boothowto |= RB_DFLTROOT;
		v += mfs_initminiroot(v);
	}
#endif

	/*
	 * Init mapping for u page(s) for proc[0], pm_tlbpid 1.
	 * (pid 0 is reserved for nullproc below.)
	 */
	start = v;
	curproc->p_addr = proc0paddr = (struct user *)v;
	curproc->p_md.md_regs = proc0paddr->u_pcb.pcb_regs;
	firstaddr = MACH_CACHED_TO_PHYS(v);
	for (i = 0; i < UPAGES; i++) {
		MachTLBWriteIndexed(i,
			(UADDR + (i << PGSHIFT)) | (1 << VMMACH_TLB_PID_SHIFT),
			curproc->p_md.md_upte[i] = firstaddr | PG_V | PG_M);
		firstaddr += NBPG;
	}
	v += UPAGES * NBPG;
	MachSetPID(1);

	/*
	 * init nullproc for switch_exit().
	 * init mapping for u page(s), pm_tlbpid 0
	 * This could be used for an idle process.
	 */
	nullproc.p_addr = (struct user *)v;
	nullproc.p_md.md_regs = nullproc.p_addr->u_pcb.pcb_regs;
	bcopy("nullproc", nullproc.p_comm, sizeof("nullproc"));
	for (i = 0; i < UPAGES; i++) {
		nullproc.p_md.md_upte[i] = firstaddr | PG_V | PG_M;
		firstaddr += NBPG;
	}
	v += UPAGES * NBPG;

	/* clear pages for u areas */
	bzero(start, v - start);

	/*
	 * Copy down exception vector code.
	 */
	if (MachUTLBMissEnd - MachUTLBMiss > 0x80)
		panic("startup: UTLB code too large");
	bcopy(MachUTLBMiss, (char *)MACH_UTLB_MISS_EXC_VEC,
		MachUTLBMissEnd - MachUTLBMiss);
	bcopy(MachException, (char *)MACH_GEN_EXC_VEC,
		MachExceptionEnd - MachException);

	/*
	 * Clear out the I and D caches.
	 */
	MachConfigCache();
	MachFlushCache();

	/*
	 * Initialize error message buffer (at end of core).
	 */
	maxmem -= btoc(sizeof (struct msgbuf));
	msgbufp = (struct msgbuf *)(MACH_PHYS_TO_CACHED(maxmem << PGSHIFT));
	msgbufmapped = 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 *
	 * These data structures are allocated here instead of cpu_startup()
	 * because physical memory is directly addressable. We don't have
	 * to map these into virtual address space.
	 */
	start = v;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * We allocate more buffer space than the BSD standard of
	 * using 10% of memory for the first 2 Meg, 5% of remaining.
	 * We just allocate a flat 10%. Insure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		bufpages = physmem / 10 / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

	/*
	 * Clear allocated memory.
	 */
	bzero(start, v - start);

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap((vm_offset_t)v);
}

/*
 * Console initialization: called early on from main,
 * before vm init or startup. Do enough configuration
 * to choose and initialize a console.
 * XXX need something better here.
255 */ 256 #define SCC_CONSOLE 0 257 #define SW_CONSOLE 0x07 258 #define SW_NWB512 0x04 259 #define SW_NWB225 0x01 260 #define SW_FBPOP 0x02 261 #define SW_FBPOP1 0x06 262 #define SW_FBPOP2 0x03 263 #define SW_AUTOSEL 0x07 264 consinit() 265 { 266 extern dev_t consdev; 267 extern struct tty *constty, *cn_tty, *rs_tty; 268 int dipsw = (int)*(volatile u_char *)DIP_SWITCH; 269 270 #include "bm.h" 271 #if NBM > 0 272 #if defined(news3200) || defined(news3400) /* KU:XXX */ 273 fbbm_probe(dipsw|2); 274 #else 275 fbbm_probe(dipsw); 276 #endif 277 vt100_open(); 278 setup_fnt(); 279 setup_fnt24(); 280 #else 281 dipsw &= SW_CONSOLE; 282 #endif 283 284 switch (dipsw & SW_CONSOLE) { 285 case 0: 286 scc_open(SCC_CONSOLE); 287 consdev = makedev(1, 0); 288 constty = rs_tty; 289 break; 290 291 default: 292 #if NBM > 0 293 consdev = makedev(22, 0); 294 constty = cn_tty; 295 #endif 296 break; 297 } 298 return(0); 299 } 300 301 /* 302 * cpu_startup: allocate memory for variable-sized tables, 303 * initialize cpu, and do autoconfiguration. 304 */ 305 cpu_startup() 306 { 307 register unsigned i; 308 register caddr_t v; 309 int base, residual; 310 vm_offset_t minaddr, maxaddr; 311 vm_size_t size; 312 #ifdef DEBUG 313 extern int pmapdebug; 314 int opmapdebug = pmapdebug; 315 316 pmapdebug = 0; 317 #endif 318 319 /* 320 * Good {morning,afternoon,evening,night}. 321 */ 322 printf(version); 323 printf("real mem = %d\n", ctob(physmem)); 324 325 /* 326 * Allocate virtual address space for file I/O buffers. 327 * Note they are different than the array of headers, 'buf', 328 * and usually occupy more virtual memory than physical. 
329 */ 330 size = MAXBSIZE * nbuf; 331 buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers, 332 &maxaddr, size, TRUE); 333 minaddr = (vm_offset_t)buffers; 334 if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0, 335 &minaddr, size, FALSE) != KERN_SUCCESS) 336 panic("startup: cannot allocate buffers"); 337 base = bufpages / nbuf; 338 residual = bufpages % nbuf; 339 for (i = 0; i < nbuf; i++) { 340 vm_size_t curbufsize; 341 vm_offset_t curbuf; 342 343 /* 344 * First <residual> buffers get (base+1) physical pages 345 * allocated for them. The rest get (base) physical pages. 346 * 347 * The rest of each buffer occupies virtual space, 348 * but has no physical memory allocated for it. 349 */ 350 curbuf = (vm_offset_t)buffers + i * MAXBSIZE; 351 curbufsize = CLBYTES * (i < residual ? base+1 : base); 352 vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE); 353 vm_map_simplify(buffer_map, curbuf); 354 } 355 /* 356 * Allocate a submap for exec arguments. This map effectively 357 * limits the number of processes exec'ing at any time. 358 */ 359 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 360 16 * NCARGS, TRUE); 361 /* 362 * Allocate a submap for physio 363 */ 364 phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 365 VM_PHYS_SIZE, TRUE); 366 367 /* 368 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size 369 * we use the more space efficient malloc in place of kmem_alloc. 
370 */ 371 mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES, 372 M_MBUF, M_NOWAIT); 373 bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES); 374 mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr, 375 VM_MBUF_SIZE, FALSE); 376 /* 377 * Initialize callouts 378 */ 379 callfree = callout; 380 for (i = 1; i < ncallout; i++) 381 callout[i-1].c_next = &callout[i]; 382 callout[i-1].c_next = NULL; 383 384 #ifdef DEBUG 385 pmapdebug = opmapdebug; 386 #endif 387 printf("avail mem = %d\n", ptoa(cnt.v_free_count)); 388 printf("using %d buffers containing %d bytes of memory\n", 389 nbuf, bufpages * CLBYTES); 390 /* 391 * Set up CPU-specific registers, cache, etc. 392 */ 393 initcpu(); 394 395 /* 396 * Set up buffers, so they can be used to read disk labels. 397 */ 398 bufinit(); 399 400 /* 401 * Configure the system. 402 */ 403 configure(); 404 } 405 406 /* 407 * Set registers on exec. 408 * Clear all registers except sp, pc. 409 */ 410 setregs(p, entry, retval) 411 register struct proc *p; 412 u_long entry; 413 int retval[2]; 414 { 415 int sp = p->p_md.md_regs[SP]; 416 extern struct proc *machFPCurProcPtr; 417 418 bzero((caddr_t)p->p_md.md_regs, (FSR + 1) * sizeof(int)); 419 p->p_md.md_regs[SP] = sp; 420 p->p_md.md_regs[PC] = entry & ~3; 421 p->p_md.md_regs[PS] = PSL_USERSET; 422 p->p_md.md_flags & ~MDP_FPUSED; 423 if (machFPCurProcPtr == p) 424 machFPCurProcPtr = (struct proc *)0; 425 } 426 427 /* 428 * WARNING: code in locore.s assumes the layout shown for sf_signum 429 * thru sf_handler so... don't screw with them! 
430 */ 431 struct sigframe { 432 int sf_signum; /* signo for handler */ 433 int sf_code; /* additional info for handler */ 434 struct sigcontext *sf_scp; /* context ptr for handler */ 435 sig_t sf_handler; /* handler addr for u_sigc */ 436 struct sigcontext sf_sc; /* actual context */ 437 }; 438 439 #ifdef DEBUG 440 int sigdebug = 0; 441 int sigpid = 0; 442 #define SDB_FOLLOW 0x01 443 #define SDB_KSTACK 0x02 444 #define SDB_FPSTATE 0x04 445 #endif 446 447 /* 448 * Send an interrupt to process. 449 */ 450 void 451 sendsig(catcher, sig, mask, code) 452 sig_t catcher; 453 int sig, mask; 454 unsigned code; 455 { 456 register struct proc *p = curproc; 457 register struct sigframe *fp; 458 register int *regs; 459 register struct sigacts *psp = p->p_sigacts; 460 int oonstack, fsize; 461 struct sigcontext ksc; 462 extern char sigcode[], esigcode[]; 463 464 regs = p->p_md.md_regs; 465 oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK; 466 /* 467 * Allocate and validate space for the signal handler 468 * context. Note that if the stack is in data space, the 469 * call to grow() is a nop, and the copyout() 470 * will fail if the process has not already allocated 471 * the space with a `brk'. 472 */ 473 fsize = sizeof(struct sigframe); 474 if ((psp->ps_flags & SAS_ALTSTACK) && 475 (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 && 476 (psp->ps_sigonstack & sigmask(sig))) { 477 fp = (struct sigframe *)(psp->ps_sigstk.ss_base + 478 psp->ps_sigstk.ss_size - fsize); 479 psp->ps_sigstk.ss_flags |= SA_ONSTACK; 480 } else 481 fp = (struct sigframe *)(regs[SP] - fsize); 482 if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize)) 483 (void)grow(p, (unsigned)fp); 484 #ifdef DEBUG 485 if ((sigdebug & SDB_FOLLOW) || 486 (sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 487 printf("sendsig(%d): sig %d ssp %x usp %x scp %x\n", 488 p->p_pid, sig, &oonstack, fp, &fp->sf_sc); 489 #endif 490 /* 491 * Build the signal context to be used by sigreturn. 
492 */ 493 ksc.sc_onstack = oonstack; 494 ksc.sc_mask = mask; 495 ksc.sc_pc = regs[PC]; 496 ksc.sc_regs[ZERO] = 0xACEDBADE; /* magic number */ 497 bcopy((caddr_t)®s[1], (caddr_t)&ksc.sc_regs[1], 498 sizeof(ksc.sc_regs) - sizeof(int)); 499 ksc.sc_fpused = p->p_md.md_flags & MDP_FPUSED; 500 if (ksc.sc_fpused) { 501 extern struct proc *machFPCurProcPtr; 502 503 /* if FPU has current state, save it first */ 504 if (p == machFPCurProcPtr) 505 MachSaveCurFPState(p); 506 bcopy((caddr_t)&p->p_md.md_regs[F0], (caddr_t)ksc.sc_fpregs, 507 sizeof(ksc.sc_fpregs)); 508 } 509 if (copyout((caddr_t)&ksc, (caddr_t)&fp->sf_sc, sizeof(ksc))) { 510 /* 511 * Process has trashed its stack; give it an illegal 512 * instruction to halt it in its tracks. 513 */ 514 SIGACTION(p, SIGILL) = SIG_DFL; 515 sig = sigmask(SIGILL); 516 p->p_sigignore &= ~sig; 517 p->p_sigcatch &= ~sig; 518 p->p_sigmask &= ~sig; 519 psignal(p, SIGILL); 520 return; 521 } 522 /* 523 * Build the argument list for the signal handler. 524 */ 525 regs[A0] = sig; 526 regs[A1] = code; 527 regs[A2] = (int)&fp->sf_sc; 528 regs[A3] = (int)catcher; 529 530 regs[PC] = (int)catcher; 531 regs[SP] = (int)fp; 532 /* 533 * Signal trampoline code is at base of user stack. 534 */ 535 regs[RA] = (int)PS_STRINGS - (esigcode - sigcode); 536 #ifdef DEBUG 537 if ((sigdebug & SDB_FOLLOW) || 538 (sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 539 printf("sendsig(%d): sig %d returns\n", 540 p->p_pid, sig); 541 #endif 542 } 543 544 /* 545 * System call to cleanup state after a signal 546 * has been taken. Reset signal mask and 547 * stack state from context left by sendsig (above). 548 * Return to previous pc and psl as specified by 549 * context left by sendsig. Check carefully to 550 * make sure that the user has not modified the 551 * psl to gain improper priviledges or to cause 552 * a machine fault. 
553 */ 554 struct sigreturn_args { 555 struct sigcontext *sigcntxp; 556 }; 557 /* ARGSUSED */ 558 sigreturn(p, uap, retval) 559 struct proc *p; 560 struct sigreturn_args *uap; 561 int *retval; 562 { 563 register struct sigcontext *scp; 564 register int *regs; 565 struct sigcontext ksc; 566 int error; 567 568 scp = uap->sigcntxp; 569 #ifdef DEBUG 570 if (sigdebug & SDB_FOLLOW) 571 printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp); 572 #endif 573 regs = p->p_md.md_regs; 574 /* 575 * Test and fetch the context structure. 576 * We grab it all at once for speed. 577 */ 578 error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof(ksc)); 579 if (error || ksc.sc_regs[ZERO] != 0xACEDBADE) { 580 #ifdef DEBUG 581 if (!(sigdebug & SDB_FOLLOW)) 582 printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp); 583 printf(" old sp %x ra %x pc %x\n", 584 regs[SP], regs[RA], regs[PC]); 585 printf(" new sp %x ra %x pc %x err %d z %x\n", 586 ksc.sc_regs[SP], ksc.sc_regs[RA], ksc.sc_regs[PC], 587 error, ksc.sc_regs[ZERO]); 588 #endif 589 return (EINVAL); 590 } 591 scp = &ksc; 592 /* 593 * Restore the user supplied information 594 */ 595 if (scp->sc_onstack & 01) 596 p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK; 597 else 598 p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK; 599 p->p_sigmask = scp->sc_mask &~ sigcantmask; 600 regs[PC] = scp->sc_pc; 601 bcopy((caddr_t)&scp->sc_regs[1], (caddr_t)®s[1], 602 sizeof(scp->sc_regs) - sizeof(int)); 603 if (scp->sc_fpused) 604 bcopy((caddr_t)scp->sc_fpregs, (caddr_t)&p->p_md.md_regs[F0], 605 sizeof(scp->sc_fpregs)); 606 return (EJUSTRETURN); 607 } 608 609 int waittime = -1; 610 611 boot(howto) 612 register int howto; 613 { 614 615 /* take a snap shot before clobbering any registers */ 616 if (curproc) 617 savectx(curproc->p_addr, 0); 618 619 #ifdef DEBUG 620 if (panicstr) 621 traceback(); 622 #endif 623 624 boothowto = howto; 625 if ((howto & RB_NOSYNC) == 0 && waittime < 0) { 626 register struct buf *bp; 627 int iter, nbusy; 628 629 waittime = 0; 630 
(void) spl0(); 631 printf("syncing disks... "); 632 /* 633 * Release vnodes held by texts before sync. 634 */ 635 if (panicstr == 0) 636 vnode_pager_umount(NULL); 637 #ifdef notyet 638 #include "fd.h" 639 #if NFD > 0 640 fdshutdown(); 641 #endif 642 #endif 643 sync(&proc0, (void *)NULL, (int *)NULL); 644 /* 645 * Unmount filesystems 646 */ 647 if (panicstr == 0) 648 vfs_unmountall(); 649 650 for (iter = 0; iter < 20; iter++) { 651 nbusy = 0; 652 for (bp = &buf[nbuf]; --bp >= buf; ) 653 if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY) 654 nbusy++; 655 if (nbusy == 0) 656 break; 657 printf("%d ", nbusy); 658 DELAY(40000 * iter); 659 } 660 if (nbusy) 661 printf("giving up\n"); 662 else 663 printf("done\n"); 664 /* 665 * If we've been adjusting the clock, the todr 666 * will be out of synch; adjust it now. 667 */ 668 resettodr(); 669 } 670 (void) splhigh(); /* extreme priority */ 671 if (howto & RB_HALT) { 672 halt(howto); 673 /*NOTREACHED*/ 674 } else { 675 if (howto & RB_DUMP) 676 dumpsys(); 677 halt(howto); 678 /*NOTREACHED*/ 679 } 680 /*NOTREACHED*/ 681 } 682 683 halt(howto) 684 int howto; 685 { 686 if (*(volatile u_char *)DIP_SWITCH & 0x20) 687 howto |= RB_HALT; 688 to_monitor(howto); 689 /*NOTREACHED*/ 690 } 691 692 int dumpmag = 0x8fca0101; /* magic number for savecore */ 693 int dumpsize = 0; /* also for savecore */ 694 long dumplo = 0; 695 696 dumpconf() 697 { 698 int nblks; 699 700 dumpsize = physmem; 701 if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) { 702 nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev); 703 if (dumpsize > btoc(dbtob(nblks - dumplo))) 704 dumpsize = btoc(dbtob(nblks - dumplo)); 705 else if (dumplo == 0) 706 dumplo = nblks - btodb(ctob(physmem)); 707 } 708 /* 709 * Don't dump on the first CLBYTES (why CLBYTES?) 710 * in case the dump device includes a disk label. 
 */
	if (dumplo < btodb(CLBYTES))
		dumplo = btodb(CLBYTES);
}

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{
	int error;

	msgbufmapped = 0;	/* msgbuf no longer safe once MMU is off */
	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	/* note: "case 0" deliberately last so "default" catches others */
	switch (error = (*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	default:
		printf("error %d\n", error);
		break;

	case 0:
		printf("succeeded\n");
	}
}

/*
 * machine dependent system variables.
 * Only CPU_CONSDEV (read-only console device) is supported.
 */
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern dev_t consdev;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points. Unfortunately, we can't read the hardware registers.
 * We guarantee that the time will be greater than the value obtained by a
 * previous call.
 */
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splclock();
	static struct timeval lasttime;	/* last value handed out */

	*tvp = time;
#ifdef notdef
	tvp->tv_usec += clkread();
	while (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
#endif
	/*
	 * If the clock hasn't ticked since the last call, bump usec by
	 * one to keep the result strictly increasing; carry into sec on
	 * overflow.  NOTE(review): the ">" tests leave tv_usec == 1000000
	 * unnormalized -- ">=" looks intended; confirm before changing.
	 */
	if (tvp->tv_sec == lasttime.tv_sec &&
	    tvp->tv_usec <= lasttime.tv_usec &&
	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	lasttime = *tvp;
	splx(s);
}

/*
 * Set up CPU board registers: LEDs, interrupt controller.
 */
initcpu()
{

	/*
	 * clear LEDs
	 * NOTE(review): the (char) cast binds only to DP_WRITE before
	 * the ORs; harmless since the store truncates to char anyway.
	 */
	*(char*)DEBUG_PORT = (char)DP_WRITE|DP_LED0|DP_LED1|DP_LED2|DP_LED3;

	/*
	 * clear all interrupts
	 */
	*(char*)INTCLR0 = 0;
	*(char*)INTCLR1 = 0;

	/*
	 * It's not a time to enable timer yet.
	 *
	 *	INTEN0:  PERR ABORT BERR TIMER KBD MS CFLT CBSY
	 *		  o     o    o    x    o   o   x    x
	 *	INTEN1:  BEEP SCC LANCE DMA SLOT1 SLOT3 EXT1 EXT3
	 *		  x     o    o    o    o    o    x    x
	 */

	*(char*)INTEN0 = (char) INTEN0_PERR|INTEN0_ABORT|INTEN0_BERR|
			 INTEN0_KBDINT|INTEN0_MSINT;

	*(char*)INTEN1 = (char) INTEN1_SCC|INTEN1_LANCE|INTEN1_DMA|
			 INTEN1_SLOT1|INTEN1_SLOT3;

	spl0();		/* safe to turn interrupts on now */
}

/*
 * Convert an ASCII string into an integer.
857 */ 858 int 859 atoi(s) 860 char *s; 861 { 862 int c; 863 unsigned base = 10, d; 864 int neg = 0, val = 0; 865 866 if (s == 0 || (c = *s++) == 0) 867 goto out; 868 869 /* skip spaces if any */ 870 while (c == ' ' || c == '\t') 871 c = *s++; 872 873 /* parse sign, allow more than one (compat) */ 874 while (c == '-') { 875 neg = !neg; 876 c = *s++; 877 } 878 879 /* parse base specification, if any */ 880 if (c == '0') { 881 c = *s++; 882 switch (c) { 883 case 'X': 884 case 'x': 885 base = 16; 886 break; 887 case 'B': 888 case 'b': 889 base = 2; 890 break; 891 default: 892 base = 8; 893 break; 894 } 895 } 896 897 /* parse number proper */ 898 for (;;) { 899 if (c >= '0' && c <= '9') 900 d = c - '0'; 901 else if (c >= 'a' && c <= 'z') 902 d = c - 'a' + 10; 903 else if (c >= 'A' && c <= 'Z') 904 d = c - 'A' + 10; 905 else 906 break; 907 val *= base; 908 val += d; 909 c = *s++; 910 } 911 if (neg) 912 val = -val; 913 out: 914 return val; 915 } 916 917 #ifdef CPU_SINGLE 918 /* 919 * small ring buffers for keyboard/mouse 920 */ 921 struct ring_buf { 922 u_char head; 923 u_char tail; 924 u_char count; 925 u_char buf[13]; 926 } ring_buf[2]; 927 928 xputc(c, chan) 929 u_char c; 930 int chan; 931 { 932 register struct ring_buf *p = &ring_buf[chan]; 933 int s = splhigh(); 934 935 if (p->count >= sizeof (p->buf)) { 936 (void) splx(s); 937 return (-1); 938 } 939 p->buf[p->head] = c; 940 if (++p->head >= sizeof (p->buf)) 941 p->head = 0; 942 p->count++; 943 (void) splx(s); 944 return (c); 945 } 946 947 xgetc(chan) 948 int chan; 949 { 950 register struct ring_buf *p = &ring_buf[chan]; 951 int c; 952 int s = splhigh(); 953 954 if (p->count == 0) { 955 (void) splx(s); 956 return (-1); 957 } 958 c = p->buf[p->tail]; 959 if (++p->tail >= sizeof (p->buf)) 960 p->tail = 0; 961 p->count--; 962 (void) splx(s); 963 return (c); 964 } 965 #endif /* CPU_SINGLE */ 966