/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)machdep.c	7.9 (Berkeley) 10/23/87
 */

#include "reg.h"
#include "pte.h"
#include "psl.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "kernel.h"
#include "map.h"
#include "vm.h"
#include "proc.h"
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "inode.h"
#include "file.h"
#include "text.h"
#include "clist.h"
#include "callout.h"
#include "cmap.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "quota.h"
#include "malloc.h"

#include "frame.h"
#include "clock.h"
#include "cons.h"
#include "cpu.h"
#include "mem.h"
#include "mtpr.h"
#include "rpb.h"
#include "ka630.h"
#include "../vaxuba/ubavar.h"
#include "../vaxuba/ubareg.h"

/*
 * Declare these as initialized data so we can patch them.
 * A value of 0 means "compute a default at boot time" (see startup()).
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif

/*
 * Machine-dependent startup code.
 *
 * firstaddr is the physical page frame number of the first page
 * past the kernel image; it is advanced below as physical pages
 * are allocated and cleared for the system data structures.
 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j;
	register caddr_t v;
	int maxbufs, base, residual;

#if VAX630
	/*
	 * Leave last 5k of phys. memory as console work area.
	 */
	if (cpu == VAX_630)
		maxmem -= 10;
#endif
	/*
	 * Initialize error message buffer (at end of core).
	 * Map the msgbuf pages valid and kernel-writable, then
	 * flush the translation buffer.
	 */
	maxmem -= btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		*(int *)pte++ = PG_V | PG_KW | (maxmem + i);
	mtpr(TBIA, 0);

#if VAX630
#include "qv.h"
#if NQV > 0
	/*
	 * redirect console to qvss if it exists
	 */
	if (!qvcons_init())
		printf("qvss not initialized\n");
#endif
#include "qd.h"
#if NQD > 0
	/*
	 * redirect console to qdss if it exists
	 */
	if (!qdcons_init())
		printf("qdss not initialized\n");
#endif
#endif

#ifdef KDB
	kdb_init();
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem  = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(0x80000000 | (firstaddr * NBPG));
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloclim(inode, struct inode, ninode, inodeNINODE);
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(namecache, struct namecache, nchsize);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef QUOTA
	valloclim(quota, struct quota, nquota, quotaNQUOTA);
	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory.  Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * CLSIZE))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ 0x80000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - 0x80000000)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ 0x80000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ 0x80000000);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Each buffer gets MAXBSIZE of virtual space, but only "base"
	 * (or base+1 for the first "residual" buffers) clusters of
	 * physical memory behind it.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
	for (i = 0; i < residual; i++) {
		for (j = 0; j < (base + 1) * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}
	for (i = residual; i < nbuf; i++) {
		for (j = 0; j < base * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ 0x80000000);
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 0);			/* After we just cleared it all! */

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();	/* now safe to do malloc/free */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Clear restart inhibit flags.
	 */
	tocons(TXDB_CWSI);
	tocons(TXDB_CCSI);
}

#ifdef PGINPROF
/*
 * Return the difference (in microseconds)
 * between the current time and a previous
 * time as represented by the arguments.
 * If there is a pending clock interrupt
 * which has not been serviced due to high
 * ipl, return error code.
 */
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

	if (mfpr(ICCS)&ICCS_INT)
		return(-1);
	else
		return(((time.tv_sec-otime)*60 + lbolt-olbolt)*16667 +
		    mfpr(ICR)-oicr);
}
#endif

/*
 * Clear registers on exec.
 * Only the PC is actually set here; entry + 2 skips the
 * register save mask word at the start of the entry point.
 */
setregs(entry)
	u_long entry;
{
#ifdef notdef
	register int *rp;

	/* should pass args to init on the stack */
	/* should also fix this code before using it, it's wrong */
	/* wanna clear the scb? */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	u.u_ar0[PC] = entry + 2;
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
sendsig(p, sig, mask)
	int (*p)(), sig, mask;
{
	register struct sigcontext *scp;
	register int *regs;
	register struct sigframe {
		int	sf_signum;	/* signal number for handler */
		int	sf_code;	/* code (SIGILL/SIGFPE only) */
		struct	sigcontext *sf_scp;	/* context ptr for handler */
		int	(*sf_handler)();	/* handler address */
		int	sf_argcount;	/* calls frame for sigreturn: argc */
		struct	sigcontext *sf_scpcopy;	/* sigreturn's argument */
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void)grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		u.u_signal[SIGILL] = SIG_DFL;
		sig = sigmask(SIGILL);
		u.u_procp->p_sigignore &= ~sig;
		u.u_procp->p_sigcatch &= ~sig;
		u.u_procp->p_sigmask &= ~sig;
		psignal(u.u_procp, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	if (sig == SIGILL || sig == SIGFPE) {
		fp->sf_code = u.u_code;
		u.u_code = 0;
	} else
		fp->sf_code = 0;
	fp->sf_scp = scp;
	fp->sf_handler = p;
	/*
	 * Build the calls argument frame to be used to call sigreturn
	 */
	fp->sf_argcount = 1;
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_ap = regs[AP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	regs[SP] = (int)fp;
	regs[PS] &= ~(PSL_CM|PSL_FPD);
	regs[PC] = (int)u.u_pcb.pcb_sigc;
	return;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
	struct a {
		struct sigcontext *sigcntxp;	/* user context pointer */
	};
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = ((struct a *)(u.u_ap))->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return;
	/*
	 * Reject a PSL with must-be-zero, IPL, or interrupt-stack bits
	 * set, one not in (previous and current) user mode, or one in
	 * compatibility mode with incompatible trap-enable bits set.
	 */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
	    ((scp->sc_ps & PSL_CM) &&
	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0)) {
		u.u_error = EINVAL;
		return;
	}
	u.u_eosys = JUSTRETURN;
	u.u_onstack = scp->sc_onstack & 01;
	/* SIGKILL, SIGCONT, SIGSTOP can never be masked */
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[FP] = scp->sc_fp;
	regs[AP] = scp->sc_ap;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
}

/* XXX - BEGIN 4.2 COMPATIBILITY */
/*
 * Compatibility with 4.2 chmk $139 used by longjmp()
 */
osigcleanup()
{
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = (struct sigcontext *)fuword((caddr_t)regs[SP]);
	if ((int)scp == -1)
		return;
	/* only the first three words of the context are used here */
	if (useracc((caddr_t)scp, 3 * sizeof (int), B_WRITE) == 0)
		return;
	u.u_onstack = scp->sc_onstack & 01;
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[SP] = scp->sc_sp;
}
/* XXX - END 4.2 COMPATIBILITY */

#ifdef notdef
/*
 * Simulate an rti: unwind one calls frame from the user stack,
 * restoring the registers named in the entry mask, then pop the
 * pc/psl pair and sanitize the psl.  (Compiled out.)
 */
dorti()
{
	struct frame frame;
	register int sp;
	register int reg, mask;
	extern int ipcreg[];

	(void) copyin((caddr_t)u.u_ar0[FP], (caddr_t)&frame, sizeof (frame));
	sp = u.u_ar0[FP] + sizeof (frame);
	u.u_ar0[PC] = frame.fr_savpc;
	u.u_ar0[FP] = frame.fr_savfp;
	u.u_ar0[AP] = frame.fr_savap;
	mask = frame.fr_mask;
	for (reg = 0; reg <= 11; reg++) {
		if (mask&1) {
			u.u_ar0[ipcreg[reg]] = fuword((caddr_t)sp);
			sp += 4;
		}
		mask >>= 1;
	}
	sp += frame.fr_spa;
	u.u_ar0[PS] = (u.u_ar0[PS] & 0xffff0000) | frame.fr_psw;
	if (frame.fr_s)
		sp += 4 + 4 * (fuword((caddr_t)sp) & 0xff);
	/* phew, now the rei */
	u.u_ar0[PC] = fuword((caddr_t)sp);
	sp += 4;
	u.u_ar0[PS] = fuword((caddr_t)sp);
	sp += 4;
	u.u_ar0[PS] |= PSL_USERSET;
	u.u_ar0[PS] &= ~PSL_USERCLR;
	u.u_ar0[SP] = (int)sp;
}
#endif

/*
 * Memenable enables the memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs.  Thus we report at most once per memintvl.
 */
int	memintvl = MEMINTVL;

memenable()
{
	register struct mcr *mcr;
	register int m;

#if VAX630
	if (cpu == VAX_630)
		return;		/* no memory controller registers on the 630 */
#endif
#ifdef VAX8600
	if (cpu == VAX_8600) {
		M8600_ENA;
	} else
#endif
	for (m = 0; m < nmcr; m++) {
		mcr = mcraddr[m];
		switch (mcrtype[m]) {
#if VAX780
		case M780C:
			M780C_ENA(mcr);
			break;
		case M780EL:
			M780EL_ENA(mcr);
			break;
		case M780EU:
			M780EU_ENA(mcr);
			break;
#endif
#if VAX750
		case M750:
			M750_ENA(mcr);
			break;
#endif
#if VAX730
		case M730:
			M730_ENA(mcr);
			break;
#endif
		}
	}
	/* re-arm ourselves; memintvl <= 0 disables periodic reporting */
	if (memintvl > 0)
		timeout(memenable, (caddr_t)0, memintvl*hz);
}

/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts.  It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controllers.
 */
memerr()
{
#ifdef VAX8600
	register int reg11;	/* known to be r11 below */
#endif
	register struct mcr *mcr;
	register int m;

#if VAX630
	if (cpu == VAX_630)
		return;		/* no memory controller registers on the 630 */
#endif
#ifdef VAX8600
	if (cpu == VAX_8600) {
		int mdecc, mear, mstat1, mstat2, array;

		/*
		 * Scratchpad registers in the Ebox must be read by
		 * storing their ID number in ESPA and then immediately
		 * reading ESPD's contents with no other intervening
		 * machine instructions!
		 *
		 * The asm's below have a number of constants which
		 * are defined correctly in mem.h and mtpr.h.
		 */
#ifdef lint
		reg11 = 0;
#else
		/* ESPA <- MDECC id; reg11 (r11) <- ESPD */
		asm("mtpr $0x27,$0x4e; mfpr $0x4f,r11");
#endif
		mdecc = reg11;	/* must acknowledge interrupt? */
		if (M8600_MEMERR(mdecc)) {
			/* same pattern: fetch MEAR, MSTAT1, MSTAT2 via r11 */
			asm("mtpr $0x2a,$0x4e; mfpr $0x4f,r11");
			mear = reg11;
			asm("mtpr $0x25,$0x4e; mfpr $0x4f,r11");
			mstat1 = reg11;
			asm("mtpr $0x26,$0x4e; mfpr $0x4f,r11");
			mstat2 = reg11;
			array = M8600_ARRAY(mear);

			printf("mcr0: ecc error, addr %x (array %d) syn %x\n",
			    M8600_ADDR(mear), array, M8600_SYN(mdecc));
			printf("\tMSTAT1 = %b\n\tMSTAT2 = %b\n",
			    mstat1, M8600_MSTAT1_BITS,
			    mstat2, M8600_MSTAT2_BITS);
			M8600_INH;
		}
	} else
#endif
	for (m = 0; m < nmcr; m++) {
		mcr = mcraddr[m];
		switch (mcrtype[m]) {
#if VAX780
		case M780C:
			if (M780C_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780C_ADDR(mcr), M780C_SYN(mcr));
#ifdef TRENDATA
				memlog(m, mcr);
#endif
				M780C_INH(mcr);
			}
			break;

		case M780EL:
			if (M780EL_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780EL_ADDR(mcr), M780EL_SYN(mcr));
				M780EL_INH(mcr);
			}
			break;

		case M780EU:
			if (M780EU_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780EU_ADDR(mcr), M780EU_SYN(mcr));
				M780EU_INH(mcr);
			}
			break;
#endif
#if VAX750
		case M750:
			if (M750_ERR(mcr)) {
				struct mcr amcr;
				amcr.mc_reg[0] = mcr->mc_reg[0];
				printf("mcr%d: %s",
				    m, (amcr.mc_reg[0] & M750_UNCORR) ?
				    "hard error" : "soft ecc");
				printf(" addr %x syn %x\n",
				    M750_ADDR(&amcr), M750_SYN(&amcr));
				M750_INH(mcr);
			}
			break;
#endif
#if VAX730
		case M730: {
			struct mcr amcr;

			/*
			 * Must be careful on the 730 not to use invalid
			 * instructions in I/O space, so make a copy;
			 */
			amcr.mc_reg[0] = mcr->mc_reg[0];
			amcr.mc_reg[1] = mcr->mc_reg[1];
			if (M730_ERR(&amcr)) {
				printf("mcr%d: %s",
				    m, (amcr.mc_reg[1] & M730_UNCORR) ?
				    "hard error" : "soft ecc");
				printf(" addr %x syn %x\n",
				    M730_ADDR(&amcr), M730_SYN(&amcr));
				M730_INH(mcr);
			}
			break;
		}
#endif
		}
	}
}

#ifdef TRENDATA
/*
 * Figure out what chip to replace on Trendata boards.
 * Assumes all your memory is Trendata or the non-Trendata
 * memory never fails..
 */
/* map from ECC syndrome to the chip position label on the board */
struct {
	u_char	m_syndrome;
	char	m_chip[4];
} memlogtab[] = {
	0x01,	"C00",	0x02,	"C01",	0x04,	"C02",	0x08,	"C03",
	0x10,	"C04",	0x19,	"L01",	0x1A,	"L02",	0x1C,	"L04",
	0x1F,	"L07",	0x20,	"C05",	0x38,	"L00",	0x3B,	"L03",
	0x3D,	"L05",	0x3E,	"L06",	0x40,	"C06",	0x49,	"L09",
	0x4A,	"L10",	0x4c,	"L12",	0x4F,	"L15",	0x51,	"L17",
	0x52,	"L18",	0x54,	"L20",	0x57,	"L23",	0x58,	"L24",
	0x5B,	"L27",	0x5D,	"L29",	0x5E,	"L30",	0x68,	"L08",
	0x6B,	"L11",	0x6D,	"L13",	0x6E,	"L14",	0x70,	"L16",
	0x73,	"L19",	0x75,	"L21",	0x76,	"L22",	0x79,	"L25",
	0x7A,	"L26",	0x7C,	"L28",	0x7F,	"L31",	0x80,	"C07",
	0x89,	"U01",	0x8A,	"U02",	0x8C,	"U04",	0x8F,	"U07",
	0x91,	"U09",	0x92,	"U10",	0x94,	"U12",	0x97,	"U15",
	0x98,	"U16",	0x9B,	"U19",	0x9D,	"U21",	0x9E,	"U22",
	0xA8,	"U00",	0xAB,	"U03",	0xAD,	"U05",	0xAE,	"U06",
	0xB0,	"U08",	0xB3,	"U11",	0xB5,	"U13",	0xB6,	"U14",
	0xB9,	"U17",	0xBA,	"U18",	0xBC,	"U20",	0xBF,	"U23",
	0xC1,	"U25",	0xC2,	"U26",	0xC4,	"U28",	0xC7,	"U31",
	0xE0,	"U24",	0xE3,	"U27",	0xE5,	"U29",	0xE6,	"U30"
};

/*
 * Report which chip to replace for the error latched in memory
 * controller m (780/C only); falls back to a generic message when
 * the syndrome matches no single-chip entry.
 */
memlog (m, mcr)
	int m;
	struct mcr *mcr;
{
	register i;

	switch (mcrtype[m]) {

#if VAX780
	case M780C:
	for (i = 0; i < (sizeof (memlogtab) / sizeof (memlogtab[0])); i++)
		if ((u_char)(M780C_SYN(mcr)) == memlogtab[i].m_syndrome) {
			printf (
	"mcr%d: replace %s chip in %s bank of memory board %d (0-15)\n",
				m,
				memlogtab[i].m_chip,
				(M780C_ADDR(mcr) & 0x8000) ?
					"upper" : "lower",
				(M780C_ADDR(mcr) >> 16));
			return;
		}
	printf ("mcr%d: multiple errors, not traceable\n", m);
	break;
#endif
	}
}
#endif

/*
 * Invalidate all pte's in a single cluster.
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	/* the asm below relies on addr being allocated to r11 */
	asm(".set TBIS,58");
	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr(TBIS, addr);
#else
		asm("mtpr r11,$TBIS");
#endif
		addr += NBPG;
	}
}

int	waittime = -1;

/*
 * Reboot/halt the machine: optionally sync the disks, then halt,
 * dump, or reboot according to the RB_* flags in arghowto.
 */
boot(arghowto)
	int arghowto;
{
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	howto = arghowto;
	/* sync at most once (waittime guards against recursion via panic) */
	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			xumount(NODEV);
		update();

		/* wait (with increasing patience) for busy buffers to drain */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splx(0x1f);			/* extreme priority */
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		mtpr(IPL, 0x1f);
		for (;;)
			;
	} else {
		if (howto & RB_DUMP) {
			doadump();		/* TXDB_BOOT's itself */
			/*NOTREACHED*/
		}
		tocons(TXDB_BOOT);
	}
#if defined(VAX750) || defined(VAX730) || defined(VAX630)
	if (cpu == VAX_750 || cpu == VAX_730 || cpu == VAX_630)
		{ asm("movl r11,r5"); }	/* boot flags go in r5 */
#endif
	for (;;)
		asm("halt");
#ifdef lint
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
	/*NOTREACHED*/
}

/*
 * Write the character/command c to the console, waiting for the
 * transmitter to be ready; on the 8600 the console must first be
 * selected as the logical destination and the old mask restored
 * afterwards.
 */
tocons(c)
{
	register oldmask;

	while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
		continue;

	switch (cpu) {

#if VAX780 || VAX750 || VAX730 || VAX630
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_630:
		c |= TXDB_CONS;
		break;
#endif

#if VAX8600
	case VAX_8600:
		mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		break;
#endif
	}

	mtpr(TXDB, c);

#if VAX8600
	switch (cpu) {

	case VAX_8600:
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		mtpr(TXCS, oldmask | TXCS_WMASK);
		break;
	}
#endif
}

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{

	rpb.rp_flag = 1;
	if (dumpdev == NODEV)
		return;
#ifdef notdef
	if ((minor(dumpdev)&07) != 1)
		return;
#endif
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumplo == 0 && bdevsw[major(dumpdev)].d_psize)
		dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) - physmem;
	if (dumplo < 0)
		dumplo = 0;
	dumpsize = physmem;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error");
		break;

	default:
		printf("succeeded");
		break;
	}
}

/*
 * Machine check error recovery code.
 * Print out the machine check frame and then give up.
 */
#if VAX8600
#define NMC8600	7
char *mc8600[] = {
	"unkn type",	"fbox error",	"ebox error",	"ibox error",
	"mbox error",	"tbuf error",	"mbox 1D error"
};
/* codes for above */
#define	MC_FBOX		1
#define	MC_EBOX		2
#define	MC_IBOX		3
#define	MC_MBOX		4
#define	MC_TBUF		5
#define	MC_MBOX1D	6

/* error bits */
#define	MBOX_FE		0x8000		/* Mbox fatal error */
#define	FBOX_SERV	0x10000000	/* Fbox service error */
#define	IBOX_ERR	0x2000		/* Ibox error */
#define	EBOX_ERR	0x1e00		/* Ebox error */
#define	MBOX_1D		0x81d0000	/* Mbox 1D error */
#define	EDP_PE		0x200
#endif

#if defined(VAX780) || defined(VAX750)
char *mc780[] = {
	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
	"cp rdtimo",	"cp rds",	"ucode lost",	0,
	0,		0,		"ib tbuf par",	0,
	"ib rds",	"ib rd timo",	0,		"ib cache par"
};
#define	MC750_TBERR	2		/* type code of cp tbuf par */
#define	MC750_TBPAR	4		/* tbuf par bit in mcesr */
#endif

#if VAX730
#define	NMC730	12
char *mc730[] = {
	"tb par",	"bad retry",	"bad intr id",	"cant write ptem",
	"unkn mcr err",	"iib rd err",	"nxm ref",	"cp rds",
	"unalgn ioref",	"nonlw ioref",	"bad ioaddr",	"unalgn ubaddr",
};
#endif
#if VAX630
#define	NMC630	10
extern struct ka630cpu ka630cpu;
char *mc630[] = {
	0,		"immcr (fsd)",	"immcr (ssd)",	"fpu err 0",
	"fpu err 7",	"mmu st(tb)",	"mmu st(m=0)",	"pte in p0",
	"pte in p1",	"un intr id",
};
#endif

/*
 * Frame for each cpu
 */
struct mc780frame {
	int	mc8_bcnt;		/* byte count == 0x28 */
	int	mc8_summary;		/* summary parameter (as above) */
	int	mc8_cpues;		/* cpu error status */
	int	mc8_upc;		/* micro pc */
	int	mc8_vaviba;		/* va/viba register */
	int	mc8_dreg;		/* d register */
	int	mc8_tber0;		/* tbuf error reg 0 */
	int	mc8_tber1;		/* tbuf error reg 1 */
	int	mc8_timo;		/* timeout address divided by 4 */
	int	mc8_parity;		/* parity */
	int	mc8_sbier;		/* sbi error register */
	int	mc8_pc;			/* trapped pc */
	int	mc8_psl;		/* trapped psl */
};
struct mc750frame {
	int	mc5_bcnt;		/* byte count == 0x28 */
	int	mc5_summary;		/* summary parameter (as above) */
	int	mc5_va;			/* virtual address register */
	int	mc5_errpc;		/* error pc */
	int	mc5_mdr;
	int	mc5_svmode;		/* saved mode register */
	int	mc5_rdtimo;		/* read lock timeout */
	int	mc5_tbgpar;		/* tb group parity error register */
	int	mc5_cacherr;		/* cache error register */
	int	mc5_buserr;		/* bus error register */
	int	mc5_mcesr;		/* machine check status register */
	int	mc5_pc;			/* trapped pc */
	int	mc5_psl;		/* trapped psl */
};
struct mc730frame {
	int	mc3_bcnt;		/* byte count == 0xc */
	int	mc3_summary;		/* summary parameter */
	int	mc3_parm[2];		/* parameter 1 and 2 */
	int	mc3_pc;			/* trapped pc */
	int	mc3_psl;		/* trapped psl */
};
struct mc630frame {
	int	mc63_bcnt;		/* byte count == 0xc */
	int	mc63_summary;		/* summary parameter */
	int	mc63_mrvaddr;		/* most recent vad */
	int	mc63_istate;		/* internal state */
	int	mc63_pc;		/* trapped pc */
	int	mc63_psl;		/* trapped psl */
};
struct mc8600frame {
	int	mc6_bcnt;		/* byte count == 0x58 */
	int	mc6_ehmsts;
	int	mc6_evmqsav;
	int	mc6_ebcs;
	int	mc6_edpsr;
	int	mc6_cslint;
	int	mc6_ibesr;
	int	mc6_ebxwd1;
	int	mc6_ebxwd2;
	int	mc6_ivasav;
	int	mc6_vibasav;
	int	mc6_esasav;
	int	mc6_isasav;
	int	mc6_cpc;
	int	mc6_mstat1;
	int	mc6_mstat2;
	int	mc6_mdecc;
	int	mc6_merg;
	int	mc6_cshctl;
	int	mc6_mear;
	int	mc6_medr;
	int	mc6_accs;
	int	mc6_cses;
	int	mc6_pc;			/* trapped pc */
	int	mc6_psl;		/* trapped psl */
};

/*
 * Decode and print the per-cpu machine check frame pointed to by
 * cmcf, reset the relevant error registers, then panic (except for
 * the recoverable 750 tbuf parity case, which returns).
 */
machinecheck(cmcf)
	caddr_t cmcf;
{
	/* summary word is at the same offset in every frame layout */
	register u_int type = ((struct mc780frame *)cmcf)->mc8_summary;

	printf("machine check %x: ", type);
	switch (cpu) {
#if VAX8600
	case VAX_8600: {
		register struct mc8600frame *mcf = (struct mc8600frame *)cmcf;

		/* classify the failure by priority into ehmsts low bits */
		if (mcf->mc6_ebcs & MBOX_FE)
			mcf->mc6_ehmsts |= MC_MBOX;
		else if (mcf->mc6_ehmsts & FBOX_SERV)
			mcf->mc6_ehmsts |= MC_FBOX;
		else if (mcf->mc6_ebcs & EBOX_ERR) {
			if (mcf->mc6_ebcs & EDP_PE)
				mcf->mc6_ehmsts |= MC_MBOX;
			else
				mcf->mc6_ehmsts |= MC_EBOX;
		} else if (mcf->mc6_ehmsts & IBOX_ERR)
			mcf->mc6_ehmsts |= MC_IBOX;
		else if (mcf->mc6_mstat1 & M8600_TB_ERR)
			mcf->mc6_ehmsts |= MC_TBUF;
		else if ((mcf->mc6_cslint & MBOX_1D) == MBOX_1D)
			mcf->mc6_ehmsts |= MC_MBOX1D;

		type = mcf->mc6_ehmsts & 0x7;
		if (type < NMC8600)
			printf("machine check %x: %s", type, mc8600[type]);
		printf("\n");
		printf("\tehm.sts %x evmqsav %x ebcs %x edpsr %x cslint %x\n",
		    mcf->mc6_ehmsts, mcf->mc6_evmqsav, mcf->mc6_ebcs,
		    mcf->mc6_edpsr, mcf->mc6_cslint);
		printf("\tibesr %x ebxwd %x %x ivasav %x vibasav %x\n",
		    mcf->mc6_ibesr, mcf->mc6_ebxwd1, mcf->mc6_ebxwd2,
		    mcf->mc6_ivasav, mcf->mc6_vibasav);
		printf("\tesasav %x isasav %x cpc %x mstat %x %x mdecc %x\n",
		    mcf->mc6_esasav, mcf->mc6_isasav, mcf->mc6_cpc,
		    mcf->mc6_mstat1, mcf->mc6_mstat2, mcf->mc6_mdecc);
		printf("\tmerg %x cshctl %x mear %x medr %x accs %x cses %x\n",
		    mcf->mc6_merg, mcf->mc6_cshctl, mcf->mc6_mear,
		    mcf->mc6_medr, mcf->mc6_accs, mcf->mc6_cses);
		printf("\tpc %x psl %x\n", mcf->mc6_pc, mcf->mc6_psl);
		mtpr(EHSR, 0);
		break;
	};
#endif
#if VAX780
	case VAX_780: {
		register struct mc780frame *mcf = (struct mc780frame *)cmcf;

		register int sbifs;
		printf("%s%s\n", mc780[type&0xf],
		    (type&0xf0) ? " abort" : " fault");
		printf("\tcpues %x upc %x va/viba %x dreg %x tber %x %x\n",
		    mcf->mc8_cpues, mcf->mc8_upc, mcf->mc8_vaviba,
		    mcf->mc8_dreg, mcf->mc8_tber0, mcf->mc8_tber1);
		sbifs = mfpr(SBIFS);
		printf("\ttimo %x parity %x sbier %x pc %x psl %x sbifs %x\n",
		    mcf->mc8_timo*4, mcf->mc8_parity, mcf->mc8_sbier,
		    mcf->mc8_pc, mcf->mc8_psl, sbifs);
		/* THE FUNNY BITS IN THE FOLLOWING ARE FROM THE ``BLACK */
		/* BOOK'' AND SHOULD BE PUT IN AN ``sbi.h'' */
		mtpr(SBIFS, sbifs &~ 0x2000000);
		mtpr(SBIER, mfpr(SBIER) | 0x70c0);
		break;
	}
#endif
#if VAX750
	case VAX_750: {
		register struct mc750frame *mcf = (struct mc750frame *)cmcf;

		int mcsr = mfpr(MCSR);
		printf("%s%s\n", mc780[type&0xf],
		    (type&0xf0) ? " abort" : " fault");
		mtpr(TBIA, 0);
		mtpr(MCESR, 0xf);
		printf("\tva %x errpc %x mdr %x smr %x rdtimo %x tbgpar %x cacherr %x\n",
		    mcf->mc5_va, mcf->mc5_errpc, mcf->mc5_mdr, mcf->mc5_svmode,
		    mcf->mc5_rdtimo, mcf->mc5_tbgpar, mcf->mc5_cacherr);
		printf("\tbuserr %x mcesr %x pc %x psl %x mcsr %x\n",
		    mcf->mc5_buserr, mcf->mc5_mcesr, mcf->mc5_pc, mcf->mc5_psl,
		    mcsr);
		/* tbuf parity alone is recoverable: tb already flushed above */
		if (type == MC750_TBERR && (mcf->mc5_mcesr&0xe) == MC750_TBPAR){
			printf("tbuf par: flushing and returning\n");
			return;
		}
		break;
	}
#endif
#if VAX730
	case VAX_730: {
		register struct mc730frame *mcf = (struct mc730frame *)cmcf;

		if (type < NMC730)
			printf("%s", mc730[type]);
		printf("\n");
		printf("params %x,%x pc %x psl %x mcesr %x\n",
		    mcf->mc3_parm[0], mcf->mc3_parm[1],
		    mcf->mc3_pc, mcf->mc3_psl, mfpr(MCESR));
		mtpr(MCESR, 0xf);
		break;
	}
#endif
#if VAX630
	case VAX_630: {
		register struct ka630cpu *ka630addr = &ka630cpu;
		register struct mc630frame *mcf = (struct mc630frame *)cmcf;
		printf("vap %x istate %x pc %x psl %x\n",
		    mcf->mc63_mrvaddr, mcf->mc63_istate,
		    mcf->mc63_pc, mcf->mc63_psl);
		if (ka630addr->ka630_mser & KA630MSER_MERR) {
			printf("mser=0x%x ",ka630addr->ka630_mser);
			if (ka630addr->ka630_mser & KA630MSER_CPUER)
				printf("page=%d",ka630addr->ka630_cear);
			if (ka630addr->ka630_mser & KA630MSER_DQPE)
				printf("page=%d",ka630addr->ka630_dear);
			printf("\n");
		}
		break;
	}
#endif
	}
	memerr();
	panic("mchk");
}

/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.  We do this by reading the interval count
 * register to determine the time remaining to the next clock tick.
1199 * We must compensate for wraparound which is not yet reflected in the time 1200 * (which happens when the ICR hits 0 and wraps after the splhigh(), 1201 * but before the mfpr(ICR)). Also check that this time is no less than 1202 * any previously-reported time, which could happen around the time 1203 * of a clock adjustment. Just for fun, we guarantee that the time 1204 * will be greater than the value obtained by a previous call. 1205 */ 1206 microtime(tvp) 1207 register struct timeval *tvp; 1208 { 1209 int s = splhigh(); 1210 static struct timeval lasttime; 1211 register long t; 1212 1213 *tvp = time; 1214 t = mfpr(ICR); 1215 if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT)) 1216 t += tick; 1217 tvp->tv_usec += tick + t; 1218 if (tvp->tv_usec > 1000000) { 1219 tvp->tv_sec++; 1220 tvp->tv_usec -= 1000000; 1221 } 1222 if (tvp->tv_sec == lasttime.tv_sec && 1223 tvp->tv_usec <= lasttime.tv_usec && 1224 (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) { 1225 tvp->tv_sec++; 1226 tvp->tv_usec -= 1000000; 1227 } 1228 lasttime = *tvp; 1229 splx(s); 1230 } 1231 1232 physstrat(bp, strat, prio) 1233 struct buf *bp; 1234 int (*strat)(), prio; 1235 { 1236 int s; 1237 1238 (*strat)(bp); 1239 /* pageout daemon doesn't wait for pushed pages */ 1240 if (bp->b_flags & B_DIRTY) 1241 return; 1242 s = splbio(); 1243 while ((bp->b_flags & B_DONE) == 0) 1244 sleep((caddr_t)bp, prio); 1245 splx(s); 1246 } 1247 1248 initcpu() 1249 { 1250 /* 1251 * Enable cache. 1252 */ 1253 switch (cpu) { 1254 1255 #if VAX780 1256 case VAX_780: 1257 mtpr(SBIMT, 0x200000); 1258 break; 1259 #endif 1260 #if VAX750 1261 case VAX_750: 1262 mtpr(CADR, 0); 1263 break; 1264 #endif 1265 #if VAX8600 1266 case VAX_8600: 1267 mtpr(CSWP, 3); 1268 break; 1269 #endif 1270 default: 1271 break; 1272 } 1273 1274 /* 1275 * Enable floating point accelerator if it exists 1276 * and has control register. 
1277 */ 1278 switch(cpu) { 1279 1280 #if VAX8600 || VAX780 1281 case VAX_780: 1282 case VAX_8600: 1283 if ((mfpr(ACCS) & 0xff) != 0) { 1284 printf("Enabling FPA\n"); 1285 mtpr(ACCS, 0x8000); 1286 } 1287 #endif 1288 default: 1289 break; 1290 } 1291 } 1292 1293 /* 1294 * Return a reasonable approximation to a time-of-day register. 1295 * More precisely, return a number that increases by one about 1296 * once every ten milliseconds. 1297 */ 1298 todr() 1299 { 1300 switch (cpu) { 1301 1302 #if VAX8600 || VAX8200 || VAX780 || VAX750 || VAX730 1303 case VAX_8600: 1304 /* case VAX_8200: */ 1305 case VAX_780: 1306 case VAX_750: 1307 case VAX_730: 1308 return (mfpr(TODR)); 1309 #endif 1310 1311 #if VAX630 1312 case VAX_630: 1313 /* XXX crude */ 1314 { static int t; DELAY(10000); return (++t); } 1315 #endif 1316 1317 default: 1318 panic("todr"); 1319 } 1320 /* NOTREACHED */ 1321 } 1322