/*	$NetBSD: machdep.c,v 1.29 2002/03/20 17:59:24 christos Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: machdep.c 1.74 92/12/20$
 *
 *	@(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

#include "opt_ddb.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#ifdef __ELF__
#include <sys/exec_elf.h>
#endif
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/pte.h>

#include <machine/kcore.h>	/* XXX should be pulled in by sys/kcore.h */

#include <dev/cons.h>

#define	MAXMEM	64*1024		/* XXX - from cmap.h */
#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

#include <news68k/news68k/machid.h>
#include <news68k/news68k/isr.h>

#include "le.h"
#include "kb.h"
#include "ms.h"
#include "si.h"
/* XXX etc. etc.
*/

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;	/* from <machine/param.h> */

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

/* Submaps carved out of kernel_map by cpu_startup(). */
struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

caddr_t	msgbufaddr;		/* KVA of the kernel message buffer */
int	maxmem;			/* max memory per process */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

/* Set up by pmap_bootstrap()/locore before we get here. */
extern	paddr_t avail_start, avail_end;
extern	char *kernel_text, *etext;
extern	int end, *esym;
extern	u_int lowram;

/* prototypes for local functions */
void	identifycpu __P((void));
void	initcpu __P((void));
void	parityenable __P((void));
void	parityerror __P((void));
void	init_intreg __P((void));
int	readidrom __P((u_char *));

int	cpu_dumpsize __P((void));
int	cpu_dump __P((int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *));
void	cpu_init_kcore_hdr __P((void));

#ifdef news1700
void	news1700_init __P((void));
#endif
#ifdef news1200
void	news1200_init __P((void));
#endif

/* functions called from locore.s */
void	dumpsys __P((void));
void	news68k_init __P((void));
void	straytrap __P((int, u_short));

/*
 * Machine-dependent crash dump header info.
 */
cpu_kcore_hdr_t cpu_kcore_hdr;

/*
 * Note that the value of delay_divisor is roughly
 * 2048 / cpuspeed (where cpuspeed is in MHz) on 68020
 * and 68030 systems.
 */
int	cpuspeed = 25;		/* relative cpu speed; XXX skewed on 68040 */
int	delay_divisor = 82;	/* delay constant */

/*
 * Early initialization, before main() is called.
160 */ 161 void 162 news68k_init() 163 { 164 int i; 165 166 /* 167 * Tell the VM system about available physical memory. The 168 * news68k only has one segment. 169 */ 170 uvm_page_physload(atop(avail_start), atop(avail_end), 171 atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT); 172 173 /* Initialize system variables. */ 174 switch (systype) { 175 #ifdef news1700 176 case NEWS1700: 177 news1700_init(); 178 break; 179 #endif 180 #ifdef news1200 181 case NEWS1200: 182 news1200_init(); 183 break; 184 #endif 185 default: 186 panic("impossible system type"); 187 } 188 189 isrinit(); 190 191 /* 192 * Initialize error message buffer (at end of core). 193 * avail_end was pre-decremented in pmap_bootstrap to compensate. 194 */ 195 for (i = 0; i < btoc(MSGBUFSIZE); i++) 196 pmap_kenter_pa((vaddr_t)msgbufaddr + i * NBPG, 197 avail_end + i * NBPG, VM_PROT_READ|VM_PROT_WRITE) 198 pmap_update(pmap_kernel()); 199 initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE)); 200 } 201 202 /* 203 * cpu_startup: allocate memory for variable-sized tables, 204 * initialize cpu, and do autoconfiguration. 205 */ 206 void 207 cpu_startup() 208 { 209 unsigned i; 210 caddr_t v; 211 int base, residual; 212 vaddr_t minaddr, maxaddr; 213 vsize_t size; 214 char pbuf[9]; 215 #ifdef DEBUG 216 extern int pmapdebug; 217 int opmapdebug = pmapdebug; 218 219 pmapdebug = 0; 220 #endif 221 222 /* 223 * Initialize the kernel crash dump header. 224 */ 225 cpu_init_kcore_hdr(); 226 227 /* 228 * Good {morning,afternoon,evening,night}. 229 */ 230 printf(version); 231 identifycpu(); 232 format_bytes(pbuf, sizeof(pbuf), ctob(physmem)); 233 printf("total memory = %s\n", pbuf); 234 235 /* 236 * Find out how much space we need, allocate it, 237 * and then give everything true virtual addresses. 
238 */ 239 size = (vsize_t)allocsys(NULL, NULL); 240 if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0) 241 panic("startup: no room for tables"); 242 if ((allocsys(v, NULL) - v) != size) 243 panic("startup: table size inconsistency"); 244 245 /* 246 * Now allocate buffers proper. They are different than the above 247 * in that they usually occupy more virtual memory than physical. 248 */ 249 size = MAXBSIZE * nbuf; 250 if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), 251 NULL, UVM_UNKNOWN_OFFSET, 0, 252 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, 253 UVM_ADV_NORMAL, 0)) != 0) 254 panic("startup: cannot allocate VM for buffers"); 255 minaddr = (vaddr_t)buffers; 256 base = bufpages / nbuf; 257 residual = bufpages % nbuf; 258 for (i = 0; i < nbuf; i++) { 259 vsize_t curbufsize; 260 vaddr_t curbuf; 261 struct vm_page *pg; 262 263 /* 264 * Each buffer has MAXBSIZE bytes of VM space allocated. Of 265 * that MAXBSIZE space, we allocate and map (base+1) pages 266 * for the first "residual" buffers, and then we allocate 267 * "base" pages for the rest. 268 */ 269 curbuf = (vaddr_t) buffers + (i * MAXBSIZE); 270 curbufsize = NBPG * ((i < residual) ? (base + 1) : base); 271 272 while (curbufsize) { 273 pg = uvm_pagealloc(NULL, 0, NULL, 0); 274 if (pg == NULL) 275 panic("cpu_startup: not enough memory for " 276 "buffer cache"); 277 pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), 278 VM_PROT_READ|VM_PROT_WRITE); 279 curbuf += PAGE_SIZE; 280 curbufsize -= PAGE_SIZE; 281 } 282 } 283 pmap_update(pmap_kernel()); 284 285 /* 286 * Allocate a submap for exec arguments. This map effectively 287 * limits the number of processes exec'ing at any time. 
288 */ 289 exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 290 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL); 291 292 /* 293 * Allocate a submap for physio 294 */ 295 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 296 VM_PHYS_SIZE, 0, FALSE, NULL); 297 298 /* 299 * Finally, allocate mbuf cluster submap. 300 */ 301 mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 302 nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL); 303 304 #ifdef DEBUG 305 pmapdebug = opmapdebug; 306 #endif 307 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free)); 308 printf("avail memory = %s\n", pbuf); 309 format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG); 310 printf("using %d buffers containing %s of memory\n", nbuf, pbuf); 311 312 /* 313 * Tell the VM system that the area before the text segment 314 * is invalid. 315 * 316 * XXX This is bogus; should just fix KERNBASE and 317 * XXX VM_MIN_KERNEL_ADDRESS, but not right now. 318 */ 319 if (uvm_map_protect(kernel_map, 0, m68k_round_page(&kernel_text), 320 UVM_PROT_NONE, TRUE) != 0) 321 panic("can't mark pre-text pages off-limits"); 322 323 /* 324 * Tell the VM system that writing to the kernel text isn't allowed. 325 * If we don't, we might end up COW'ing the text segment! 326 */ 327 if (uvm_map_protect(kernel_map, m68k_trunc_page(&kernel_text), 328 m68k_round_page(&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0) 329 panic("can't protect kernel text"); 330 331 /* 332 * Set up CPU-specific registers, cache, etc. 333 */ 334 initcpu(); 335 336 /* 337 * Set up buffers, so they can be used to read disk labels. 338 */ 339 bufinit(); 340 } 341 342 /* 343 * Set registers on exec. 
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct frame *frame = (struct frame *)p->p_md.md_regs;

	/* PSL for user mode; PC from the exec header (word-aligned). */
	frame->f_sr = PSL_USERSET;
	frame->f_pc = pack->ep_entry & ~1;
	/* Zero all data/address registers except A2 and SP below. */
	frame->f_regs[D0] = 0;
	frame->f_regs[D1] = 0;
	frame->f_regs[D2] = 0;
	frame->f_regs[D3] = 0;
	frame->f_regs[D4] = 0;
	frame->f_regs[D5] = 0;
	frame->f_regs[D6] = 0;
	frame->f_regs[D7] = 0;
	frame->f_regs[A0] = 0;
	frame->f_regs[A1] = 0;
	frame->f_regs[A2] = (int)p->p_psstr;	/* ps string area for user */
	frame->f_regs[A3] = 0;
	frame->f_regs[A4] = 0;
	frame->f_regs[A5] = 0;
	frame->f_regs[A6] = 0;
	frame->f_regs[SP] = stack;		/* initial user stack pointer */

	/* restore a null state frame */
	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
	if (fputype)
		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
}

/*
 * Info for CTL_HW
 */
char	cpu_model[124];		/* filled in by news1[27]00_init() */

int	news_machine_id;	/* serial number from the ID ROM */

/*
 * Print CPU identification at boot and recompute the DELAY() divisor
 * from the (model-dependent) cpuspeed value.
 */
void
identifycpu()
{

	printf("SONY NET WORK STATION, Model %s, ", cpu_model);
	printf("Machine ID #%d\n", news_machine_id);

	delay_divisor = (20480 / cpuspeed + 5) / 10;	/* XXX */
}

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	dev_t consdev;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		/* Report the console device, or NODEV if none attached. */
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

int	waittime = -1;		/* set once vfs_shutdown() has run */

/*
 * Machine-dependent reboot/halt/powerdown, called via reboot(2) or panic.
 * Syncs disks (unless RB_NOSYNC), optionally dumps core, runs shutdown
 * hooks, and finally transfers control to the PROM via doboot().
 */
void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{

#if __GNUC__	/* XXX work around lame compiler problem (gcc 2.7.2) */
	(void)&howto;
#endif

	/* take a snap shot before clobbering any registers */
	if (curproc && curproc->p_addr)
		savectx(&curproc->p_addr->u_pcb);

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

	/* If rebooting and a dump is requested, do it. */
	if (howto & RB_DUMP)
		dumpsys();

 haltsys:
	/* Run any shutdown hooks. */
	doshutdownhooks();

#if defined(PANICWAIT) && !defined(DDB)
	if ((howto & RB_HALT) == 0 && panicstr) {
		printf("hit any key to reboot...\n");
		(void)cngetc();
		printf("\n");
	}
#endif

	/* Finally, halt/reboot the system. */
	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		DELAY(1000000);
		doboot(RB_POWERDOWN);
		/* NOTREACHED */
	}

	if (howto & RB_HALT) {
		printf("System halted.\n\n");
		doboot(RB_HALT);
		/* NOTREACHED */
	}

	printf("rebooting...\n");
	DELAY(1000000);
	doboot(RB_AUTOBOOT);
	/* NOTREACHED */
}

/*
 * Initialize the kernel crash dump header.
 */
void
cpu_init_kcore_hdr()
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;

	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = NBPG;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype = mmutype;
	m->sg_v = SG_V;
	m->sg_frame = SG_FRAME;
	m->sg_ishift = SG_ISHIFT;
	m->sg_pmask = SG_PMASK;
	m->sg40_shift1 = SG4_SHIFT1;
	m->sg40_mask2 = SG4_MASK2;
	m->sg40_shift2 = SG4_SHIFT2;
	m->sg40_mask3 = SG4_MASK3;
	m->sg40_shift3 = SG4_SHIFT3;
	m->sg40_addr1 = SG4_ADDR1;
	m->sg40_addr2 = SG4_ADDR2;
	m->pg_v = PG_V;
	m->pg_frame = PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int32_t)(pmap_kernel()->pm_stpa);

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = lowram;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)&end;

	/*
	 * news68k has one contiguous memory segment.
	 */
	m->ram_segs[0].start = lowram;
	m->ram_segs[0].size = ctob(physmem);
}

/*
 * Compute the size of the machine-dependent crash dump header.
 * Returns size in disk blocks.
 */
int
cpu_dumpsize()
{
	int size;

	/* Segment header plus MD header, rounded up to whole disk blocks. */
	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	return (btodb(roundup(size, dbtob(1))));
}

/*
 * Called by dumpsys() to dump the machine-dependent header.
 * Writes one disk block at *blknop and advances *blknop past it.
 */
int
cpu_dump(dump, blknop)
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	daddr_t *blknop;
{
	int buf[dbtob(1) / sizeof(int)];
	cpu_kcore_hdr_t *chdr;
	kcore_seg_t *kseg;
	int error;

	/* Lay out segment header then cpu header within one disk block. */
	kseg = (kcore_seg_t *)buf;
	chdr = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(kcore_seg_t)) /
	    sizeof(int)];

	/* Create the segment header. */
	CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg->c_size = dbtob(1) - ALIGN(sizeof(kcore_seg_t));

	memcpy(chdr, &cpu_kcore_hdr, sizeof(cpu_kcore_hdr_t));
	error = (*dump)(dumpdev, *blknop, (caddr_t)buf, sizeof(buf));
	*blknop += btodb(sizeof(buf));
	return (error);
}

/*
 * These variables are needed by /sbin/savecore
 */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first NBPG of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf()
{
	int chdrsize;	/* size of dump header */
	int nblks;	/* size of dump area */
	int maj;

	if (dumpdev == NODEV)
		return;
	maj = major(dumpdev);
	if (maj < 0 || maj >= nblkdev)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdevsw[maj].d_psize == NULL)
		return;
	nblks = (*bdevsw[maj].d_psize)(dumpdev);
	chdrsize = cpu_dumpsize();

	dumpsize = btoc(cpu_kcore_hdr.un._m68k.ram_segs[0].size);

	/*
	 * Check to see if we will fit.  Note we always skip the
	 * first NBPG in case there is a disk label there.
	 */
	if (nblks < (ctod(dumpsize) + chdrsize + ctod(1))) {
		dumpsize = 0;
		dumplo = -1;
		return;
	}

	/*
	 * Put dump at the end of the partition.
	 */
	dumplo = (nblks - 1) - ctod(dumpsize) - chdrsize;
}

/*
 * Dump physical memory onto the dump device.  Called by cpu_reboot().
 */
void
dumpsys()
{
	daddr_t blkno;		/* current block to write */
				/* dump routine */
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int pg;			/* page being dumped */
	paddr_t maddr;		/* PA being dumped */
	int error;		/* error code from (*dump)() */

	/* XXX initialized here because of gcc lossage */
	maddr = lowram;
	pg = 0;

	/* Make sure dump device is valid. */
	if (dumpdev == NODEV)
		return;
	if (dumpsize == 0) {
		cpu_dumpconf();
		if (dumpsize == 0)
			return;
	}
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	dump = bdevsw[major(dumpdev)].d_dump;
	blkno = dumplo;

	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	printf("dump ");

	/* Write the dump header. */
	error = cpu_dump(dump, &blkno);
	if (error)
		goto bad;

	for (pg = 0; pg < dumpsize; pg++) {
#define NPGMB	(1024*1024/NBPG)
		/* print out how many MBs we have dumped */
		if (pg && (pg % NPGMB) == 0)
			printf("%d ", pg / NPGMB);
#undef NPGMB
		/* Map the next physical page at vmmap and write it out. */
		pmap_enter(pmap_kernel(), (vaddr_t)vmmap, maddr,
		    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);

		pmap_update(pmap_kernel());
		error = (*dump)(dumpdev, blkno, vmmap, NBPG);
		/*
		 * Shared error handling: the `goto bad' above jumps into
		 * the loop so header-write failures take the same switch.
		 */
 bad:
		switch (error) {
		case 0:
			maddr += NBPG;
			blkno += btodb(NBPG);
			break;

		case ENXIO:
			printf("device bad\n");
			return;

		case EFAULT:
			printf("device not ready\n");
			return;

		case EINVAL:
			printf("area improper\n");
			return;

		case EIO:
			printf("i/o error\n");
			return;

		case EINTR:
			printf("aborted from console\n");
			return;

		default:
			printf("error %d\n", error);
			return;
		}
	}
	printf("succeeded\n");
}

/*
 * CPU-specific initialization called from cpu_startup().
 */
void
initcpu()
{

#ifdef MAPPEDCOPY
	/*
	 * Initialize lower bound for doing copyin/copyout using
	 * page mapping (if not already set).  We don't do this on
	 * VAC machines as it loses big time.
	 */
	if (ectype == EC_VIRT)
		mappedcopysize = -1;	/* in case it was patched */
	else
		mappedcopysize = NBPG;
#endif
}

/*
 * Report an unexpected trap; called from locore.s.
 */
void
straytrap(pc, evec)
	int pc;
	u_short evec;
{
	printf("unexpected trap (vector offset %x) from %x\n",
	    evec & 0xFFF, pc);
}

/* XXX should change the interface, and make one badaddr() function */

int	*nofault;	/* fault recovery point used by the trap handler */

/*
 * Probe an address for read access of the given width (1, 2 or 4 bytes).
 * Returns non-zero if the access faulted.
 */
int
badaddr(addr, nbytes)
	caddr_t addr;
	int nbytes;
{
	int i;
	label_t faultbuf;

#ifdef lint
	i = *addr; if (i) return (0);
#endif

	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* Bus error took the longjmp path: address is bad. */
		nofault = (int *) 0;
		return(1);
	}
	switch (nbytes) {
	case 1:
		i = *(volatile char *)addr;
		break;

	case 2:
		i = *(volatile short *)addr;
		break;

	case 4:
		i = *(volatile int *)addr;
		break;

	default:
		panic("badaddr: bad request");
	}
	nofault = (int *) 0;
	return (0);
}

/*
 * Byte-wide variant of badaddr().
 */
int
badbaddr(addr)
	caddr_t addr;
{
	int i;
	label_t faultbuf;

	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		nofault = (int *) 0;
		return(1);
	}
	i = *(volatile char *)addr;
	nofault = (int *) 0;
	return(0);
}

/*
 * cpu_exec_aout_makecmds():
 *	cpu-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * XXX what are the special cases for the hp300?
 * XXX why is this COMPAT_NOMID? was something generating
 *     hp300 binaries with an a_mid of 0? i thought that was only
 *     done on little-endian machines...
-- cgd 832 */ 833 int 834 cpu_exec_aout_makecmds(p, epp) 835 struct proc *p; 836 struct exec_package *epp; 837 { 838 #if defined(COMPAT_NOMID) || defined(COMPAT_44) 839 u_long midmag, magic; 840 u_short mid; 841 int error; 842 struct exec *execp = epp->ep_hdr; 843 844 midmag = ntohl(execp->a_midmag); 845 mid = (midmag >> 16) & 0xffff; 846 magic = midmag & 0xffff; 847 848 midmag = mid << 16 | magic; 849 850 switch (midmag) { 851 #ifdef COMPAT_NOMID 852 case (MID_ZERO << 16) | ZMAGIC: 853 error = exec_aout_prep_oldzmagic(p, epp); 854 return(error); 855 #endif 856 #ifdef COMPAT_44 857 case (MID_HP300 << 16) | ZMAGIC: 858 error = exec_aout_prep_oldzmagic(p, epp); 859 return(error); 860 #endif 861 } 862 #endif /* !(defined(COMPAT_NOMID) || defined(COMPAT_44)) */ 863 864 return ENOEXEC; 865 } 866 867 /* 868 * System dependent initilization 869 */ 870 871 static volatile u_char *dip_switch, *int_status; 872 873 volatile u_char *idrom_addr, *ctrl_ast, *ctrl_int2; 874 volatile u_char *lance_mem, *ctrl_led, *sccport0a; 875 876 #ifdef news1700 877 static volatile u_char *ctrl_parity, *ctrl_parity_clr, *parity_vector; 878 879 struct news68k_model { 880 const int id; 881 const char *name; 882 }; 883 884 const struct news68k_model news68k_models[] = { 885 { ICK001, "ICK001" }, /* 1 */ 886 { ICK00X, "ICK00X" }, /* 2 */ 887 { NWS799, "NWS-799" }, /* 3 */ 888 { NWS800, "NWS-800" }, /* 4 */ 889 { NWS801, "NWS-801" }, /* 5 */ 890 { NWS802, "NWS-802" }, /* 6 */ 891 { NWS711, "NWS-711" }, /* 7 */ 892 { NWS721, "NWS-721" }, /* 8 */ 893 { NWS1850, "NWS-1850" }, /* 9 */ 894 { NWS810, "NWS-810" }, /* 10 */ 895 { NWS811, "NWS-811" }, /* 11 */ 896 { NWS1830, "NWS-1830" }, /* 12 */ 897 { NWS1750, "NWS-1750" }, /* 13 */ 898 { NWS1720, "NWS-1720" }, /* 14 */ 899 { NWS1930, "NWS-1930" }, /* 15 */ 900 { NWS1960, "NWS-1960" }, /* 16 */ 901 { NWS712, "NWS-712" }, /* 17 */ 902 { NWS1860, "NWS-1860" }, /* 18 */ 903 { PWS1630, "PWS-1630" }, /* 19 */ 904 { NWS820, "NWS-820" }, /* 20 */ 905 { NWS821, 
"NWS-821" }, /* 21 */ 906 { NWS1760, "NWS-1760" }, /* 22 */ 907 { NWS1710, "NWS-1710" }, /* 23 */ 908 { NWS830, "NWS-830" }, /* 30 */ 909 { NWS831, "NWS-831" }, /* 31 */ 910 { NWS841, "NWS-841" }, /* 41 */ 911 { PWS1570, "PWS-1570" }, /* 52 */ 912 { PWS1590, "PWS-1590" }, /* 54 */ 913 { NWS1520, "NWS-1520" }, /* 56 */ 914 { PWS1550, "PWS-1550" }, /* 73 */ 915 { PWS1520, "PWS-1520" }, /* 74 */ 916 { PWS1560, "PWS-1560" }, /* 75 */ 917 { NWS1530, "NWS-1530" }, /* 76 */ 918 { NWS1580, "NWS-1580" }, /* 77 */ 919 { NWS1510, "NWS-1510" }, /* 78 */ 920 { NWS1410, "NWS-1410" }, /* 81 */ 921 { NWS1450, "NWS-1450" }, /* 85 */ 922 { NWS1460, "NWS-1460" }, /* 86 */ 923 { NWS891, "NWS-891" }, /* 91 */ 924 { NWS911, "NWS-911" }, /* 111 */ 925 { NWS921, "NWS-921" }, /* 121 */ 926 { 0, NULL } 927 }; 928 929 void 930 news1700_init() 931 { 932 struct oidrom idrom; 933 const char *t; 934 u_char *p, *q; 935 int i; 936 937 dip_switch = (u_char *)IIOV(0xe1c00100); 938 int_status = (u_char *)IIOV(0xe1c00200); 939 940 idrom_addr = (u_char *)IIOV(0xe1c00000); 941 ctrl_ast = (u_char *)IIOV(0xe1280000); 942 ctrl_int2 = (u_char *)IIOV(0xe1180000); 943 944 lance_mem = (u_char *)IIOV(0xe0e00000); 945 ctrl_led = (u_char *)IIOV(0xe0dc0000); 946 sccport0a = (u_char *)IIOV(0xe0d40002); 947 948 p = (u_char *)idrom_addr; 949 q = (u_char *)&idrom; 950 951 for (i = 0; i < sizeof(idrom); i++, p += 2) 952 *q++ = ((*p & 0x0f) << 4) | (*(p + 1) & 0x0f); 953 954 for (i = 0; news68k_models[i].name != NULL; i++) { 955 if (news68k_models[i].id == idrom.id_model) { 956 t = news68k_models[i].name; 957 } 958 } 959 if (t == NULL) 960 panic("unexpected system model.\n"); 961 962 strcat(cpu_model, t); 963 news_machine_id = (idrom.id_serial[0] << 8) + idrom.id_serial[1]; 964 965 ctrl_parity = (u_char *)IIOV(0xe1080000); 966 ctrl_parity_clr = (u_char *)IIOV(0xe1a00000); 967 parity_vector = (u_char *)IIOV(0xe1c00200); 968 969 parityenable(); 970 971 cpuspeed = 25; 972 } 973 974 /* 975 * parity error handling (vectored 
 * NMI?)
 */

/*
 * Hook the parity-error NMI vector and turn on parity checking in the
 * memory controller (NWS-1700 family only).
 */
void
parityenable()
{

#define PARITY_VECT 0xc0
#define PARITY_PRI 7

	*parity_vector = PARITY_VECT;

	isrlink_vectored((int (*) __P((void *)))parityerror, NULL,
	    PARITY_PRI, PARITY_VECT);

	*ctrl_parity_clr = 1;
	*ctrl_parity = 1;

#ifdef DEBUG
	printf("enable parity check\n");
#endif
}

static int innmihand;	/* simple mutex */

/*
 * Parity-error NMI handler: report and clear the latched error.
 */
void
parityerror()
{

	/* Prevent unwanted recursion. */
	if (innmihand)
		return;
	innmihand = 1;

#if 0 /* XXX need to implement XXX */
	panic("parity error");
#else
	printf("parity error detected.\n");
	*ctrl_parity_clr = 1;
#endif
	innmihand = 0;
}
#endif /* news1700 */

#ifdef news1200
/*
 * NWS-1200 family setup: device register addresses plus ID-ROM decode.
 */
void
news1200_init()
{
	struct idrom idrom;
	u_char *p, *q;
	int i;

	dip_switch	= (u_char *)IIOV(0xe1680000);
	int_status	= (u_char *)IIOV(0xe1200000);

	idrom_addr	= (u_char *)IIOV(0xe1400000);
	ctrl_ast	= (u_char *)IIOV(0xe1100000);
	ctrl_int2	= (u_char *)IIOV(0xe10c0000);

	lance_mem	= (u_char *)IIOV(0xe1a00000);
	ctrl_led	= (u_char *)IIOV(0xe1500001);
	sccport0a	= (u_char *)IIOV(0xe1780002);

	/* The ROM stores one nibble per byte; pack pairs into bytes. */
	p = (u_char *)idrom_addr;
	q = (u_char *)&idrom;
	for (i = 0; i < sizeof(idrom); i++, p += 2)
		*q++ = ((*p & 0x0f) << 4) | (*(p + 1) & 0x0f);

	strcat(cpu_model, idrom.id_model);
	news_machine_id = idrom.id_serial;

	cpuspeed = 25;
}
#endif /* news1200 */

/*
 * interrupt handlers
 * XXX should do better handling XXX
 */

void intrhand_lev2 __P((void));
void intrhand_lev3 __P((void));
void intrhand_lev4 __P((void));

/* Software-interrupt dispatch table, allocated by allocate_sir(). */
void	(*sir_routines[NSIR]) __P((void *));
void	*sir_args[NSIR];
u_char	ssir;			/* pending soft interrupt bits */
int	next_sir;		/* next free slot in sir_routines[] */

/*
 * Level 2 (software) interrupt: atomically collect the pending bits
 * and dispatch each registered soft interrupt routine.
 */
void
intrhand_lev2()
{
	int bit, s;
	u_char sintr;

	/* disable level 2 interrupt */
	*ctrl_int2 = 0;

	s = splhigh();
	sintr = ssir;
	ssir = 0;
	splx(s);

	intrcnt[2]++;
	uvmexp.intrs++;

	for (bit = 0; bit < next_sir; bit++) {
		if (sintr & (1 << bit)) {
			uvmexp.softs++;
			if (sir_routines[bit])
				sir_routines[bit](sir_args[bit]);
		}
	}
}

/*
 * Allocation routines for software interrupts.
 * Returns the ssir bit mask assigned to the new handler.
 */
u_char
allocate_sir(proc, arg)
	void (*proc) __P((void *));
	void *arg;
{
	int bit;

	if (next_sir >= NSIR)
		panic("allocate_sir: none left");
	bit = next_sir++;
	sir_routines[bit] = proc;
	sir_args[bit] = arg;
	return (1 << bit);
}

/*
 * Install the fixed net/clock soft interrupt handlers.
 */
void
init_sir()
{

	sir_routines[SIR_NET] = (void (*) __P((void *)))netintr;
	sir_routines[SIR_CLOCK] = softclock;
	next_sir = NEXT_SIR;
}

/*
 * Level 3 interrupt: currently only logged, not dispatched.
 */
void
intrhand_lev3()
{
	int stat;

	stat = *int_status;
	intrcnt[3]++;
	uvmexp.intrs++;
#if 1
	printf("level 3 interrupt: INT_STATUS = 0x%02x\n", stat);
#endif
}

/*
 * Level 4 interrupt: poll the interrupt status register and hand off
 * to the SCSI (si) and/or LANCE Ethernet (le) drivers as indicated.
 */
void
intrhand_lev4()
{
	int stat;
#if NLE > 0
	extern int leintr __P((int));
#endif
#if NSI > 0
	extern int si_intr __P((int));
#endif

#define INTST_LANCE	0x04
#define INTST_SCSI	0x80

	stat = *int_status;
	intrcnt[4]++;
	uvmexp.intrs++;

#if NSI > 0
	if (stat & INTST_SCSI) {
		si_intr(0);
	}
#endif
#if NLE > 0
	if (stat & INTST_LANCE) {
		leintr(0);
	}
#endif
#if 0
	printf("level 4 interrupt\n");
#endif
}

/*
 * consinit() routines - from newsmips/cpu_cons.c
 */

/*
 * Console initialization: called early on from main,
 * before vm init or startup.  Do enough configuration
 * to choose and initialize a console.
 * XXX need something better here.
 */
#define SCC_CONSOLE	0
#define SW_CONSOLE	0x07
#define SW_NWB512	0x04
#define SW_NWB225	0x01
#define SW_FBPOP	0x02
#define SW_FBPOP1	0x06
#define SW_FBPOP2	0x03
#define SW_AUTOSEL	0x07

struct consdev *cn_tab = NULL;
extern struct consdev consdev_bm, consdev_zs;

int tty00_is_console = 0;

void
consinit()
{

	int dipsw = *dip_switch;

	/*
	 * NOTE(review): this clears the console-select bits, so the
	 * switch below always takes case 0.  Harmless today because
	 * `default' shares the case 0 body (only the zs serial console
	 * is supported), but presumably `dipsw &= SW_CONSOLE' was
	 * intended -- confirm before adding framebuffer console support.
	 */
	dipsw &= ~SW_CONSOLE;

	switch (dipsw & SW_CONSOLE) {
	default: /* XXX no fb support yet */
	case 0:
		tty00_is_console = 1;
		cn_tab = &consdev_zs;
		(*cn_tab->cn_init)(cn_tab);
		break;
	}
#ifdef DDB
	{
#ifndef __ELF__
		ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
#else
		/* ELF symbol table lies between &end and esym. */
		ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
		    (void *)&end, esym);
#endif
	}
	if (boothowto & RB_KDB)
		Debugger();
#endif
}