1 /*
2 * Copyright (c) 1982,1986,1988,1990 Regents of the University of California.
3 * All rights reserved. The Berkeley software License Agreement
4 * specifies the terms and conditions for redistribution.
5 *
6 * @(#)machdep.c 7.33 (Berkeley) 07/12/92
7 */
8
9 #include "sys/param.h"
10 #include "sys/systm.h"
11 #include "sys/user.h"
12 #include "sys/kernel.h"
13 #include "sys/malloc.h"
14 #include "sys/map.h"
15 #include "sys/vm.h"
16 #include "sys/proc.h"
17 #include "sys/buf.h"
18 #include "sys/reboot.h"
19 #include "sys/conf.h"
20 #include "sys/file.h"
21 #include "sys/text.h"
22 #include "sys/clist.h"
23 #include "sys/callout.h"
24 #include "sys/cmap.h"
25 #include "sys/mbuf.h"
26 #include "sys/msgbuf.h"
27 #ifdef SYSVSHM
28 #include "sys/shm.h"
29 #endif
30
31 #include "../include/reg.h"
32 #include "../include/pte.h"
33 #include "../include/psl.h"
34 #include "../include/frame.h"
35 #include "../include/clock.h"
36 #include "cons.h"
37 #include "../include/cpu.h"
38 #include "mem.h"
39 #include "../include/mtpr.h"
40 #include "rpb.h"
41 #include "ka630.h"
42 #include "ka650.h"
43
44 #include "../uba/ubavar.h"
45 #include "../uba/ubareg.h"
46
47 /*
48 * Declare these as initialized data so we can patch them.
49 */
50 int nswbuf = 0;
51 #ifdef NBUF
52 int nbuf = NBUF;
53 #else
54 int nbuf = 0;
55 #endif
56 #ifdef BUFPAGES
57 int bufpages = BUFPAGES;
58 #else
59 int bufpages = 0;
60 #endif
61 int msgbufmapped; /* set when safe to use msgbuf */
62 int physmem = MAXMEM; /* max supported memory, changes to actual */
63 /*
64 * safepri is a safe priority for sleep to set for a spin-wait
65 * during autoconfiguration or after a panic. On the vax, this must
66 * be > 0 so that we can take interrupts after a panic while on the interrupt
67 * stack. Otherwise, we will get a reserved operand fault when we return
68 * from any interrupt that comes in.
69 */
70 int safepri = 1;
71
72 /*
73 * Machine-dependent startup code
74 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j, n;
	register caddr_t v;
	int maxbufs, base, residual;

	/*
	 * Initialize error message buffer (at end of core).
	 * The top btoc(sizeof (struct msgbuf)) pages of physical memory
	 * are mapped through msgbufmap and removed from maxmem.
	 */
	maxmem = physmem - btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 1; i < btoc(sizeof (struct msgbuf)) + 1; i++)
		*(int *)pte++ = PG_V | PG_KW | (physmem - i);
	mtpr(TBIA, 0);		/* flush translation buffer */
	msgbufmapped = 1;	/* printf may now log to the msgbuf */

#ifdef QBA
#include "qv.h"
#if NQV > 0
	/*
	 * redirect console to qvss if it exists
	 */
	qvcons_init();
#endif
#include "qd.h"
#if NQD > 0
	/*
	 * redirect console to qdss if it exists
	 */
	qdcons_init();
#endif
#endif

#ifdef KADB
	kdb_init();
	(void) cnopen(makedev(0, 0), 0);	/* open console XXX */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(KERNBASE | (firstaddr * NBPG));
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * CLSIZE))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - KERNBASE)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ KERNBASE);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper. They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Each buffer gets MAXBSIZE of virtual space; only the first
	 * `residual' buffers get base+1 clusters of physical memory,
	 * the rest get `base'.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
	for (i = 0; i < nbuf; i++) {
		n = (i < residual ? base + 1 : base) * CLSIZE;
		for (j = 0; j < n; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ KERNBASE);
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 0);			/* After we just cleared it all! */

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();	/* now safe to do malloc/free */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Clear restart inhibit flags.
	 */
	tocons(TXDB_CWSI);
	tocons(TXDB_CCSI);
}
289
290 #ifdef PGINPROF
291 /*
292 * Return the difference (in microseconds)
293 * between the current time and a previous
294 * time as represented by the arguments.
295 * If there is a pending clock interrupt
296 * which has not been serviced due to high
297 * ipl, return error code.
298 */
vmtime(otime,olbolt,oicr)299 vmtime(otime, olbolt, oicr)
300 register int otime, olbolt, oicr;
301 {
302
303 if (mfpr(ICCS)&ICCS_INT)
304 return(-1);
305 else
306 return(((time.tv_sec-otime)*60 + lbolt-olbolt)*16667 + mfpr(ICR)-oicr);
307 }
308 #endif
309
310 /*
311 * Clear registers on exec
312 */
313 /* ARGSUSED */
setregs(entry, retval)
	u_long entry;
	int *retval;
{
#ifdef notdef
	register int *rp;

	/* should pass args to init on the stack */
	/* should also fix this code before using it, it's wrong */
	/* wanna clear the scb? */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	/*
	 * Start the new image at its entry point; the +2 skips the
	 * register save mask word at the head of a VAX procedure.
	 */
	u.u_ar0[PC] = entry + 2;
}
329
330 /*
331 * Send an interrupt to process.
332 *
333 * Stack is set up to allow sigcode stored
334 * in u. to call routine, followed by chmk
335 * to sigreturn routine below. After sigreturn
336 * resets the signal mask, the stack, the frame
337 * pointer, and the argument pointer, it returns
338 * to the user specified pc, psl.
339 */
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct sigcontext *scp;
	register struct proc *p = u.u_procp;
	register int *regs;
	register struct sigframe {
		int sf_signum;			/* signal number argument */
		int sf_code;			/* additional code argument */
		struct sigcontext *sf_scp;	/* context pointer argument */
		sig_t sf_handler;		/* handler, called by sigcode */
		int sf_argcount;		/* `calls' frame for sigreturn */
		struct sigcontext *sf_scpcopy;	/* sigreturn's argument */
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void)grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = scp;
	fp->sf_handler = catcher;
	/*
	 * Build the calls argument frame to be used to call sigreturn
	 */
	fp->sf_argcount = 1;
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_ap = regs[AP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	/*
	 * Vector the user to the trampoline: sp points at the sigframe,
	 * pc at the sigcode in the u. area; clear compatibility mode
	 * and first-part-done so the handler starts cleanly.
	 */
	regs[SP] = (int)fp;
	regs[PS] &= ~(PSL_CM|PSL_FPD);
	regs[PC] = (int)u.u_pcb.pcb_sigc;
	return;
}
415
416 /*
417 * System call to cleanup state after a signal
418 * has been taken. Reset signal mask and
419 * stack state from context left by sendsig (above).
420 * Return to previous pc and psl as specified by
421 * context left by sendsig. Check carefully to
422 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
424 * a machine fault.
425 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;	/* user address of saved context */
};
/* ARGSUSED */
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = uap->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return (EINVAL);
	/*
	 * Sanity-check the saved PSL: no must-be-zero bits, no raised
	 * IPL or interrupt stack, previous and current mode must both
	 * remain user, and in compatibility mode the FPD and arithmetic
	 * trap-enable bits must be clear.  Otherwise the user could gain
	 * privilege or cause a machine fault on the return.
	 */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
	    ((scp->sc_ps & PSL_CM) &&
	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0))
		return (EINVAL);
	u.u_onstack = scp->sc_onstack & 01;
	p->p_sigmask = scp->sc_mask &~ sigcantmask;
	regs[FP] = scp->sc_fp;
	regs[AP] = scp->sc_ap;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
	return (EJUSTRETURN);
}
455
456 /*
457 * Memenable enables memory controller corrected data reporting.
458 * This runs at regular intervals, turning on the interrupt.
459 * The interrupt is turned off, per memory controller, when error
460 * reporting occurs. Thus we report at most once per memintvl.
461 */
int memintvl = MEMINTVL;	/* seconds between CRD reports; patchable */

memenable()
{

	/* CPU-specific code turns the corrected-data interrupt back on */
	(*cpuops->cpu_memenable)();
	if (memintvl > 0)
		timeout(memenable, (caddr_t)0, memintvl*hz);
}
471
472 /*
473 * Memerr is the interrupt routine for corrected read data
474 * interrupts. It looks to see which memory controllers have
475 * unreported errors, reports them, and disables further
476 * reporting for a time on those controller.
477 */
memerr()
{

	/* report and then mask further CRD interrupts (see memenable) */
	(*cpuops->cpu_memerr)();
}
483
/*
 * Invalidate all of the pte's in a single cluster
 * (one TB-invalidate-single per page).
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;	/* must be first reg var (allocated to r11) */
	register int i;

	/* define the TBIS processor-register number for the asm below */
	asm(".set TBIS,58");
	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr(TBIS, addr);
#else
		/* relies on `addr' living in r11 -- do not reorder decls */
		asm("mtpr r11,$TBIS");
#endif
		addr += NBPG;
	}
}
504
int waittime = -1;	/* <0: not yet syncing; >=0: sync in progress/done */

boot(howto)
	register int howto;		/* r11 == how to boot */
{
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			xumount(NULL);
		sync();

		/*
		 * Give the sync up to 20 passes (with increasing delay)
		 * to drain all busy buffers.
		 */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splx(0x1f);			/* extreme priority */
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		switch (cpu) {

		/* 630 can be told to halt, but how? */
#if VAX650
		case VAX_650:
			/* console mailbox: halt-on-halt action, then stop */
			ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
			ka650ssc.ssc_cpmbx |= CPMB650_HALT;
			asm("halt");
#endif
		}
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		for (;;)
			;
	} else {
		if (howto & RB_DUMP)
			doadump();
		vaxboot();
	}
#ifdef lint
	devtype = devtype;
#endif
	/*NOTREACHED*/
}
573
574 /*
575 * Reboot after panic or via reboot system call. Note that r11
576 * and r10 must already have the proper boot values (`call by voodoo').
577 */
vaxboot()
{

	switch (cpu) {

#ifdef VAX8200
	case VAX_8200:
		/*
		 * TXDB_BOOT erases memory! Instead we set the `did
		 * a dump' flag in the rpb.  The rpb page is made
		 * writable first, and its stale translation flushed.
		 */
		*(int *)&Sysmap[0] &= ~PG_PROT;
		*(int *)&Sysmap[0] |= PG_KW;
		mtpr(TBIS, &rpb);
		rpb.rp_flag = 1;
		break;
#endif

#ifdef VAX650
	case VAX_650:
		/* set boot-on-halt flag in "console mailbox" */
		ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
		ka650ssc.ssc_cpmbx |= CPMB650_REBOOT;
		break;
#endif

	default:
		tocons(TXDB_BOOT);
	}

	/*
	 * Except on 780s and 8600s, boot flags go in r5. SBI
	 * VAXen do not care, so copy boot flags to r5 always.
	 */
	asm("movl r11,r5");
	for (;;) {
		asm("halt");
	}
}
617
/*
 * Send `c' (a console request code such as TXDB_BOOT, or a character)
 * to the console transmit data register, with per-CPU quirks.
 */
tocons(c)
{
	register int oldmask;

	/* wait for the console transmitter to become ready */
	while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
		continue;

	switch (cpu) {

#if VAX8200 || VAX780 || VAX750 || VAX730 || VAX630
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_630:
		c |= TXDB_CONS;		/* tag as a console request */
		break;
#endif

#if VAX8600
	case VAX_8600:
		/* select the logical console before transmitting */
		mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		break;
#endif

#if VAX650
	case VAX_650:
		/* everything is a real console terminal character on ka650 */
		return;
#endif
	}

	mtpr(TXDB, c);

#if VAX8600
	/* restore the previous console selection on the 8600 */
	switch (cpu) {

	case VAX_8600:
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		mtpr(TXCS, oldmask | TXCS_WMASK);
		break;
	}
#endif
#ifdef lint
	oldmask = oldmask;
#endif
}
668
669 int dumpmag = 0x8fca0101; /* magic number for savecore */
670 int dumpsize = 0; /* also for savecore */
671
/*
 * Set dump parameters: dump all of physical memory, clipped to what
 * fits on the dump device beyond `dumplo'.
 */
dumpconf()
{
	int nblks;

	dumpsize = physmem;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			/* whole dump fits: place it at the end of the device */
			dumplo = nblks - btodb(ctob(physmem));
	}
	/*
	 * Don't dump on the first CLSIZE pages,
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < CLSIZE)
		dumplo = CLSIZE;
}
691
692 /*
693 * Doadump comes here after turning off memory management and
694 * getting on the dump stack, either when called above, or by
695 * the auto-restart code.
696 */
dumpsys()
{

	rpb.rp_flag = 1;	/* note in the rpb that a dump was taken */
	msgbufmapped = 0;	/* msgbuf mapping is no longer trustworthy */
	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	/* report the driver's verdict on the dump attempt */
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:		/* XXX */
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error");
		break;

	default:
		printf("succeeded");
		break;
	}
}
737
738 /*
739 * Machine check error recovery code.
740 */
machinecheck(cmcf)
	caddr_t cmcf;
{

	/* let the CPU-specific handler attempt recovery first */
	if ((*cpuops->cpu_mchk)(cmcf) == MCHK_RECOVERED)
		return;
	(*cpuops->cpu_memerr)();	/* report pending memory errors */
	panic("mchk");
}
750
#if defined(VAX780) || defined(VAX750)
/*
 * These strings are shared between the 780 and 750 machine check code
 * in ka780.c and ka730.c.  Indexed by machine-check summary code;
 * zero entries are codes with no symbolic name.
 */
char *mc780750[16] = {
	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
	"cp rdtimo",	"cp rds",	"ucode lost",	0,
	0,		0,		"ib tbuf par",	0,
	"ib rds",	"ib rd timo",	0,		"ib cache par"
};
#endif
763
764 /*
765 * Return the best possible estimate of the time in the timeval
766 * to which tvp points. We do this by reading the interval count
767 * register to determine the time remaining to the next clock tick.
768 * We must compensate for wraparound which is not yet reflected in the time
769 * (which happens when the ICR hits 0 and wraps after the splhigh(),
770 * but before the mfpr(ICR)). Also check that this time is no less than
771 * any previously-reported time, which could happen around the time
772 * of a clock adjustment. Just for fun, we guarantee that the time
773 * will be greater than the value obtained by a previous call.
774 */
microtime(tvp)775 microtime(tvp)
776 register struct timeval *tvp;
777 {
778 int s = splhigh();
779 static struct timeval lasttime;
780 register long t;
781
782 *tvp = time;
783 t = mfpr(ICR);
784 if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT))
785 t += tick;
786 tvp->tv_usec += tick + t;
787 if (tvp->tv_usec > 1000000) {
788 tvp->tv_sec++;
789 tvp->tv_usec -= 1000000;
790 }
791 if (tvp->tv_sec == lasttime.tv_sec &&
792 tvp->tv_usec <= lasttime.tv_usec &&
793 (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
794 tvp->tv_sec++;
795 tvp->tv_usec -= 1000000;
796 }
797 lasttime = *tvp;
798 splx(s);
799 }
800
initcpu()
{
	/*
	 * Enable cache.
	 */
	switch (cpu) {

#if VAX8600
	case VAX_8600:
		mtpr(CSWP, 3);
		break;
#endif
#if VAX8200
	case VAX_8200:
		mtpr(CADR, 0);
		break;
#endif
#if VAX780
	case VAX_780:
		mtpr(SBIMT, 0x200000);
		break;
#endif
#if VAX750
	case VAX_750:
		mtpr(CADR, 0);
		break;
#endif
	default:
		break;
	}

	/*
	 * Enable floating point accelerator if it exists
	 * and has control register.
	 */
	switch(cpu) {

#if VAX8600 || VAX780
	case VAX_8600:
	case VAX_780:
		if ((mfpr(ACCS) & 0xff) != 0) {
			printf("Enabling FPA\n");
			mtpr(ACCS, 0x8000);
		}
		/* FALLTHROUGH into default (which does nothing) */
#endif
	default:
		break;
	}
}
850
851 /*
852 * Return a reasonable approximation of the time of day register.
853 * More precisely, return a number that increases by one about
854 * once every ten milliseconds.
855 */
todr()
{

	switch (cpu) {

#if VAX8600 || VAX8200 || VAX780 || VAX750 || VAX730 || VAX650
	case VAX_8600:
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_650:
		return (mfpr(TODR));	/* hardware time-of-day register */
#endif

#if VAX630
	case VAX_630:
		/* XXX crude: no TODR; fake the count with a fixed delay */
		{ static int t; DELAY(10000); return (++t); }
#endif

	default:
		panic("todr");
	}
	/* NOTREACHED */
}
882