xref: /original-bsd/sys/vax/vax/machdep.c (revision 7562ff97)
1 /*
2  * Copyright (c) 1982,1986,1988,1990 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)machdep.c	7.29 (Berkeley) 06/21/90
7  */
8 
9 #include "param.h"
10 #include "systm.h"
11 #include "syscontext.h"
12 #include "kernel.h"
13 #include "malloc.h"
14 #include "map.h"
15 #include "vm.h"
16 #include "proc.h"
17 #include "buf.h"
18 #include "reboot.h"
19 #include "conf.h"
20 #include "file.h"
21 #include "text.h"
22 #include "clist.h"
23 #include "callout.h"
24 #include "cmap.h"
25 #include "mbuf.h"
26 #include "msgbuf.h"
27 #ifdef SYSVSHM
28 #include "shm.h"
29 #endif
30 
31 #include "reg.h"
32 #include "pte.h"
33 #include "psl.h"
34 #include "frame.h"
35 #include "clock.h"
36 #include "cons.h"
37 #include "cpu.h"
38 #include "mem.h"
39 #include "mtpr.h"
40 #include "rpb.h"
41 #include "ka630.h"
42 #include "ka650.h"
43 
44 #include "../vaxuba/ubavar.h"
45 #include "../vaxuba/ubareg.h"
46 
/*
 * Declare these as initialized data so we can patch them.
 * (Initialized data lands in the data segment, so a binary patch
 * tool — or adb on the kernel image — can change them without a
 * recompile; a zero value means "compute a default in startup()".)
 */
int	nswbuf = 0;		/* number of swap I/O buffer headers; 0 => sized in startup() */
#ifdef	NBUF
int	nbuf = NBUF;		/* number of file I/O buffer headers (config override) */
#else
int	nbuf = 0;		/* 0 => derived from bufpages in startup() */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;	/* pages of memory for the buffer cache (config override) */
#else
int	bufpages = 0;		/* 0 => derived from physical memory size in startup() */
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */
63 
/*
 * Machine-dependent startup code.
 *
 * Called early in boot with "firstaddr", the first free physical page
 * frame number past the kernel image.  Lays out all statically-sized
 * kernel data structures in kernel virtual memory, maps and clears the
 * physical pages backing them, sizes the buffer cache, and finally
 * probes/attaches devices via configure().
 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j, n;
	register caddr_t v;
	int maxbufs, base, residual;

	/*
	 * Initialize error message buffer (at end of core).
	 * The last physical pages are stolen for the msgbuf and mapped
	 * kernel-writable; maxmem shrinks so they are never reallocated.
	 */
	maxmem = physmem - btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 1; i < btoc(sizeof (struct msgbuf)) + 1; i++)
		*(int *)pte++ = PG_V | PG_KW | (physmem - i);
	mtpr(TBIA, 0);		/* flush the translation buffer after remapping */
	msgbufmapped = 1;

#ifdef QBA
#include "qv.h"
#if NQV > 0
	/*
	 * redirect console to qvss if it exists
	 */
	qvcons_init();
#endif
#include "qd.h"
#if NQD > 0
	/*
	 * redirect console to qdss if it exists
	 */
	qdcons_init();
#endif
#endif

#ifdef KADB
	kdb_init();
	(void) cnopen(makedev(0, 0), 0);	/* open console XXX */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(KERNBASE | (firstaddr * NBPG));
	/* valloc: carve "num" objects of "type" out of kernel VA at v. */
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
	/* valloclim: same, but also record the end address in "lim". */
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Insure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * CLSIZE))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 * (This ncmap is only a preliminary estimate; the final value
	 * is recomputed below once bufpages is settled.)
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - KERNBASE)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of phsical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ KERNBASE);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Each buffer gets MAXBSIZE of VA but only its share of bufpages
	 * of physical memory; the first "residual" buffers get one extra
	 * page each so that all of bufpages is used.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);	/* page-align */
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
	for (i = 0; i < nbuf; i++) {
		n = (i < residual ? base + 1 : base) * CLSIZE;
		for (j = 0; j < n; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;	/* VA stride is full MAXBSIZE */
	}

	unixsize = btoc((int)v &~ KERNBASE);
	/* 8*UPAGES appears to be slack for u-areas; exact margin unverified. */
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 0);			/* After we just cleared it all! */

	/*
	 * Initialize callouts: thread the table into a free list.
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();	/* now safe to do malloc/free */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Clear restart inhibit flags.
	 */
	tocons(TXDB_CWSI);
	tocons(TXDB_CCSI);
}
281 
#ifdef PGINPROF
/*
 * Return the difference (in microseconds) between the current time
 * and a previous time as represented by the arguments (seconds,
 * lbolt ticks, and interval-count register value).
 * If a clock interrupt is pending but unserviced (because we are
 * running at high ipl), the time state is stale: return -1 instead.
 */
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

	if (mfpr(ICCS) & ICCS_INT)
		return (-1);
	/* 60 ticks/sec, 16667 usec/tick; ICR supplies the sub-tick part. */
	return (((time.tv_sec - otime) * 60 + lbolt - olbolt) * 16667
	    + mfpr(ICR) - oicr);
}
#endif
301 
/*
 * Clear registers on exec.
 * In practice only the user pc is set here; the saved registers are
 * left alone (see the disabled code below).
 */
/* ARGSUSED */
setregs(entry, retval)
	u_long entry;
	int *retval;
{
#ifdef notdef
	register int *rp;

	/* should pass args to init on the stack */
	/* should also fix this code before using it, it's wrong */
	/* wanna clear the scb? */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	/*
	 * entry + 2: presumably skips the two-byte register save mask
	 * at the procedure entry point (VAX calling convention) —
	 * TODO confirm against exec/trap code.
	 */
	u.u_ar0[PC] = entry + 2;
}
321 
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct sigcontext *scp;
	register struct proc *p = u.u_procp;
	register int *regs;
	/*
	 * Layout pushed on the user stack: handler arguments followed
	 * by a VAX "calls" argument frame used to invoke sigreturn.
	 */
	register struct sigframe {
		int	sf_signum;
		int	sf_code;
		struct	sigcontext *sf_scp;
		sig_t	sf_handler;
		int	sf_argcount;
		struct	sigcontext *sf_scpcopy;
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		/* deliver on the alternate signal stack */
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void)grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 * (Note: `sig' is reused below as a signal MASK.)
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = scp;
	fp->sf_handler = catcher;
	/*
	 * Build the calls argument frame to be used to call sigreturn
	 */
	fp->sf_argcount = 1;
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_ap = regs[AP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	/* Vector the process into the trampoline in its u. area. */
	regs[SP] = (int)fp;
	regs[PS] &= ~(PSL_CM|PSL_FPD);
	regs[PC] = (int)u.u_pcb.pcb_sigc;
	return;
}
407 
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper priviledges or to cause
 * a machine fault.
 */
/* ARGSUSED */
sigreturn(p, uap, retval)
	struct proc *p;
	struct args {
		struct sigcontext *sigcntxp;
	} *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = uap->sigcntxp;
	/* The user-supplied context pointer must be a writable user address. */
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		RETURN (EINVAL);
	/*
	 * Reject a PSL that: sets must-be-zero bits, a nonzero IPL, or
	 * interrupt-stack mode; claims other than user mode in both the
	 * current- and previous-mode fields; or, in compatibility mode,
	 * sets bits that are invalid there.
	 */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
	    ((scp->sc_ps & PSL_CM) &&
	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0))
		RETURN (EINVAL);
	u.u_onstack = scp->sc_onstack & 01;
	/* Never allow the unblockable signals to be masked. */
	p->p_sigmask = scp->sc_mask &~ sigcantmask;
	regs[FP] = scp->sc_fp;
	regs[AP] = scp->sc_ap;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
	RETURN (EJUSTRETURN);
}
446 
/*
 * Memenable enables memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs.  Thus we report at most once per memintvl.
 */
int	memintvl = MEMINTVL;	/* seconds between re-enables; <= 0 disables rescheduling */

memenable()
{

	(*cpuops->cpu_memenable)();	/* CPU-specific enable */
	if (memintvl > 0)
		/* re-arm ourselves so reporting resumes after memintvl secs */
		timeout(memenable, (caddr_t)0, memintvl*hz);
}
462 
/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts.  It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controller.
 * (All the work is delegated to the CPU-specific handler.)
 */
memerr()
{

	(*cpuops->cpu_memerr)();
}
474 
/*
 * Invalidate, one pte at a time (TBIS = single-entry translation
 * buffer invalidate), all pte's in a cluster.
 *
 * WARNING: the inline asm below names r11 directly, so `addr' must be
 * the first register variable (the compiler assigns register variables
 * starting at r11).  Do not reorder the declarations.
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	asm(".set TBIS,58");		/* processor register number of TBIS */
	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr(TBIS, addr);
#else
		asm("mtpr r11,$TBIS");	/* r11 == addr, see above */
#endif
		addr += NBPG;
	}
}
495 
int	waittime = -1;		/* -1 until first shutdown sync attempt */

/*
 * Halt or reboot the machine after syncing the disks.
 * `howto' is a mask of RB_* flags from reboot.h.
 *
 * NOTE: howto and devtype are deliberately placed in r11 and r10
 * (first two register variables); vaxboot() relies on them still
 * being there ("call by voodoo").
 */
boot(howto)
	register int howto;		/* r11 == how to boot */
{
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	/* Sync only once, only if asked to, and only if buffers exist yet. */
	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			xumount(NULL);
		sync();

		/*
		 * Wait (with increasing delays) for outstanding I/O:
		 * count buffers still busy and not invalidated.
		 */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splx(0x1f);			/* extreme priority */
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		switch (cpu) {

		/* 630 can be told to halt, but how? */
#if VAX650
		case VAX_650:
			/* tell the console program to stay halted */
			ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
			ka650ssc.ssc_cpmbx |= CPMB650_HALT;
			asm("halt");
#endif
		}
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		for (;;)
			;
	} else {
		if (howto & RB_DUMP)
			doadump();	/* dump core, then reboot */
		vaxboot();
	}
#ifdef lint
	devtype = devtype;
#endif
	/*NOTREACHED*/
}
564 
/*
 * Reboot after panic or via reboot system call.  Note that r11
 * and r10 must already have the proper boot values (`call by voodoo').
 * The actual reboot is performed by halting; the console program
 * then reboots, picking up the flags left in the registers.
 */
vaxboot()
{

	switch (cpu) {

#ifdef VAX8200
	case VAX_8200:
		/*
		 * TXDB_BOOT erases memory!  Instead we set the `did
		 * a dump' flag in the rpb.
		 * (The rpb page must first be made kernel-writable.)
		 */
		*(int *)&Sysmap[0] &= ~PG_PROT;
		*(int *)&Sysmap[0] |= PG_KW;
		mtpr(TBIS, &rpb);
		rpb.rp_flag = 1;
		break;
#endif

#ifdef VAX650
	case VAX_650:
		/* set boot-on-halt flag in "console mailbox" */
		ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
		ka650ssc.ssc_cpmbx |= CPMB650_REBOOT;
		break;
#endif

	default:
		tocons(TXDB_BOOT);	/* ask the console to reboot */
	}

	/*
	 * Except on 780s and 8600s, boot flags go in r5.  SBI
	 * VAXen do not care, so copy boot flags to r5 always.
	 */
	asm("movl r11,r5");
	for (;;) {
		asm("halt");
	}
}
608 
/*
 * Write a command/character `c' (implicit int) to the console,
 * handling the per-CPU quirks of the TXCS/TXDB console registers.
 */
tocons(c)
{
	register int oldmask;

	/* Wait for the transmitter to become ready. */
	while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
		continue;

	switch (cpu) {

#if VAX8200 || VAX780 || VAX750 || VAX730 || VAX630
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_630:
		/* mark the datum as destined for the console program */
		c |= TXDB_CONS;
		break;
#endif

#if VAX8600
	case VAX_8600:
		/* select the logical console, then wait for ready again */
		mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		break;
#endif

#if VAX650
	case VAX_650:
		/* everything is a real console terminal character on ka650 */
		return;
#endif
	}

	mtpr(TXDB, c);

#if VAX8600
	/* restore the previously selected console on the 8600 */
	switch (cpu) {

	case VAX_8600:
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		mtpr(TXCS, oldmask | TXCS_WMASK);
		break;
	}
#endif
#ifdef lint
	oldmask = oldmask;
#endif
}
659 
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */

/*
 * Compute the size and device offset of a crash dump.
 * Clips dumpsize to what fits on the dump device and keeps the
 * dump clear of the first CLSIZE pages (possible disk label).
 */
dumpconf()
{
	int nblks;

	dumpsize = physmem;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			/* dump won't fit past dumplo: shrink it */
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			/* place the dump at the end of the device */
			dumplo = nblks - btodb(ctob(physmem));
	}
	/*
	 * Don't dump on the first CLSIZE pages,
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < CLSIZE)
		dumplo = CLSIZE;
}
682 
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 * Writes physical memory to the dump device and reports the result.
 */
dumpsys()
{

	rpb.rp_flag = 1;	/* note in the rpb that we dumped */
	msgbufmapped = 0;	/* msgbuf mapping is gone; don't log to it */
	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	/* d_dump does the actual transfer; its return is an errno code */
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:					/* XXX */
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error");
		break;

	default:
		printf("succeeded");
		break;
	}
}
728 
729 /*
730  * Machine check error recovery code.
731  */
732 machinecheck(cmcf)
733 	caddr_t cmcf;
734 {
735 
736 	if ((*cpuops->cpu_mchk)(cmcf) == MCHK_RECOVERED)
737 		return;
738 	(*cpuops->cpu_memerr)();
739 	panic("mchk");
740 }
741 
#if defined(VAX780) || defined(VAX750)
/*
 * These strings are shared between the 780 and 750 machine check code
 * in ka780.c and ka730.c.
 * Indexed by machine-check summary code; zero entries are codes with
 * no assigned meaning here.
 */
char *mc780750[16] = {
	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
	"cp rdtimo", 	"cp rds",	"ucode lost",	0,
	0,		0,		"ib tbuf par",	0,
	"ib rds",	"ib rd timo",	0,		"ib cache par"
};
#endif
754 
755 /*
756  * Return the best possible estimate of the time in the timeval
757  * to which tvp points.  We do this by reading the interval count
758  * register to determine the time remaining to the next clock tick.
759  * We must compensate for wraparound which is not yet reflected in the time
760  * (which happens when the ICR hits 0 and wraps after the splhigh(),
761  * but before the mfpr(ICR)).  Also check that this time is no less than
762  * any previously-reported time, which could happen around the time
763  * of a clock adjustment.  Just for fun, we guarantee that the time
764  * will be greater than the value obtained by a previous call.
765  */
766 microtime(tvp)
767 	register struct timeval *tvp;
768 {
769 	int s = splhigh();
770 	static struct timeval lasttime;
771 	register long t;
772 
773 	*tvp = time;
774 	t =  mfpr(ICR);
775 	if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT))
776 		t += tick;
777 	tvp->tv_usec += tick + t;
778 	if (tvp->tv_usec > 1000000) {
779 		tvp->tv_sec++;
780 		tvp->tv_usec -= 1000000;
781 	}
782 	if (tvp->tv_sec == lasttime.tv_sec &&
783 	    tvp->tv_usec <= lasttime.tv_usec &&
784 	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
785 		tvp->tv_sec++;
786 		tvp->tv_usec -= 1000000;
787 	}
788 	lasttime = *tvp;
789 	splx(s);
790 }
791 
/*
 * CPU-specific initialization: enable the hardware cache and, where
 * present, the floating point accelerator.
 */
initcpu()
{
	/*
	 * Enable cache.
	 */
	switch (cpu) {

#if VAX8600
	case VAX_8600:
		mtpr(CSWP, 3);
		break;
#endif
#if VAX8200
	case VAX_8200:
		mtpr(CADR, 0);
		break;
#endif
#if VAX780
	case VAX_780:
		mtpr(SBIMT, 0x200000);
		break;
#endif
#if VAX750
	case VAX_750:
		mtpr(CADR, 0);
		break;
#endif
	default:
		break;
	}

	/*
	 * Enable floating point accelerator if it exists
	 * and has control register.
	 */
	switch(cpu) {

#if VAX8600 || VAX780
	case VAX_8600:
	case VAX_780:
		if ((mfpr(ACCS) & 0xff) != 0) {
			printf("Enabling FPA\n");
			mtpr(ACCS, 0x8000);
		}
		/* FALLTHROUGH to default (which does nothing) */
#endif
	default:
		break;
	}
}
841 
/*
 * Return a reasonable approximation of the time of day register.
 * More precisely, return a number that increases by one about
 * once every ten milliseconds.
 */
todr()
{

	switch (cpu) {

#if VAX8600 || VAX8200 || VAX780 || VAX750 || VAX730 || VAX650
	case VAX_8600:
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_650:
		/* these CPUs have a real time-of-day register */
		return (mfpr(TODR));
#endif

#if VAX630
	case VAX_630:
		/* XXX crude: no TODR; burn 10ms and bump a counter */
		{ static int t; DELAY(10000); return (++t); }
#endif

	default:
		panic("todr");
	}
	/* NOTREACHED */
872 }
873