xref: /original-bsd/sys/vax/vax/machdep.c (revision 30f8b1be)
1 /*
2  * Copyright (c) 1982,1986,1988,1990 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)machdep.c	7.27 (Berkeley) 05/10/90
7  */
8 
9 #include "param.h"
10 #include "systm.h"
11 #include "user.h"
12 #include "kernel.h"
13 #include "malloc.h"
14 #include "map.h"
15 #include "vm.h"
16 #include "proc.h"
17 #include "buf.h"
18 #include "reboot.h"
19 #include "conf.h"
20 #include "file.h"
21 #include "text.h"
22 #include "clist.h"
23 #include "callout.h"
24 #include "cmap.h"
25 #include "mbuf.h"
26 #include "msgbuf.h"
27 #ifdef SYSVSHM
28 #include "shm.h"
29 #endif
30 
31 #include "reg.h"
32 #include "pte.h"
33 #include "psl.h"
34 #include "frame.h"
35 #include "clock.h"
36 #include "cons.h"
37 #include "cpu.h"
38 #include "mem.h"
39 #include "mtpr.h"
40 #include "rpb.h"
41 #include "ka630.h"
42 #include "ka650.h"
43 
44 #include "../vaxuba/ubavar.h"
45 #include "../vaxuba/ubareg.h"
46 
/* Syscall error-return: record the error in u.u_error for the trap code, then return. */
#define RETURN(value)   { u.u_error = (value); return; }
48 
/*
 * Declare these as initialized data so we can patch them
 * (e.g. with adb on the kernel binary) without recompiling.
 * A value of 0 means "compute a default in startup()".
 */
int	nswbuf = 0;		/* number of swap I/O buffer headers; 0 => sized in startup() */
#ifdef	NBUF
int	nbuf = NBUF;		/* number of file I/O buffer headers, from config */
#else
int	nbuf = 0;		/* 0 => sized from bufpages in startup() */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;	/* pages of memory for the buffer cache, from config */
#else
int	bufpages = 0;		/* 0 => sized from physmem in startup() */
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */
65 
/*
 * Machine-dependent startup code.
 *
 * Carves up physical memory and kernel virtual space for the statically
 * sized system tables, the buffer cache, and the core map, then clears
 * and maps that memory and configures the machine.  The layout arithmetic
 * below is strictly order-dependent: "firstaddr" tracks the next free
 * physical page and "v" the next free kernel virtual address.
 */
startup(firstaddr)
	int firstaddr;		/* first free physical page number after the kernel */
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j, n;
	register caddr_t v;
	int maxbufs, base, residual;

	/*
	 * Initialize error message buffer (at end of core).
	 * The top btoc(sizeof (struct msgbuf)) pages of physical memory
	 * are stolen (maxmem is reduced) and mapped via msgbufmap.
	 */
	maxmem = physmem - btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 1; i < btoc(sizeof (struct msgbuf)) + 1; i++)
		*(int *)pte++ = PG_V | PG_KW | (physmem - i);
	mtpr(TBIA, 0);			/* flush TB: mappings just changed */
	msgbufmapped = 1;

#ifdef QBA
#include "qv.h"
#if NQV > 0
	/*
	 * redirect console to qvss if it exists
	 */
	qvcons_init();
#endif
#include "qd.h"
#if NQD > 0
	/*
	 * redirect console to qdss if it exists
	 */
	qdcons_init();
#endif
#endif

#ifdef KADB
	kdb_init();
	(void) cnopen(makedev(0, 0), 0);	/* open console XXX */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(KERNBASE | (firstaddr * NBPG));
	/* valloc: carve "num" objects of "type" out of kernel VA at v */
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
	/* valloclim: like valloc, but also record the end address in "lim" */
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Insure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * CLSIZE))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 * This first ncmap is only an estimate used to bound maxbufs;
	 * it is recomputed exactly below once bufpages is final.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - KERNBASE)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ KERNBASE);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Each buffer gets MAXBSIZE of virtual space but only its share
	 * of bufpages ("base", plus one extra cluster for the first
	 * "residual" buffers) is backed with physical pages.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);	/* page-align */
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
	for (i = 0; i < nbuf; i++) {
		n = (i < residual ? base + 1 : base) * CLSIZE;
		for (j = 0; j < n; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ KERNBASE);
	/* leave head room for u. areas (8*UPAGES appears to be a margin) */
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 0);			/* After we just cleared it all! */

	/*
	 * Initialize callouts: chain all entries onto the free list.
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();	/* now safe to do malloc/free */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Clear restart inhibit flags.
	 */
	tocons(TXDB_CWSI);
	tocons(TXDB_CCSI);
}
283 
#ifdef PGINPROF
/*
 * Return the difference (in microseconds) between the current time and
 * a previous time represented by (otime, olbolt, oicr): seconds, ticks,
 * and interval-count-register value.  If a clock interrupt is pending
 * but unserviced (due to high ipl), the time base is stale, so return
 * -1 as an error code instead.
 */
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{
	register int ticks;

	if (mfpr(ICCS) & ICCS_INT)
		return (-1);
	/* whole ticks elapsed, then scale to usec and add the ICR delta */
	ticks = (time.tv_sec - otime) * 60 + lbolt - olbolt;
	return (ticks * 16667 + mfpr(ICR) - oicr);
}
#endif
303 
/*
 * Clear registers on exec.
 * Only the PC is actually set here; "entry + 2" skips the two-byte
 * register save mask word at the start of the entry point, since the
 * image is entered via a calls-style sequence.
 */
setregs(entry)
	u_long entry;
{
#ifdef notdef
	register int *rp;

	/* should pass args to init on the stack */
	/* should also fix this code before using it, it's wrong */
	/* wanna clear the scb? */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	u.u_ar0[PC] = entry + 2;
}
321 
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
sendsig(catcher, sig, mask, code)
	sig_t catcher;		/* user-mode handler address */
	int sig, mask;		/* signal number; signal mask to restore */
	unsigned code;		/* machine-dependent code (e.g. fault info) */
{
	register struct sigcontext *scp;
	register struct proc *p = u.u_procp;
	register int *regs;
	/* frame pushed on the user stack, laid out for a calls to the handler */
	register struct sigframe {
		int	sf_signum;	/* handler arg 0: signal number */
		int	sf_code;	/* handler arg 1: code */
		struct	sigcontext *sf_scp;	/* handler arg 2: context ptr */
		sig_t	sf_handler;	/* handler address for sigcode */
		int	sf_argcount;	/* calls arg count for sigreturn */
		struct	sigcontext *sf_scpcopy;	/* sigreturn's argument */
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		/* switch to the alternate signal stack */
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void)grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);	/* note: sig reused as a mask below */
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = scp;
	fp->sf_handler = catcher;
	/*
	 * Build the calls argument frame to be used to call sigreturn
	 */
	fp->sf_argcount = 1;
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_ap = regs[AP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	/* resume user mode in the sigcode trampoline with the frame on SP */
	regs[SP] = (int)fp;
	regs[PS] &= ~(PSL_CM|PSL_FPD);	/* native mode, first-part-done off */
	regs[PC] = (int)u.u_pcb.pcb_sigc;
	return;
}
407 
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
	struct a {
		struct sigcontext *sigcntxp;	/* user pointer to saved context */
	};
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = ((struct a *)(u.u_ap))->sigcntxp;
	/* the whole context must be addressable by the process */
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		RETURN (EINVAL);
	/*
	 * Validate the user-supplied PSL: must-be-zero bits clear, IPL 0,
	 * not on the interrupt stack, previous and current mode both user;
	 * and in compatibility mode the native-only FPD/trap-enable bits
	 * must all be off.
	 */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
	    ((scp->sc_ps & PSL_CM) &&
	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0))
		RETURN (EINVAL);
	u.u_onstack = scp->sc_onstack & 01;
	u.u_procp->p_sigmask = scp->sc_mask &~ sigcantmask;
	/* restore the registers saved by sendsig */
	regs[FP] = scp->sc_fp;
	regs[AP] = scp->sc_ap;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
	RETURN (EJUSTRETURN);
}
443 
/*
 * Memenable enables memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs.  Thus we report at most once per memintvl.
 */
int	memintvl = MEMINTVL;	/* seconds between re-enables; <= 0 disables rescheduling */
451 
452 memenable()
453 {
454 
455 	(*cpuops->cpu_memenable)();
456 	if (memintvl > 0)
457 		timeout(memenable, (caddr_t)0, memintvl*hz);
458 }
459 
/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts.  It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controller.
 * All the work is done by the CPU-specific handler.
 */
memerr()
{

	(*cpuops->cpu_memerr)();
}
471 
/*
 * Invalidate, one page at a time (TBIS), all translation-buffer
 * entries for the pages of a single cluster starting at virtual
 * page number v.
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	/* define the TBIS processor-register number for the asm below */
	asm(".set TBIS,58");
	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr(TBIS, addr);
#else
		/* r11 is the first register variable, i.e. addr */
		asm("mtpr r11,$TBIS");
#endif
		addr += NBPG;
	}
}
492 
int	waittime = -1;		/* -1 until the disk sync below has been started */

/*
 * Halt or reboot the machine per the RB_* flags in howto.
 * Unless RB_NOSYNC, sync the disks first (once only, guarded by
 * waittime).  Register usage is part of the boot protocol: howto
 * must live in r11 and the root device major in r10 (see vaxboot).
 */
boot(howto)
	register int howto;		/* r11 == how to boot */
{
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	/* skip the sync if buffers were never initialized (early panic) */
	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			xumount(NULL);
		sync();

		/* wait, with increasing delays, for busy buffers to drain */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splx(0x1f);			/* extreme priority */
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		switch (cpu) {

		/* 630 can be told to halt, but how? */
#if VAX650
		case VAX_650:
			/* tell the KA650 console to halt rather than restart */
			ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
			ka650ssc.ssc_cpmbx |= CPMB650_HALT;
			asm("halt");
#endif
		}
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		for (;;)
			;
	} else {
		if (howto & RB_DUMP)
			doadump();
		vaxboot();
	}
#ifdef lint
	devtype = devtype;
#endif
	/*NOTREACHED*/
}
561 
/*
 * Reboot after panic or via reboot system call.  Note that r11
 * and r10 must already have the proper boot values (`call by voodoo').
 * We arrange for the console/restart firmware to reboot us, then halt;
 * the actual restart happens from the halt.
 */
vaxboot()
{

	switch (cpu) {

#ifdef VAX8200
	case VAX_8200:
		/*
		 * TXDB_BOOT erases memory!  Instead we set the `did
		 * a dump' flag in the rpb.
		 */
		*(int *)&Sysmap[0] &= ~PG_PROT;	/* make rpb page writable */
		*(int *)&Sysmap[0] |= PG_KW;
		mtpr(TBIS, &rpb);		/* flush stale TB entry */
		rpb.rp_flag = 1;
		break;
#endif

#ifdef VAX650
	case VAX_650:
		/* set boot-on-halt flag in "console mailbox" */
		ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
		ka650ssc.ssc_cpmbx |= CPMB650_REBOOT;
		break;
#endif

	default:
		tocons(TXDB_BOOT);	/* ask the console to reboot us */
	}

	/*
	 * Except on 780s and 8600s, boot flags go in r5.  SBI
	 * VAXen do not care, so copy boot flags to r5 always.
	 */
	asm("movl r11,r5");
	for (;;) {
		asm("halt");
	}
}
605 
/*
 * Write a command code c to the console transmit data buffer (TXDB),
 * handling the per-CPU quirks: most CPUs need the TXDB_CONS bits
 * or'd in, the 8600 must first be switched to the logical console
 * (and its transmit mask restored afterwards), and the KA650 has no
 * command channel at all, so the code is simply dropped.
 */
tocons(c)
{
	register int oldmask;

	/* wait for the transmitter to go ready, remembering TXCS */
	while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
		continue;

	switch (cpu) {

#if VAX8200 || VAX780 || VAX750 || VAX730 || VAX630
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_630:
		c |= TXDB_CONS;
		break;
#endif

#if VAX8600
	case VAX_8600:
		/* select the logical console before sending */
		mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		break;
#endif

#if VAX650
	case VAX_650:
		/* everything is a real console terminal character on ka650 */
		return;
#endif
	}

	mtpr(TXDB, c);

#if VAX8600
	/* restore the previous console selection on the 8600 */
	switch (cpu) {

	case VAX_8600:
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		mtpr(TXCS, oldmask | TXCS_WMASK);
		break;
	}
#endif
#ifdef lint
	oldmask = oldmask;
#endif
}
656 
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */

/*
 * Size and place the crash dump on the dump device.
 * dumpsize is in pages; dumplo is the starting disk block.
 */
dumpconf()
{
	int nblks;

	dumpsize = physmem;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		/*
		 * If the dump won't fit after dumplo, truncate it;
		 * otherwise, if dumplo was left unset, place the dump
		 * at the end of the partition.  (NOTE(review): the
		 * else-if means a fitting dump with a preset dumplo is
		 * left exactly as configured.)
		 */
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			dumplo = nblks - btodb(ctob(physmem));
	}
	/*
	 * Don't dump on the first CLSIZE pages,
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < CLSIZE)
		dumplo = CLSIZE;
}
679 
680 /*
681  * Doadump comes here after turning off memory management and
682  * getting on the dump stack, either when called above, or by
683  * the auto-restart code.
684  */
685 dumpsys()
686 {
687 
688 	rpb.rp_flag = 1;
689 	msgbufmapped = 0;
690 	if (dumpdev == NODEV)
691 		return;
692 	/*
693 	 * For dumps during autoconfiguration,
694 	 * if dump device has already configured...
695 	 */
696 	if (dumpsize == 0)
697 		dumpconf();
698 	if (dumplo < 0)
699 		return;
700 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
701 	printf("dump ");
702 	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
703 
704 	case ENXIO:
705 		printf("device bad\n");
706 		break;
707 
708 	case EFAULT:
709 		printf("device not ready\n");
710 		break;
711 
712 	case EINVAL:					/* XXX */
713 		printf("area improper\n");
714 		break;
715 
716 	case EIO:
717 		printf("i/o error");
718 		break;
719 
720 	default:
721 		printf("succeeded");
722 		break;
723 	}
724 }
725 
726 /*
727  * Machine check error recovery code.
728  */
729 machinecheck(cmcf)
730 	caddr_t cmcf;
731 {
732 
733 	if ((*cpuops->cpu_mchk)(cmcf) == MCHK_RECOVERED)
734 		return;
735 	(*cpuops->cpu_memerr)();
736 	panic("mchk");
737 }
738 
#if defined(VAX780) || defined(VAX750)
/*
 * These strings are shared between the 780 and 750 machine check code
 * in ka780.c and ka730.c.
 * Indexed by machine-check summary code; NULL entries are unused codes.
 */
char *mc780750[16] = {
	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
	"cp rdtimo", 	"cp rds",	"ucode lost",	0,
	0,		0,		"ib tbuf par",	0,
	"ib rds",	"ib rd timo",	0,		"ib cache par"
};
#endif
751 
752 /*
753  * Return the best possible estimate of the time in the timeval
754  * to which tvp points.  We do this by reading the interval count
755  * register to determine the time remaining to the next clock tick.
756  * We must compensate for wraparound which is not yet reflected in the time
757  * (which happens when the ICR hits 0 and wraps after the splhigh(),
758  * but before the mfpr(ICR)).  Also check that this time is no less than
759  * any previously-reported time, which could happen around the time
760  * of a clock adjustment.  Just for fun, we guarantee that the time
761  * will be greater than the value obtained by a previous call.
762  */
763 microtime(tvp)
764 	register struct timeval *tvp;
765 {
766 	int s = splhigh();
767 	static struct timeval lasttime;
768 	register long t;
769 
770 	*tvp = time;
771 	t =  mfpr(ICR);
772 	if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT))
773 		t += tick;
774 	tvp->tv_usec += tick + t;
775 	if (tvp->tv_usec > 1000000) {
776 		tvp->tv_sec++;
777 		tvp->tv_usec -= 1000000;
778 	}
779 	if (tvp->tv_sec == lasttime.tv_sec &&
780 	    tvp->tv_usec <= lasttime.tv_usec &&
781 	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
782 		tvp->tv_sec++;
783 		tvp->tv_usec -= 1000000;
784 	}
785 	lasttime = *tvp;
786 	splx(s);
787 }
788 
/*
 * CPU-specific initialization: enable the hardware cache and,
 * where one may be present and controllable, the floating point
 * accelerator.
 */
initcpu()
{
	/*
	 * Enable cache.
	 */
	switch (cpu) {

#if VAX8600
	case VAX_8600:
		mtpr(CSWP, 3);
		break;
#endif
#if VAX8200
	case VAX_8200:
		mtpr(CADR, 0);
		break;
#endif
#if VAX780
	case VAX_780:
		mtpr(SBIMT, 0x200000);
		break;
#endif
#if VAX750
	case VAX_750:
		mtpr(CADR, 0);
		break;
#endif
	default:
		break;
	}

	/*
	 * Enable floating point accelerator if it exists
	 * and has control register.
	 */
	switch(cpu) {

#if VAX8600 || VAX780
	case VAX_8600:
	case VAX_780:
		if ((mfpr(ACCS) & 0xff) != 0) {
			printf("Enabling FPA\n");
			mtpr(ACCS, 0x8000);
		}
		/* falls into default (no break; default only breaks) */
#endif
	default:
		break;
	}
}
838 
/*
 * Return a reasonable approximation of the time of day register.
 * More precisely, return a number that increases by one about
 * once every ten milliseconds.
 */
todr()
{

	switch (cpu) {

#if VAX8600 || VAX8200 || VAX780 || VAX750 || VAX730 || VAX650
	case VAX_8600:
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_650:
		/* these CPUs have a real time-of-day register */
		return (mfpr(TODR));
#endif

#if VAX630
	case VAX_630:
		/* XXX crude: no TODR; fake it by delaying ~10ms per call */
		{ static int t; DELAY(10000); return (++t); }
#endif

	default:
		panic("todr");
	}
	/* NOTREACHED */
}
870