xref: /original-bsd/sys/vax/vax/machdep.c (revision 3b6250d9)
1 /*
2  * Copyright (c) 1982,1986,1988,1990 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)machdep.c	7.32 (Berkeley) 12/16/90
7  */
8 
9 #include "sys/param.h"
10 #include "sys/systm.h"
11 #include "sys/user.h"
12 #include "sys/kernel.h"
13 #include "sys/malloc.h"
14 #include "sys/map.h"
15 #include "sys/vm.h"
16 #include "sys/proc.h"
17 #include "sys/buf.h"
18 #include "sys/reboot.h"
19 #include "sys/conf.h"
20 #include "sys/file.h"
21 #include "sys/text.h"
22 #include "sys/clist.h"
23 #include "sys/callout.h"
24 #include "sys/cmap.h"
25 #include "sys/mbuf.h"
26 #include "sys/msgbuf.h"
27 #ifdef SYSVSHM
28 #include "sys/shm.h"
29 #endif
30 
31 #include "../include/reg.h"
32 #include "../include/pte.h"
33 #include "../include/psl.h"
34 #include "../include/frame.h"
35 #include "../include/clock.h"
36 #include "cons.h"
37 #include "../include/cpu.h"
38 #include "mem.h"
39 #include "../include/mtpr.h"
40 #include "rpb.h"
41 #include "ka630.h"
42 #include "ka650.h"
43 
44 #include "../uba/ubavar.h"
45 #include "../uba/ubareg.h"
46 
/*
 * Declare these as initialized data so we can patch them.
 * A value of 0 means "compute a default at boot time" (see startup()).
 */
int	nswbuf = 0;		/* number of swap I/O buffer headers */
#ifdef	NBUF
int	nbuf = NBUF;		/* file I/O buffer headers (config override) */
#else
int	nbuf = 0;		/* 0: derived from bufpages in startup() */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;	/* pages of buffer-cache memory (config override) */
#else
int	bufpages = 0;		/* 0: derived from physmem in startup() */
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */
/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.  On the vax, this must
 * be > 0 so that we can take interrupts after a panic while on the interrupt
 * stack.  Otherwise, we will get a reserved operand fault when we return
 * from any interrupt that comes in.
 */
int	safepri = 1;
71 
/*
 * Machine-dependent startup code: map the kernel message buffer,
 * size and carve out the statically-allocated kernel tables
 * (file, proc, text, clists, callouts, resource maps, buffer cache,
 * core map), wire down the memory used so far, then initialize the
 * allocators and configure the hardware.
 */
startup(firstaddr)
	int firstaddr;		/* first free physical page frame number */
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j, n;
	register caddr_t v;
	int maxbufs, base, residual;

	/*
	 * Initialize error message buffer (at end of core).
	 * The topmost physical pages are stolen from maxmem and
	 * mapped kernel read/write at msgbufmap.
	 */
	maxmem = physmem - btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 1; i < btoc(sizeof (struct msgbuf)) + 1; i++)
		*(int *)pte++ = PG_V | PG_KW | (physmem - i);
	mtpr(TBIA, 0);		/* flush translation buffer */
	msgbufmapped = 1;

#ifdef QBA
#include "qv.h"
#if NQV > 0
	/*
	 * redirect console to qvss if it exists
	 */
	qvcons_init();
#endif
#include "qd.h"
#if NQD > 0
	/*
	 * redirect console to qdss if it exists
	 */
	qdcons_init();
#endif
#endif

#ifdef KADB
	kdb_init();
	(void) cnopen(makedev(0, 0), 0);	/* open console XXX */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(KERNBASE | (firstaddr * NBPG));
/* carve "num" objects of "type" out of kernel VM at "v" */
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
/* as valloc(), additionally recording the end address in "lim" */
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Insure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * CLSIZE))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - KERNBASE)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	/* clamp cache pages to what the buffer headers can address */
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ KERNBASE);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);	/* round up to page */
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;		/* clusters per buffer... */
	residual = bufpages % nbuf;	/* ...first "residual" get one extra */
	mapaddr = firstaddr;
	for (i = 0; i < nbuf; i++) {
		n = (i < residual ? base + 1 : base) * CLSIZE;
		for (j = 0; j < n; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		/* each buffer owns MAXBSIZE of VM, only partially backed */
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ KERNBASE);
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 0);			/* After we just cleared it all! */

	/*
	 * Initialize callouts: chain all entries onto the free list.
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();	/* now safe to do malloc/free */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Clear restart inhibit flags (warm/cold start inhibit) so the
	 * machine can auto-restart after a halt.
	 */
	tocons(TXDB_CWSI);
	tocons(TXDB_CCSI);
}
289 
#ifdef PGINPROF
/*
 * Return the difference (in microseconds) between the current time
 * and a previous time as represented by the arguments (seconds,
 * lbolt ticks, and interval-count register value).  If there is a
 * pending clock interrupt which has not been serviced due to high
 * ipl, return an error code (-1), since ICR is then unreliable.
 */
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

	if ((mfpr(ICCS) & ICCS_INT) != 0)
		return (-1);
	/* 16667 usec per 60Hz tick, plus the partial tick from ICR */
	return (((time.tv_sec - otime) * 60 + lbolt - olbolt) * 16667 +
	    mfpr(ICR) - oicr);
}
#endif
309 
/*
 * Clear registers on exec.
 * Only the PC is actually set here; the commented-out code to zero
 * the general registers was never enabled.
 */
/* ARGSUSED */
setregs(entry, retval)
	u_long entry;		/* user program entry point */
	int *retval;
{
#ifdef notdef
	register int *rp;

	/* should pass args to init on the stack */
	/* should also fix this code before using it, it's wrong */
	/* wanna clear the scb? */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	/* +2 skips the entry-point register save mask word */
	u.u_ar0[PC] = entry + 2;
}
329 
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
sendsig(catcher, sig, mask, code)
	sig_t catcher;		/* user-mode handler address */
	int sig, mask;		/* signal number; mask to restore on return */
	unsigned code;		/* signal-specific code passed to handler */
{
	register struct sigcontext *scp;
	register struct proc *p = u.u_procp;
	register int *regs;
	/* frame pushed on the user's stack, below the sigcontext */
	register struct sigframe {
		int	sf_signum;	/* handler argument: signal number */
		int	sf_code;	/* handler argument: code */
		struct	sigcontext *sf_scp;	/* handler argument: context */
		sig_t	sf_handler;	/* handler for sigcode to call */
		int	sf_argcount;	/* calls-style frame for sigreturn */
		struct	sigcontext *sf_scpcopy;	/* sigreturn argument */
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		/* deliver on the alternate signal stack */
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void)grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = scp;
	fp->sf_handler = catcher;
	/*
	 * Build the calls argument frame to be used to call sigreturn
	 */
	fp->sf_argcount = 1;
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_ap = regs[AP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	regs[SP] = (int)fp;
	/* clear compatibility mode and first-part-done before sigcode runs */
	regs[PS] &= ~(PSL_CM|PSL_FPD);
	regs[PC] = (int)u.u_pcb.pcb_sigc;	/* trampoline in the u. area */
	return;
}
415 
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
/* ARGSUSED */
sigreturn(p, uap, retval)
	struct proc *p;
	struct args {
		struct sigcontext *sigcntxp;	/* context built by sendsig */
	} *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = uap->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return (EINVAL);
	/*
	 * Reject a PSL with must-be-zero bits set, elevated IPL or
	 * interrupt-stack set, a mode other than user/user, or (in
	 * compatibility mode) bits undefined on the PDP-11.
	 */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
	    ((scp->sc_ps & PSL_CM) &&
	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0))
		return (EINVAL);
	u.u_onstack = scp->sc_onstack & 01;
	p->p_sigmask = scp->sc_mask &~ sigcantmask;
	regs[FP] = scp->sc_fp;
	regs[AP] = scp->sc_ap;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
	/* registers are restored on return; no normal retval to set */
	return (EJUSTRETURN);
}
454 
/*
 * Memenable enables memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs.  Thus we report at most once per memintvl.
 */
int	memintvl = MEMINTVL;	/* re-enable interval in seconds; <= 0 disables */

memenable()
{

	(*cpuops->cpu_memenable)();	/* CPU-specific: re-arm CRD interrupts */
	if (memintvl > 0)
		timeout(memenable, (caddr_t)0, memintvl*hz);
}
470 
/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts.  It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controller.
 * All the work is done by the CPU-specific handler.
 */
memerr()
{

	(*cpuops->cpu_memerr)();
}
482 
/*
 * Invalidate, one pte at a time, all pte's in a cluster:
 * flush each page of the cluster from the translation buffer.
 */
tbiscl(v)
	unsigned v;		/* virtual page number of cluster start */
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	/* define the TBIS internal-register number for the asm below */
	asm(".set TBIS,58");
	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr(TBIS, addr);
#else
		/* "addr" lives in r11 (first register variable, see above) */
		asm("mtpr r11,$TBIS");
#endif
		addr += NBPG;
	}
}
503 
int	waittime = -1;		/* -1: disks not yet synced; >= 0: sync begun */

/*
 * Halt or reboot the machine.  Syncs the disks (unless RB_NOSYNC or
 * already attempted), takes a dump if requested, then either spins
 * halted or restarts via vaxboot().  The register variables matter:
 * the boot path expects howto in r11 and the root device in r10.
 */
boot(howto)
	register int howto;		/* r11 == how to boot */
{
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	/* sync only once, and only if buffers have been initialized */
	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			xumount(NULL);
		sync();

		/* wait (bounded) for in-flight buffers to drain */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splx(0x1f);			/* extreme priority */
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		switch (cpu) {

		/* 630 can be told to halt, but how? */
#if VAX650
		case VAX_650:
			/* tell console firmware to halt, not reboot */
			ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
			ka650ssc.ssc_cpmbx |= CPMB650_HALT;
			asm("halt");
#endif
		}
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		for (;;)
			;
	} else {
		if (howto & RB_DUMP)
			doadump();
		vaxboot();
	}
#ifdef lint
	devtype = devtype;
#endif
	/*NOTREACHED*/
}
572 
/*
 * Reboot after panic or via reboot system call.  Note that r11
 * and r10 must already have the proper boot values (`call by voodoo').
 * After CPU-specific preparation, we halt; the console/ROM then
 * restarts the machine.
 */
vaxboot()
{

	switch (cpu) {

#ifdef VAX8200
	case VAX_8200:
		/*
		 * TXDB_BOOT erases memory!  Instead we set the `did
		 * a dump' flag in the rpb.
		 */
		/* make the rpb page writable so we can set the flag */
		*(int *)&Sysmap[0] &= ~PG_PROT;
		*(int *)&Sysmap[0] |= PG_KW;
		mtpr(TBIS, &rpb);
		rpb.rp_flag = 1;
		break;
#endif

#ifdef VAX650
	case VAX_650:
		/* set boot-on-halt flag in "console mailbox" */
		ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
		ka650ssc.ssc_cpmbx |= CPMB650_REBOOT;
		break;
#endif

	default:
		/* ask the console to reboot via the transmit data register */
		tocons(TXDB_BOOT);
	}

	/*
	 * Except on 780s and 8600s, boot flags go in r5.  SBI
	 * VAXen do not care, so copy boot flags to r5 always.
	 */
	asm("movl r11,r5");
	for (;;) {
		asm("halt");
	}
}
616 
/*
 * Write a command byte to the console, using the TXDB register.
 * Waits for the transmitter to be ready, adjusts the command for
 * CPUs that need a console-select flag or channel switch, and
 * (on the 8600) restores the previous channel afterward.
 */
tocons(c)
{
	register int oldmask;	/* saved TXCS, for 8600 channel restore */

	while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
		continue;

	switch (cpu) {

#if VAX8200 || VAX780 || VAX750 || VAX730 || VAX630
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_630:
		/* mark the byte as a console command, not terminal output */
		c |= TXDB_CONS;
		break;
#endif

#if VAX8600
	case VAX_8600:
		/* switch transmit channel to the logical console */
		mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		break;
#endif

#if VAX650
	case VAX_650:
		/* everything is a real console terminal character on ka650 */
		return;
#endif
	}

	mtpr(TXDB, c);

#if VAX8600
	switch (cpu) {

	case VAX_8600:
		/* wait for completion, then restore the previous channel */
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		mtpr(TXCS, oldmask | TXCS_WMASK);
		break;
	}
#endif
#ifdef lint
	oldmask = oldmask;
#endif
}
667 
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */

/*
 * Compute the size and device offset for a crash dump:
 * dump all of physical memory, clipped to what fits on the
 * dump device after dumplo.
 */
dumpconf()
{
	int nblks;		/* size of dump device, in disk blocks */

	dumpsize = physmem;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			/* full dump fits; place it at the end of the device */
			dumplo = nblks - btodb(ctob(physmem));
	}
	/*
	 * Don't dump on the first CLSIZE pages,
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < CLSIZE)
		dumplo = CLSIZE;
}
690 
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{

	rpb.rp_flag = 1;	/* note in the rpb that a dump was taken */
	msgbufmapped = 0;	/* msgbuf mapping no longer valid */
	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	/* hand off to the driver's dump entry point and report its verdict */
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:					/* XXX */
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error");
		break;

	default:
		printf("succeeded");
		break;
	}
}
736 
737 /*
738  * Machine check error recovery code.
739  */
740 machinecheck(cmcf)
741 	caddr_t cmcf;
742 {
743 
744 	if ((*cpuops->cpu_mchk)(cmcf) == MCHK_RECOVERED)
745 		return;
746 	(*cpuops->cpu_memerr)();
747 	panic("mchk");
748 }
749 
#if defined(VAX780) || defined(VAX750)
/*
 * These strings are shared between the 780 and 750 machine check code
 * in ka780.c and ka730.c.
 * Indexed by machine-check summary code; NULL entries are codes
 * with no associated message.
 */
char *mc780750[16] = {
	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
	"cp rdtimo", 	"cp rds",	"ucode lost",	0,
	0,		0,		"ib tbuf par",	0,
	"ib rds",	"ib rd timo",	0,		"ib cache par"
};
#endif
762 
/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.  We do this by reading the interval count
 * register to determine the time remaining to the next clock tick.
 * We must compensate for wraparound which is not yet reflected in the time
 * (which happens when the ICR hits 0 and wraps after the splhigh(),
 * but before the mfpr(ICR)).  Also check that this time is no less than
 * any previously-reported time, which could happen around the time
 * of a clock adjustment.  Just for fun, we guarantee that the time
 * will be greater than the value obtained by a previous call.
 */
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();		/* keep the clock from ticking under us */
	static struct timeval lasttime;	/* last value returned, for monotonicity */
	register long t;

	*tvp = time;
	t =  mfpr(ICR);			/* negative count-up to next tick */
	/* unserviced tick pending: time hasn't been bumped yet, add a tick */
	if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT))
		t += tick;
	tvp->tv_usec += tick + t;
	if (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	/* force strict monotonicity versus the previous call */
	if (tvp->tv_sec == lasttime.tv_sec &&
	    tvp->tv_usec <= lasttime.tv_usec &&
	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	lasttime = *tvp;
	splx(s);
}
799 
/*
 * CPU-specific initialization: enable the hardware cache and,
 * where present, the floating point accelerator.
 */
initcpu()
{
	/*
	 * Enable cache.
	 */
	switch (cpu) {

#if VAX8600
	case VAX_8600:
		mtpr(CSWP, 3);
		break;
#endif
#if VAX8200
	case VAX_8200:
		mtpr(CADR, 0);
		break;
#endif
#if VAX780
	case VAX_780:
		mtpr(SBIMT, 0x200000);
		break;
#endif
#if VAX750
	case VAX_750:
		mtpr(CADR, 0);
		break;
#endif
	default:
		break;
	}

	/*
	 * Enable floating point accelerator if it exists
	 * and has control register.
	 */
	switch(cpu) {

#if VAX8600 || VAX780
	case VAX_8600:
	case VAX_780:
		if ((mfpr(ACCS) & 0xff) != 0) {
			printf("Enabling FPA\n");
			mtpr(ACCS, 0x8000);
		}
		/* FALLTHROUGH */
#endif
	default:
		break;
	}
}
849 
/*
 * Return a reasonable approximation of the time of day register.
 * More precisely, return a number that increases by one about
 * once every ten milliseconds.
 */
todr()
{

	switch (cpu) {

#if VAX8600 || VAX8200 || VAX780 || VAX750 || VAX730 || VAX650
	case VAX_8600:
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_650:
		/* these CPUs have a hardware time-of-day register */
		return (mfpr(TODR));
#endif

#if VAX630
	case VAX_630:
		/* XXX crude: no TODR; fake a 10ms tick with a busy wait */
		{ static int t; DELAY(10000); return (++t); }
#endif

	default:
		panic("todr");
	}
	/* NOTREACHED */
}
881