xref: /original-bsd/sys/vax/vax/machdep.c (revision 8a1064d0)
1 /*
2  * Copyright (c) 1982 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)machdep.c	7.5 (Berkeley) 04/02/87
7  */
8 
9 #include "reg.h"
10 #include "pte.h"
11 #include "psl.h"
12 
13 #include "param.h"
14 #include "systm.h"
15 #include "dir.h"
16 #include "user.h"
17 #include "kernel.h"
18 #include "map.h"
19 #include "vm.h"
20 #include "proc.h"
21 #include "buf.h"
22 #include "reboot.h"
23 #include "conf.h"
24 #include "inode.h"
25 #include "file.h"
26 #include "text.h"
27 #include "clist.h"
28 #include "callout.h"
29 #include "cmap.h"
30 #include "mbuf.h"
31 #include "msgbuf.h"
32 #include "quota.h"
33 
34 #include "frame.h"
35 #include "clock.h"
36 #include "cons.h"
37 #include "cpu.h"
38 #include "mem.h"
39 #include "mtpr.h"
40 #include "rpb.h"
41 #include "ka630.h"
42 #include "../vaxuba/ubavar.h"
43 #include "../vaxuba/ubareg.h"
44 
45 /*
46  * Declare these as initialized data so we can patch them.
47  */
48 int	nswbuf = 0;
49 #ifdef	NBUF
50 int	nbuf = NBUF;
51 #else
52 int	nbuf = 0;
53 #endif
54 #ifdef	BUFPAGES
55 int	bufpages = BUFPAGES;
56 #else
57 int	bufpages = 0;
58 #endif
59 
60 /*
61  * Machine-dependent startup code
62  */
63 startup(firstaddr)
64 	int firstaddr;
65 {
66 	register int unixsize;
67 	register unsigned i;
68 	register struct pte *pte;
69 	int mapaddr, j;
70 	register caddr_t v;
71 	int maxbufs, base, residual;
72 
73 #if VAX630
74 	/*
75 	 * Leave last 5k of phys. memory as console work area.
76 	 */
77 	if (cpu == VAX_630)
78 		maxmem -= 10;
79 #endif
80 	/*
81 	 * Initialize error message buffer (at end of core).
82 	 */
83 	maxmem -= btoc(sizeof (struct msgbuf));
84 	pte = msgbufmap;
85 	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
86 		*(int *)pte++ = PG_V | PG_KW | (maxmem + i);
87 	mtpr(TBIA, 0);
88 
89 #if VAX630
90 #include "qv.h"
91 #if NQV > 0
92 	/*
93 	 * redirect console to qvss if it exists
94 	 */
95 	if (!qvcons_init())
96 		printf("qvss not initialized\n");
97 #endif
98 #endif
99 
100 #ifdef KDB
101 	kdb_init();
102 #endif
103 	/*
104 	 * Good {morning,afternoon,evening,night}.
105 	 */
106 	printf(version);
107 	printf("real mem  = %d\n", ctob(physmem));
108 
109 	/*
110 	 * Allocate space for system data structures.
111 	 * The first available real memory address is in "firstaddr".
112 	 * The first available kernel virtual address is in "v".
113 	 * As pages of kernel virtual memory are allocated, "v" is incremented.
114 	 * As pages of memory are allocated and cleared,
115 	 * "firstaddr" is incremented.
116 	 * An index into the kernel page table corresponding to the
117 	 * virtual memory address maintained in "v" is kept in "mapaddr".
118 	 */
119 	v = (caddr_t)(0x80000000 | (firstaddr * NBPG));
120 #define	valloc(name, type, num) \
121 	    (name) = (type *)v; v = (caddr_t)((name)+(num))
122 #define	valloclim(name, type, num, lim) \
123 	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
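	/*
	 * valloc and valloclim simply carve arrays out of kernel virtual
	 * space by advancing v; valloclim also records the address just
	 * past the array in "lim".  For instance,
	 *	valloc(cfree, struct cblock, nclist);
	 * expands to roughly
	 *	cfree = (struct cblock *)v; v = (caddr_t)(cfree + nclist);
	 * No physical memory is touched here; the pages backing this
	 * range are mapped and cleared further below.
	 */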
124 	valloclim(inode, struct inode, ninode, inodeNINODE);
125 	valloclim(file, struct file, nfile, fileNFILE);
126 	valloclim(proc, struct proc, nproc, procNPROC);
127 	valloclim(text, struct text, ntext, textNTEXT);
128 	valloc(cfree, struct cblock, nclist);
129 	valloc(callout, struct callout, ncallout);
130 	valloc(swapmap, struct map, nswapmap = nproc * 2);
131 	valloc(argmap, struct map, ARGMAPSIZE);
132 	valloc(kernelmap, struct map, nproc);
133 	valloc(mbmap, struct map, nmbclusters/4);
134 	valloc(namecache, struct namecache, nchsize);
135 #ifdef QUOTA
136 	valloclim(quota, struct quota, nquota, quotaNQUOTA);
137 	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
138 #endif
139 
140 	/*
141 	 * Determine how many buffers to allocate.
142 	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
143 	 * memory. Ensure a minimum of 16 buffers.
144 	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
145 	 */
146 	if (bufpages == 0)
147 		if (physmem < (2 * 1024 * CLSIZE))
148 			bufpages = physmem / 10 / CLSIZE;
149 		else
150 			bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
151 	if (nbuf == 0) {
152 		nbuf = bufpages / 2;
153 		if (nbuf < 16)
154 			nbuf = 16;
155 	}
156 	if (nswbuf == 0) {
157 		nswbuf = (nbuf / 2) &~ 1;	/* force even */
158 		if (nswbuf > 256)
159 			nswbuf = 256;		/* sanity */
160 	}
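	/*
	 * As a worked example (assuming the usual VAX values NBPG = 512
	 * and CLSIZE = 2): on a 4 Mb machine physmem is 8192 pages, so
	 * bufpages = ((2*1024*2 + 8192) / 20) / 2 = 307 clusters (~307 Kb),
	 * nbuf = 307/2 = 153, and nswbuf = (153/2) &~ 1 = 76.
	 */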
161 	valloc(swbuf, struct buf, nswbuf);
162 
163 	/*
164 	 * Now the amount of virtual memory remaining for buffers
165 	 * can be calculated, estimating needs for the cmap.
166 	 */
167 	ncmap = (maxmem*NBPG - ((int)v &~ 0x80000000)) /
168 		(CLBYTES + sizeof(struct cmap)) + 2;
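	/*
	 * This ncmap figure is only an estimate (bufpages is not final
	 * yet); the core map is sized for real further below.
	 */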
169 	maxbufs = ((SYSPTSIZE * NBPG) -
170 	    ((int)(v + ncmap * sizeof(struct cmap)) - 0x80000000)) /
171 		(MAXBSIZE + sizeof(struct buf));
172 	if (maxbufs < 16)
173 		panic("sys pt too small");
174 	if (nbuf > maxbufs) {
175 		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
176 		nbuf = maxbufs;
177 	}
178 	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
179 		bufpages = nbuf * (MAXBSIZE / CLBYTES);
180 	valloc(buf, struct buf, nbuf);
181 
182 	/*
183 	 * Allocate space for core map.
184 	 * Allow space for all of physical memory minus the amount
185 	 * dedicated to the system. The amount of physical memory
186 	 * dedicated to the system is the total virtual memory of
187 	 * the system thus far, plus core map, buffer pages,
188 	 * and buffer headers not yet allocated.
189 	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
190 	 */
191 	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ 0x80000000)) /
192 		(CLBYTES + sizeof(struct cmap)) + 2;
193 	valloclim(cmap, struct cmap, ncmap, ecmap);
194 
195 	/*
196 	 * Clear space allocated thus far, and make r/w entries
197 	 * for the space in the kernel map.
198 	 */
199 	unixsize = btoc((int)v &~ 0x80000000);
200 	while (firstaddr < unixsize) {
201 		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
202 		clearseg((unsigned)firstaddr);
203 		firstaddr++;
204 	}
205 
206 	/*
207 	 * Now allocate buffers proper.  They are different from the above
208 	 * in that they usually occupy more virtual memory than physical.
209 	 */
210 	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
211 	valloc(buffers, char, MAXBSIZE * nbuf);
212 	base = bufpages / nbuf;
213 	residual = bufpages % nbuf;
214 	mapaddr = firstaddr;
215 	for (i = 0; i < residual; i++) {
216 		for (j = 0; j < (base + 1) * CLSIZE; j++) {
217 			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
218 			clearseg((unsigned)firstaddr);
219 			firstaddr++;
220 		}
221 		mapaddr += MAXBSIZE / NBPG;
222 	}
223 	for (i = residual; i < nbuf; i++) {
224 		for (j = 0; j < base * CLSIZE; j++) {
225 			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
226 			clearseg((unsigned)firstaddr);
227 			firstaddr++;
228 		}
229 		mapaddr += MAXBSIZE / NBPG;
230 	}
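	/*
	 * Note that the first "residual" buffers were given base+1 clusters
	 * of physical memory and the rest base clusters, while every buffer
	 * reserves a full MAXBSIZE worth of virtual space (MAXBSIZE/NBPG
	 * page table entries) regardless of how much was actually mapped.
	 */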
231 
232 	unixsize = btoc((int)v &~ 0x80000000);
233 	if (firstaddr >= physmem - 8*UPAGES)
234 		panic("no memory");
235 	mtpr(TBIA, 0);			/* After we just cleared it all! */
236 
237 	/*
238 	 * Initialize callouts
239 	 */
240 	callfree = callout;
241 	for (i = 1; i < ncallout; i++)
242 		callout[i-1].c_next = &callout[i];
243 
244 	/*
245 	 * Initialize memory allocator and swap
246 	 * and user page table maps.
247 	 *
248 	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
249 	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
250 	 */
251 	meminit(firstaddr, maxmem);
252 	maxmem = freemem;
253 	printf("avail mem = %d\n", ctob(maxmem));
254 	printf("using %d buffers containing %d bytes of memory\n",
255 		nbuf, bufpages * CLBYTES);
256 	rminit(kernelmap, (long)USRPTSIZE, (long)1,
257 	    "usrpt", nproc);
258 	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
259 	    "mbclusters", nmbclusters/4);
260 
261 	/*
262 	 * Set up CPU-specific registers, cache, etc.
263 	 */
264 	initcpu();
265 
266 	/*
267 	 * Set up buffers, so they can be used to read disk labels.
268 	 */
269 	bhinit();
270 	binit();
271 
272 	/*
273 	 * Configure the system.
274 	 */
275 	configure();
276 
277 	/*
278 	 * Clear restart inhibit flags.
279 	 */
280 	tocons(TXDB_CWSI);
281 	tocons(TXDB_CCSI);
282 }
283 
284 #ifdef PGINPROF
285 /*
286  * Return the difference (in microseconds)
287  * between the  current time and a previous
288  * time as represented  by the arguments.
289  * If there is a pending clock interrupt
290  * which has not been serviced due to high
291  * ipl, return error code.
292  */
293 vmtime(otime, olbolt, oicr)
294 	register int otime, olbolt, oicr;
295 {
296 
297 	if (mfpr(ICCS)&ICCS_INT)
298 		return(-1);
299 	else
300 		return(((time.tv_sec-otime)*60 + lbolt-olbolt)*16667 + mfpr(ICR)-oicr);
301 }
302 #endif
303 
304 /*
305  * Clear registers on exec
306  */
307 setregs(entry)
308 	u_long entry;
309 {
310 #ifdef notdef
311 	register int *rp;
312 
313 	/* should pass args to init on the stack */
314 	/* should also fix this code before using it, it's wrong */
315 	/* wanna clear the scb? */
316 	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
317 		*rp++ = 0;
318 #endif
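	/*
	 * The "+ 2" apparently skips the two-byte register save (entry)
	 * mask at the program's entry point, since execution begins here
	 * without going through a calls instruction.
	 */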
319 	u.u_ar0[PC] = entry + 2;
320 }
321 
322 /*
323  * Send an interrupt to a process.
324  *
325  * The stack is set up to allow the sigcode stored
326  * in u. to call the handler routine, followed by a chmk
327  * to the sigreturn routine below.  After sigreturn
328  * resets the signal mask, the stack, the frame
329  * pointer, and the argument pointer, it returns
330  * to the user-specified pc and psl.
331  */
332 sendsig(p, sig, mask)
333 	int (*p)(), sig, mask;
334 {
335 	register struct sigcontext *scp;
336 	register int *regs;
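	/*
	 * Layout of the frame pushed on the user's stack: sf_signum,
	 * sf_code and sf_scp are the arguments handed to the handler,
	 * sf_handler is the routine the sigcode trampoline calls, and
	 * sf_argcount plus sf_scpcopy form the calls-style argument list
	 * (a single argument) later used to invoke sigreturn.
	 */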
337 	register struct sigframe {
338 		int	sf_signum;
339 		int	sf_code;
340 		struct	sigcontext *sf_scp;
341 		int	(*sf_handler)();
342 		int	sf_argcount;
343 		struct	sigcontext *sf_scpcopy;
344 	} *fp;
345 	int oonstack;
346 
347 	regs = u.u_ar0;
348 	oonstack = u.u_onstack;
349 	/*
350 	 * Allocate and validate space for the signal handler
351 	 * context. Note that if the stack is in P0 space, the
352 	 * call to grow() is a nop, and the useracc() check
353 	 * will fail if the process has not already allocated
354 	 * the space with a `brk'.
355 	 */
356 	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
357 		scp = (struct sigcontext *)u.u_sigsp - 1;
358 		u.u_onstack = 1;
359 	} else
360 		scp = (struct sigcontext *)regs[SP] - 1;
361 	fp = (struct sigframe *)scp - 1;
362 	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
363 		(void)grow((unsigned)fp);
364 	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
365 		/*
366 		 * Process has trashed its stack; give it an illegal
367 		 * instruction to halt it in its tracks.
368 		 */
369 		u.u_signal[SIGILL] = SIG_DFL;
370 		sig = sigmask(SIGILL);
371 		u.u_procp->p_sigignore &= ~sig;
372 		u.u_procp->p_sigcatch &= ~sig;
373 		u.u_procp->p_sigmask &= ~sig;
374 		psignal(u.u_procp, SIGILL);
375 		return;
376 	}
377 	/*
378 	 * Build the argument list for the signal handler.
379 	 */
380 	fp->sf_signum = sig;
381 	if (sig == SIGILL || sig == SIGFPE) {
382 		fp->sf_code = u.u_code;
383 		u.u_code = 0;
384 	} else
385 		fp->sf_code = 0;
386 	fp->sf_scp = scp;
387 	fp->sf_handler = p;
388 	/*
389 	 * Build the calls argument frame to be used to call sigreturn
390 	 */
391 	fp->sf_argcount = 1;
392 	fp->sf_scpcopy = scp;
393 	/*
394 	 * Build the signal context to be used by sigreturn.
395 	 */
396 	scp->sc_onstack = oonstack;
397 	scp->sc_mask = mask;
398 	scp->sc_sp = regs[SP];
399 	scp->sc_fp = regs[FP];
400 	scp->sc_ap = regs[AP];
401 	scp->sc_pc = regs[PC];
402 	scp->sc_ps = regs[PS];
403 	regs[SP] = (int)fp;
404 	regs[PS] &= ~(PSL_CM|PSL_FPD);
405 	regs[PC] = (int)u.u_pcb.pcb_sigc;
406 	return;
407 }
408 
409 /*
410  * System call to clean up state after a signal
411  * has been taken.  Reset signal mask and
412  * stack state from context left by sendsig (above).
413  * Return to previous pc and psl as specified by
414  * context left by sendsig. Check carefully to
415  * make sure that the user has not modified the
416  * psl to gain improper privileges or to cause
417  * a machine fault.
418  */
419 sigreturn()
420 {
421 	struct a {
422 		struct sigcontext *sigcntxp;
423 	};
424 	register struct sigcontext *scp;
425 	register int *regs = u.u_ar0;
426 
427 	scp = ((struct a *)(u.u_ap))->sigcntxp;
428 	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
429 		return;
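	/*
	 * Sanity-check the saved PSL before restoring it: no must-be-zero
	 * bits, no raised IPL or interrupt-stack bit, both previous and
	 * current mode must be user, and in compatibility mode the FPD bit
	 * and the DV, FU and IV trap enables must be clear.
	 */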
430 	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
431 	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
432 	    ((scp->sc_ps & PSL_CM) &&
433 	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0)) {
434 		u.u_error = EINVAL;
435 		return;
436 	}
437 	u.u_eosys = JUSTRETURN;
438 	u.u_onstack = scp->sc_onstack & 01;
439 	u.u_procp->p_sigmask = scp->sc_mask &~
440 	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
441 	regs[FP] = scp->sc_fp;
442 	regs[AP] = scp->sc_ap;
443 	regs[SP] = scp->sc_sp;
444 	regs[PC] = scp->sc_pc;
445 	regs[PS] = scp->sc_ps;
446 }
447 
448 /* XXX - BEGIN 4.2 COMPATIBILITY */
449 /*
450  * Compatibility with 4.2 chmk $139 used by longjmp()
451  */
452 osigcleanup()
453 {
454 	register struct sigcontext *scp;
455 	register int *regs = u.u_ar0;
456 
457 	scp = (struct sigcontext *)fuword((caddr_t)regs[SP]);
458 	if ((int)scp == -1)
459 		return;
460 	if (useracc((caddr_t)scp, 3 * sizeof (int), B_WRITE) == 0)
461 		return;
462 	u.u_onstack = scp->sc_onstack & 01;
463 	u.u_procp->p_sigmask = scp->sc_mask &~
464 	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
465 	regs[SP] = scp->sc_sp;
466 }
467 /* XXX - END 4.2 COMPATIBILITY */
468 
469 #ifdef notdef
470 dorti()
471 {
472 	struct frame frame;
473 	register int sp;
474 	register int reg, mask;
475 	extern int ipcreg[];
476 
477 	(void) copyin((caddr_t)u.u_ar0[FP], (caddr_t)&frame, sizeof (frame));
478 	sp = u.u_ar0[FP] + sizeof (frame);
479 	u.u_ar0[PC] = frame.fr_savpc;
480 	u.u_ar0[FP] = frame.fr_savfp;
481 	u.u_ar0[AP] = frame.fr_savap;
482 	mask = frame.fr_mask;
483 	for (reg = 0; reg <= 11; reg++) {
484 		if (mask&1) {
485 			u.u_ar0[ipcreg[reg]] = fuword((caddr_t)sp);
486 			sp += 4;
487 		}
488 		mask >>= 1;
489 	}
490 	sp += frame.fr_spa;
491 	u.u_ar0[PS] = (u.u_ar0[PS] & 0xffff0000) | frame.fr_psw;
492 	if (frame.fr_s)
493 		sp += 4 + 4 * (fuword((caddr_t)sp) & 0xff);
494 	/* phew, now the rei */
495 	u.u_ar0[PC] = fuword((caddr_t)sp);
496 	sp += 4;
497 	u.u_ar0[PS] = fuword((caddr_t)sp);
498 	sp += 4;
499 	u.u_ar0[PS] |= PSL_USERSET;
500 	u.u_ar0[PS] &= ~PSL_USERCLR;
501 	u.u_ar0[SP] = (int)sp;
502 }
503 #endif
504 
505 /*
506  * Memenable enables the memory controllers' corrected data reporting.
507  * This runs at regular intervals, turning on the interrupt.
508  * The interrupt is turned off, per memory controller, when error
509  * reporting occurs.  Thus we report at most once per memintvl.
510  */
511 int	memintvl = MEMINTVL;
512 
513 memenable()
514 {
515 	register struct mcr *mcr;
516 	register int m;
517 
518 #if VAX630
519 	if (cpu == VAX_630)
520 		return;
521 #endif
522 #ifdef	VAX8600
523 	if (cpu == VAX_8600) {
524 		M8600_ENA;
525 	} else
526 #endif
527 	for (m = 0; m < nmcr; m++) {
528 		mcr = mcraddr[m];
529 		switch (mcrtype[m]) {
530 #if VAX780
531 		case M780C:
532 			M780C_ENA(mcr);
533 			break;
534 		case M780EL:
535 			M780EL_ENA(mcr);
536 			break;
537 		case M780EU:
538 			M780EU_ENA(mcr);
539 			break;
540 #endif
541 #if VAX750
542 		case M750:
543 			M750_ENA(mcr);
544 			break;
545 #endif
546 #if VAX730
547 		case M730:
548 			M730_ENA(mcr);
549 			break;
550 #endif
551 		}
552 	}
553 	if (memintvl > 0)
554 		timeout(memenable, (caddr_t)0, memintvl*hz);
555 }
556 
557 /*
558  * Memerr is the interrupt routine for corrected read data
559  * interrupts.  It looks to see which memory controllers have
560  * unreported errors, reports them, and disables further
561  * reporting for a time on those controllers.
562  */
563 memerr()
564 {
565 #ifdef VAX8600
566 	register int reg11;	/* known to be r11 below */
567 #endif
568 	register struct mcr *mcr;
569 	register int m;
570 
571 #if VAX630
572 	if (cpu == VAX_630)
573 		return;
574 #endif
575 #ifdef VAX8600
576 	if (cpu == VAX_8600) {
577 		int mdecc, mear, mstat1, mstat2, array;
578 
579 		/*
580 		 * Scratchpad registers in the Ebox must be read by
581 		 * storing their ID number in ESPA and then immediately
582 		 * reading ESPD's contents with no other intervening
583 		 * machine instructions!
584 		 *
585 		 * The asm's below have a number of constants which
586 		 * are defined correctly in mem.h and mtpr.h.
587 		 */
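		/*
		 * In the asm's: 0x4e and 0x4f are ESPA and ESPD, and the
		 * scratchpad IDs 0x27, 0x2a, 0x25 and 0x26 select (per the
		 * assignments below) MDECC, MEAR, MSTAT1 and MSTAT2.
		 */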
588 #ifdef lint
589 		reg11 = 0;
590 #else
591 		asm("mtpr $0x27,$0x4e; mfpr $0x4f,r11");
592 #endif
593 		mdecc = reg11;	/* must acknowledge interrupt? */
594 		if (M8600_MEMERR(mdecc)) {
595 			asm("mtpr $0x2a,$0x4e; mfpr $0x4f,r11");
596 			mear = reg11;
597 			asm("mtpr $0x25,$0x4e; mfpr $0x4f,r11");
598 			mstat1 = reg11;
599 			asm("mtpr $0x26,$0x4e; mfpr $0x4f,r11");
600 			mstat2 = reg11;
601 			array = M8600_ARRAY(mear);
602 
603 			printf("mcr0: ecc error, addr %x (array %d) syn %x\n",
604 				M8600_ADDR(mear), array, M8600_SYN(mdecc));
605 			printf("\tMSTAT1 = %b\n\tMSTAT2 = %b\n",
606 				    mstat1, M8600_MSTAT1_BITS,
607 				    mstat2, M8600_MSTAT2_BITS);
608 			M8600_INH;
609 		}
610 	} else
611 #endif
612 	for (m = 0; m < nmcr; m++) {
613 		mcr = mcraddr[m];
614 		switch (mcrtype[m]) {
615 #if VAX780
616 		case M780C:
617 			if (M780C_ERR(mcr)) {
618 				printf("mcr%d: soft ecc addr %x syn %x\n",
619 				    m, M780C_ADDR(mcr), M780C_SYN(mcr));
620 #ifdef TRENDATA
621 				memlog(m, mcr);
622 #endif
623 				M780C_INH(mcr);
624 			}
625 			break;
626 
627 		case M780EL:
628 			if (M780EL_ERR(mcr)) {
629 				printf("mcr%d: soft ecc addr %x syn %x\n",
630 				    m, M780EL_ADDR(mcr), M780EL_SYN(mcr));
631 				M780EL_INH(mcr);
632 			}
633 			break;
634 
635 		case M780EU:
636 			if (M780EU_ERR(mcr)) {
637 				printf("mcr%d: soft ecc addr %x syn %x\n",
638 				    m, M780EU_ADDR(mcr), M780EU_SYN(mcr));
639 				M780EU_INH(mcr);
640 			}
641 			break;
642 #endif
643 #if VAX750
644 		case M750:
645 			if (M750_ERR(mcr)) {
646 				struct mcr amcr;
647 				amcr.mc_reg[0] = mcr->mc_reg[0];
648 				printf("mcr%d: %s",
649 				    m, (amcr.mc_reg[0] & M750_UNCORR) ?
650 				    "hard error" : "soft ecc");
651 				printf(" addr %x syn %x\n",
652 				    M750_ADDR(&amcr), M750_SYN(&amcr));
653 				M750_INH(mcr);
654 			}
655 			break;
656 #endif
657 #if VAX730
658 		case M730: {
659 			struct mcr amcr;
660 
661 			/*
662 			 * Must be careful on the 730 not to use invalid
663 			 * instructions in I/O space, so make a copy.
664 			 */
665 			amcr.mc_reg[0] = mcr->mc_reg[0];
666 			amcr.mc_reg[1] = mcr->mc_reg[1];
667 			if (M730_ERR(&amcr)) {
668 				printf("mcr%d: %s",
669 				    m, (amcr.mc_reg[1] & M730_UNCORR) ?
670 				    "hard error" : "soft ecc");
671 				printf(" addr %x syn %x\n",
672 				    M730_ADDR(&amcr), M730_SYN(&amcr));
673 				M730_INH(mcr);
674 			}
675 			break;
676 		}
677 #endif
678 		}
679 	}
680 }
681 
682 #ifdef TRENDATA
683 /*
684  * Figure out what chip to replace on Trendata boards.
685  * Assumes all your memory is Trendata, or that the non-Trendata
686  * memory never fails.
687  */
688 struct {
689 	u_char	m_syndrome;
690 	char	m_chip[4];
691 } memlogtab[] = {
692 	0x01,	"C00",	0x02,	"C01",	0x04,	"C02",	0x08,	"C03",
693 	0x10,	"C04",	0x19,	"L01",	0x1A,	"L02",	0x1C,	"L04",
694 	0x1F,	"L07",	0x20,	"C05",	0x38,	"L00",	0x3B,	"L03",
695 	0x3D,	"L05",	0x3E,	"L06",	0x40,	"C06",	0x49,	"L09",
696 	0x4A,	"L10",	0x4c,	"L12",	0x4F,	"L15",	0x51,	"L17",
697 	0x52,	"L18",	0x54,	"L20",	0x57,	"L23",	0x58,	"L24",
698 	0x5B,	"L27",	0x5D,	"L29",	0x5E,	"L30",	0x68,	"L08",
699 	0x6B,	"L11",	0x6D,	"L13",	0x6E,	"L14",	0x70,	"L16",
700 	0x73,	"L19",	0x75,	"L21",	0x76,	"L22",	0x79,	"L25",
701 	0x7A,	"L26",	0x7C,	"L28",	0x7F,	"L31",	0x80,	"C07",
702 	0x89,	"U01",	0x8A,	"U02",	0x8C,	"U04",	0x8F,	"U07",
703 	0x91,	"U09",	0x92,	"U10",	0x94,	"U12",	0x97, 	"U15",
704 	0x98,	"U16",	0x9B,	"U19",	0x9D,	"U21",	0x9E, 	"U22",
705 	0xA8,	"U00",	0xAB,	"U03",	0xAD,	"U05",	0xAE,	"U06",
706 	0xB0,	"U08",	0xB3,	"U11",	0xB5,	"U13",	0xB6,	"U14",
707 	0xB9,	"U17",	0xBA,	"U18",	0xBC,	"U20",	0xBF,	"U23",
708 	0xC1,	"U25",	0xC2,	"U26",	0xC4,	"U28",	0xC7,	"U31",
709 	0xE0,	"U24",	0xE3,	"U27",	0xE5,	"U29",	0xE6,	"U30"
710 };
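/*
 * (Old-style initializer: each syndrome/chip-name pair above fills in
 * one element of memlogtab; the inner braces are simply omitted.)
 */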
711 
712 memlog (m, mcr)
713 	int m;
714 	struct mcr *mcr;
715 {
716 	register i;
717 
718 	switch (mcrtype[m]) {
719 
720 #if VAX780
721 	case M780C:
722 	for (i = 0; i < (sizeof (memlogtab) / sizeof (memlogtab[0])); i++)
723 		if ((u_char)(M780C_SYN(mcr)) == memlogtab[i].m_syndrome) {
724 			printf (
725 	"mcr%d: replace %s chip in %s bank of memory board %d (0-15)\n",
726 				m,
727 				memlogtab[i].m_chip,
728 				(M780C_ADDR(mcr) & 0x8000) ? "upper" : "lower",
729 				(M780C_ADDR(mcr) >> 16));
730 			return;
731 		}
732 	printf ("mcr%d: multiple errors, not traceable\n", m);
733 	break;
734 #endif
735 	}
736 }
737 #endif
738 
739 /*
740  * Invalidate the translation buffer entries for all pte's in a cluster,
741  * one page (TBIS) at a time
741  */
742 tbiscl(v)
743 	unsigned v;
744 {
745 	register caddr_t addr;		/* must be first reg var */
746 	register int i;
747 
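	/*
	 * The .set defines TBIS (the single-entry translation buffer
	 * invalidate register, mtpr register 58) for the inline asm.
	 * Because addr is the first register variable it lives in r11,
	 * which the asm names directly; the lint case uses the portable
	 * mtpr() form instead.
	 */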
748 	asm(".set TBIS,58");
749 	addr = ptob(v);
750 	for (i = 0; i < CLSIZE; i++) {
751 #ifdef lint
752 		mtpr(TBIS, addr);
753 #else
754 		asm("mtpr r11,$TBIS");
755 #endif
756 		addr += NBPG;
757 	}
758 }
759 
760 int	waittime = -1;
761 
762 boot(arghowto)
763 	int arghowto;
764 {
765 	register int howto;		/* r11 == how to boot */
766 	register int devtype;		/* r10 == major of root dev */
767 
768 	howto = arghowto;
769 	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
770 		register struct buf *bp;
771 		int iter, nbusy;
772 
773 		waittime = 0;
774 		(void) splnet();
775 		printf("syncing disks... ");
776 		/*
777 		 * Release inodes held by texts before update.
778 		 */
779 		xumount(NODEV);
780 		update();
781 
782 		for (iter = 0; iter < 20; iter++) {
783 			nbusy = 0;
784 			for (bp = &buf[nbuf]; --bp >= buf; )
785 				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
786 					nbusy++;
787 			if (nbusy == 0)
788 				break;
789 			printf("%d ", nbusy);
790 			DELAY(40000 * iter);
791 		}
792 		if (nbusy)
793 			printf("giving up\n");
794 		else
795 			printf("done\n");
796 		/*
797 		 * If we've been adjusting the clock, the todr
798 		 * will be out of synch; adjust it now.
799 		 */
800 		resettodr();
801 	}
802 	splx(0x1f);			/* extreme priority */
803 	devtype = major(rootdev);
804 	if (howto&RB_HALT) {
805 		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
806 		mtpr(IPL, 0x1f);
807 		for (;;)
808 			;
809 	} else {
810 		if (howto & RB_DUMP) {
811 			doadump();		/* TXDB_BOOT's itself */
812 			/*NOTREACHED*/
813 		}
814 		tocons(TXDB_BOOT);
815 	}
816 #if defined(VAX750) || defined(VAX730) || defined(VAX630)
817 	if (cpu == VAX_750 || cpu == VAX_730 || cpu == VAX_630)
818 		{ asm("movl r11,r5"); }		/* boot flags go in r5 */
819 #endif
820 	for (;;)
821 		asm("halt");
822 #ifdef lint
823 	printf("howto %d, devtype %d\n", arghowto, devtype);
824 #endif
825 	/*NOTREACHED*/
826 }
827 
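/*
 * Write a command code to the console: busy-wait until the console
 * transmitter is ready, OR in TXDB_CONS on the 780/750/730/630
 * (apparently marking the datum as a console command rather than an
 * ordinary character), and on the 8600 select the logical console
 * around the write, restoring the saved TXCS mask afterwards.
 */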
828 tocons(c)
829 {
830 	register oldmask;
831 
832 	while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
833 		continue;
834 
835 	switch (cpu) {
836 
837 #if VAX780 || VAX750 || VAX730 || VAX630
838 	case VAX_780:
839 	case VAX_750:
840 	case VAX_730:
841 	case VAX_630:
842 		c |= TXDB_CONS;
843 		break;
844 #endif
845 
846 #if VAX8600
847 	case VAX_8600:
848 		mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
849 		while ((mfpr(TXCS) & TXCS_RDY) == 0)
850 			continue;
851 		break;
852 #endif
853 	}
854 
855 	mtpr(TXDB, c);
856 
857 #if VAX8600
858 	switch (cpu) {
859 
860 	case VAX_8600:
861 		while ((mfpr(TXCS) & TXCS_RDY) == 0)
862 			continue;
863 		mtpr(TXCS, oldmask | TXCS_WMASK);
864 		break;
865 	}
866 #endif
867 }
868 
869 int	dumpmag = 0x8fca0101;	/* magic number for savecore */
870 int	dumpsize = 0;		/* also for savecore */
871 /*
872  * Dumpsys is reached via doadump after turning off memory management
873  * and getting onto the dump stack, either when doadump is called from
874  * boot() above, or by the auto-restart code.
875  */
876 dumpsys()
877 {
878 
879 	rpb.rp_flag = 1;
880 	if (dumpdev == NODEV)
881 		return;
882 #ifdef notdef
883 	if ((minor(dumpdev)&07) != 1)
884 		return;
885 #endif
886 	/*
887 	 * For dumps during autoconfiguration,
888 	 * if dump device has already configured...
889 	 * if the dump device has already been configured...
890 	if (dumplo == 0 && bdevsw[major(dumpdev)].d_psize)
891 		dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) - physmem;
892 	if (dumplo < 0)
893 		dumplo = 0;
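	/*
	 * This places the dump in the last physmem blocks of the dump
	 * partition (memory pages and disk blocks are both 512 bytes on
	 * the VAX, so the units agree).
	 */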
894 	dumpsize = physmem;
895 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
896 	printf("dump ");
897 	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
898 
899 	case ENXIO:
900 		printf("device bad\n");
901 		break;
902 
903 	case EFAULT:
904 		printf("device not ready\n");
905 		break;
906 
907 	case EINVAL:
908 		printf("area improper\n");
909 		break;
910 
911 	case EIO:
912 		printf("i/o error");
913 		break;
914 
915 	default:
916 		printf("succeeded");
917 		break;
918 	}
919 }
920 
921 /*
922  * Machine check error recovery code.
923  * Print out the machine check frame and then give up.
924  */
925 #if VAX8600
926 #define NMC8600	7
927 char *mc8600[] = {
928 	"unkn type",	"fbox error",	"ebox error",	"ibox error",
929 	"mbox error",	"tbuf error",	"mbox 1D error"
930 };
931 /* codes for above */
932 #define	MC_FBOX		1
933 #define	MC_EBOX		2
934 #define	MC_IBOX		3
935 #define	MC_MBOX		4
936 #define	MC_TBUF		5
937 #define	MC_MBOX1D	6
938 
939 /* error bits */
940 #define	MBOX_FE		0x8000		/* Mbox fatal error */
941 #define	FBOX_SERV	0x10000000	/* Fbox service error */
942 #define	IBOX_ERR	0x2000		/* Ibox error */
943 #define	EBOX_ERR	0x1e00		/* Ebox error */
944 #define	MBOX_1D		0x81d0000	/* Mbox 1D error */
945 #define EDP_PE		0x200
946 #endif
947 
948 #if defined(VAX780) || defined(VAX750)
949 char *mc780[] = {
950 	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
951 	"cp rdtimo", 	"cp rds",	"ucode lost",	0,
952 	0,		0,		"ib tbuf par",	0,
953 	"ib rds",	"ib rd timo",	0,		"ib cache par"
954 };
955 #define MC750_TBERR	2		/* type code of cp tbuf par */
956 #define	MC750_TBPAR	4		/* tbuf par bit in mcesr */
957 #endif
958 
959 #if VAX730
960 #define	NMC730	12
961 char *mc730[] = {
962 	"tb par",	"bad retry",	"bad intr id",	"cant write ptem",
963 	"unkn mcr err",	"iib rd err",	"nxm ref",	"cp rds",
964 	"unalgn ioref",	"nonlw ioref",	"bad ioaddr",	"unalgn ubaddr",
965 };
966 #endif
967 #if VAX630
968 #define NMC630	10
969 extern struct ka630cpu ka630cpu;
970 char *mc630[] = {
971 	0,		"immcr (fsd)",	"immcr (ssd)",	"fpu err 0",
972 	"fpu err 7",	"mmu st(tb)",	"mmu st(m=0)",	"pte in p0",
973 	"pte in p1",	"un intr id",
974 };
975 #endif
976 
977 /*
978  * Machine check frame layouts for each cpu type
979  */
980 struct mc780frame {
981 	int	mc8_bcnt;		/* byte count == 0x28 */
982 	int	mc8_summary;		/* summary parameter (as above) */
983 	int	mc8_cpues;		/* cpu error status */
984 	int	mc8_upc;		/* micro pc */
985 	int	mc8_vaviba;		/* va/viba register */
986 	int	mc8_dreg;		/* d register */
987 	int	mc8_tber0;		/* tbuf error reg 0 */
988 	int	mc8_tber1;		/* tbuf error reg 1 */
989 	int	mc8_timo;		/* timeout address divided by 4 */
990 	int	mc8_parity;		/* parity */
991 	int	mc8_sbier;		/* sbi error register */
992 	int	mc8_pc;			/* trapped pc */
993 	int	mc8_psl;		/* trapped psl */
994 };
995 struct mc750frame {
996 	int	mc5_bcnt;		/* byte count == 0x28 */
997 	int	mc5_summary;		/* summary parameter (as above) */
998 	int	mc5_va;			/* virtual address register */
999 	int	mc5_errpc;		/* error pc */
1000 	int	mc5_mdr;
1001 	int	mc5_svmode;		/* saved mode register */
1002 	int	mc5_rdtimo;		/* read lock timeout */
1003 	int	mc5_tbgpar;		/* tb group parity error register */
1004 	int	mc5_cacherr;		/* cache error register */
1005 	int	mc5_buserr;		/* bus error register */
1006 	int	mc5_mcesr;		/* machine check status register */
1007 	int	mc5_pc;			/* trapped pc */
1008 	int	mc5_psl;		/* trapped psl */
1009 };
1010 struct mc730frame {
1011 	int	mc3_bcnt;		/* byte count == 0xc */
1012 	int	mc3_summary;		/* summary parameter */
1013 	int	mc3_parm[2];		/* parameter 1 and 2 */
1014 	int	mc3_pc;			/* trapped pc */
1015 	int	mc3_psl;		/* trapped psl */
1016 };
1017 struct mc630frame {
1018 	int	mc63_bcnt;		/* byte count == 0xc */
1019 	int	mc63_summary;		/* summary parameter */
1020 	int	mc63_mrvaddr;		/* most recent vad */
1021 	int	mc63_istate;		/* internal state */
1022 	int	mc63_pc;			/* trapped pc */
1023 	int	mc63_psl;		/* trapped psl */
1024 };
1025 struct mc8600frame {
1026 	int	mc6_bcnt;		/* byte count == 0x58 */
1027 	int	mc6_ehmsts;
1028 	int	mc6_evmqsav;
1029 	int	mc6_ebcs;
1030 	int	mc6_edpsr;
1031 	int	mc6_cslint;
1032 	int	mc6_ibesr;
1033 	int	mc6_ebxwd1;
1034 	int	mc6_ebxwd2;
1035 	int	mc6_ivasav;
1036 	int	mc6_vibasav;
1037 	int	mc6_esasav;
1038 	int	mc6_isasav;
1039 	int	mc6_cpc;
1040 	int	mc6_mstat1;
1041 	int	mc6_mstat2;
1042 	int	mc6_mdecc;
1043 	int	mc6_merg;
1044 	int	mc6_cshctl;
1045 	int	mc6_mear;
1046 	int	mc6_medr;
1047 	int	mc6_accs;
1048 	int	mc6_cses;
1049 	int	mc6_pc;			/* trapped pc */
1050 	int	mc6_psl;		/* trapped psl */
1051 };
1052 
1053 machinecheck(cmcf)
1054 	caddr_t cmcf;
1055 {
1056 	register u_int type = ((struct mc780frame *)cmcf)->mc8_summary;
1057 
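	/*
	 * The summary parameter is the second longword of each frame layout
	 * above (the 8600 stores ehm.sts there), so the 780 template is used
	 * for the initial fetch; the 8600 case derives its own type code
	 * from ehm.sts below.
	 */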
1058 	printf("machine check %x: ", type);
1059 	switch (cpu) {
1060 #if VAX8600
1061 	case VAX_8600: {
1062 		register struct mc8600frame *mcf = (struct mc8600frame *)cmcf;
1063 
1064 		if (mcf->mc6_ebcs & MBOX_FE)
1065 			mcf->mc6_ehmsts |= MC_MBOX;
1066 		else if (mcf->mc6_ehmsts & FBOX_SERV)
1067 			mcf->mc6_ehmsts |= MC_FBOX;
1068 		else if (mcf->mc6_ebcs & EBOX_ERR) {
1069 			if (mcf->mc6_ebcs & EDP_PE)
1070 				mcf->mc6_ehmsts |= MC_MBOX;
1071 			else
1072 				mcf->mc6_ehmsts |= MC_EBOX;
1073 		} else if (mcf->mc6_ehmsts & IBOX_ERR)
1074 			mcf->mc6_ehmsts |= MC_IBOX;
1075 		else if (mcf->mc6_mstat1 & M8600_TB_ERR)
1076 			mcf->mc6_ehmsts |= MC_TBUF;
1077 		else if ((mcf->mc6_cslint & MBOX_1D) == MBOX_1D)
1078 			mcf->mc6_ehmsts |= MC_MBOX1D;
1079 
1080 		type = mcf->mc6_ehmsts & 0x7;
1081 		if (type < NMC8600)
1082 			printf("machine check %x: %s", type, mc8600[type]);
1083 		printf("\n");
1084 		printf("\tehm.sts %x evmqsav %x ebcs %x edpsr %x cslint %x\n",
1085 		    mcf->mc6_ehmsts, mcf->mc6_evmqsav, mcf->mc6_ebcs,
1086 		    mcf->mc6_edpsr, mcf->mc6_cslint);
1087 		printf("\tibesr %x ebxwd %x %x ivasav %x vibasav %x\n",
1088 		    mcf->mc6_ibesr, mcf->mc6_ebxwd1, mcf->mc6_ebxwd2,
1089 		    mcf->mc6_ivasav, mcf->mc6_vibasav);
1090 		printf("\tesasav %x isasav %x cpc %x mstat %x %x mdecc %x\n",
1091 		    mcf->mc6_esasav, mcf->mc6_isasav, mcf->mc6_cpc,
1092 		    mcf->mc6_mstat1, mcf->mc6_mstat2, mcf->mc6_mdecc);
1093 		printf("\tmerg %x cshctl %x mear %x medr %x accs %x cses %x\n",
1094 		    mcf->mc6_merg, mcf->mc6_cshctl, mcf->mc6_mear,
1095 		    mcf->mc6_medr, mcf->mc6_accs, mcf->mc6_cses);
1096 		printf("\tpc %x psl %x\n", mcf->mc6_pc, mcf->mc6_psl);
1097 		mtpr(EHSR, 0);
1098 		break;
1099 	};
1100 #endif
1101 #if VAX780
1102 	case VAX_780: {
1103 		register struct mc780frame *mcf = (struct mc780frame *)cmcf;
1104 
1105 		register int sbifs;
1106 		printf("%s%s\n", mc780[type&0xf],
1107 		    (type&0xf0) ? " abort" : " fault");
1108 		printf("\tcpues %x upc %x va/viba %x dreg %x tber %x %x\n",
1109 		   mcf->mc8_cpues, mcf->mc8_upc, mcf->mc8_vaviba,
1110 		   mcf->mc8_dreg, mcf->mc8_tber0, mcf->mc8_tber1);
1111 		sbifs = mfpr(SBIFS);
1112 		printf("\ttimo %x parity %x sbier %x pc %x psl %x sbifs %x\n",
1113 		   mcf->mc8_timo*4, mcf->mc8_parity, mcf->mc8_sbier,
1114 		   mcf->mc8_pc, mcf->mc8_psl, sbifs);
1115 		/* THE FUNNY BITS IN THE FOLLOWING ARE FROM THE ``BLACK */
1116 		/* BOOK'' AND SHOULD BE PUT IN AN ``sbi.h'' */
1117 		mtpr(SBIFS, sbifs &~ 0x2000000);
1118 		mtpr(SBIER, mfpr(SBIER) | 0x70c0);
1119 		break;
1120 	}
1121 #endif
1122 #if VAX750
1123 	case VAX_750: {
1124 		register struct mc750frame *mcf = (struct mc750frame *)cmcf;
1125 
1126 		int mcsr = mfpr(MCSR);
1127 		printf("%s%s\n", mc780[type&0xf],
1128 		    (type&0xf0) ? " abort" : " fault");
1129 		mtpr(TBIA, 0);
1130 		mtpr(MCESR, 0xf);
1131 		printf("\tva %x errpc %x mdr %x smr %x rdtimo %x tbgpar %x cacherr %x\n",
1132 		    mcf->mc5_va, mcf->mc5_errpc, mcf->mc5_mdr, mcf->mc5_svmode,
1133 		    mcf->mc5_rdtimo, mcf->mc5_tbgpar, mcf->mc5_cacherr);
1134 		printf("\tbuserr %x mcesr %x pc %x psl %x mcsr %x\n",
1135 		    mcf->mc5_buserr, mcf->mc5_mcesr, mcf->mc5_pc, mcf->mc5_psl,
1136 		    mcsr);
1137 		if (type == MC750_TBERR && (mcf->mc5_mcesr&0xe) == MC750_TBPAR){
1138 			printf("tbuf par: flushing and returning\n");
1139 			return;
1140 		}
1141 		break;
1142 		}
1143 #endif
1144 #if VAX730
1145 	case VAX_730: {
1146 		register struct mc730frame *mcf = (struct mc730frame *)cmcf;
1147 
1148 		if (type < NMC730)
1149 			printf("%s", mc730[type]);
1150 		printf("\n");
1151 		printf("params %x,%x pc %x psl %x mcesr %x\n",
1152 		    mcf->mc3_parm[0], mcf->mc3_parm[1],
1153 		    mcf->mc3_pc, mcf->mc3_psl, mfpr(MCESR));
1154 		mtpr(MCESR, 0xf);
1155 		break;
1156 		}
1157 #endif
1158 #if VAX630
1159 	case VAX_630: {
1160 		register struct ka630cpu *ka630addr = &ka630cpu;
1161 		register struct mc630frame *mcf = (struct mc630frame *)cmcf;
1162 		printf("vap %x istate %x pc %x psl %x\n",
1163 		    mcf->mc63_mrvaddr, mcf->mc63_istate,
1164 		    mcf->mc63_pc, mcf->mc63_psl);
1165 		if (ka630addr->ka630_mser & KA630MSER_MERR) {
1166 			printf("mser=0x%x ",ka630addr->ka630_mser);
1167 			if (ka630addr->ka630_mser & KA630MSER_CPUER)
1168 				printf("page=%d",ka630addr->ka630_cear);
1169 			if (ka630addr->ka630_mser & KA630MSER_DQPE)
1170 				printf("page=%d",ka630addr->ka630_dear);
1171 			printf("\n");
1172 		}
1173 		break;
1174 		}
1175 #endif
1176 	}
1177 	memerr();
1178 	panic("mchk");
1179 }
1180 
1181 /*
1182  * Return the best possible estimate of the time in the timeval
1183  * to which tvp points.  We do this by reading the interval count
1184  * register to determine the time remaining to the next clock tick.
1185  * We must compensate for wraparound which is not yet reflected in the time
1186  * (which happens when the ICR hits 0 and wraps after the splhigh(),
1187  * but before the mfpr(ICR)).  Also check that this time is no less than
1188  * any previously-reported time, which could happen around the time
1189  * of a clock adjustment.  Just for fun, we guarantee that the time
1190  * will be greater than the value obtained by a previous call.
1191  */
1192 microtime(tvp)
1193 	register struct timeval *tvp;
1194 {
1195 	int s = splhigh();
1196 	static struct timeval lasttime;
1197 	register long t;
1198 
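	/*
	 * The interval count register counts up from -tick toward zero,
	 * so t = mfpr(ICR) is minus the number of microseconds until the
	 * next tick and tick + t is the time since the last one.  If the
	 * count has already wrapped (t is back near -tick) while the tick
	 * interrupt is still pending, that whole tick is not yet reflected
	 * in "time", so it is added back.
	 */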
1199 	*tvp = time;
1200 	t =  mfpr(ICR);
1201 	if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT))
1202 		t += tick;
1203 	tvp->tv_usec += tick + t;
1204 	if (tvp->tv_usec > 1000000) {
1205 		tvp->tv_sec++;
1206 		tvp->tv_usec -= 1000000;
1207 	}
1208 	if (tvp->tv_sec == lasttime.tv_sec &&
1209 	    tvp->tv_usec <= lasttime.tv_usec &&
1210 	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
1211 		tvp->tv_sec++;
1212 		tvp->tv_usec -= 1000000;
1213 	}
1214 	lasttime = *tvp;
1215 	splx(s);
1216 }
1217 
1218 physstrat(bp, strat, prio)
1219 	struct buf *bp;
1220 	int (*strat)(), prio;
1221 {
1222 	int s;
1223 
1224 	(*strat)(bp);
1225 	/* pageout daemon doesn't wait for pushed pages */
1226 	if (bp->b_flags & B_DIRTY)
1227 		return;
1228 	s = splbio();
1229 	while ((bp->b_flags & B_DONE) == 0)
1230 		sleep((caddr_t)bp, prio);
1231 	splx(s);
1232 }
1233 
1234 initcpu()
1235 {
1236 	/*
1237 	 * Enable cache.
1238 	 */
1239 	switch (cpu) {
1240 
1241 #if VAX780
1242 	case VAX_780:
1243 		mtpr(SBIMT, 0x200000);
1244 		break;
1245 #endif
1246 #if VAX750
1247 	case VAX_750:
1248 		mtpr(CADR, 0);
1249 		break;
1250 #endif
1251 #if VAX8600
1252 	case VAX_8600:
1253 		mtpr(CSWP, 3);
1254 		break;
1255 #endif
1256 	default:
1257 		break;
1258 	}
1259 
1260 	/*
1261 	 * Enable floating point accelerator if it exists
1262 	 * and has a control register.
1263 	 */
1264 	switch(cpu) {
1265 
1266 #if VAX8600 || VAX780
1267 	case VAX_780:
1268 	case VAX_8600:
1269 		if ((mfpr(ACCS) & 0xff) != 0) {
1270 			printf("Enabling FPA\n");
1271 			mtpr(ACCS, 0x8000);
1272 		}
1273 #endif
1274 	default:
1275 		break;
1276 	}
1277 }
1278