xref: /original-bsd/sys/vax/vax/machdep.c (revision e74403ba)
1 /*	machdep.c	6.5	84/03/22	*/
2 
3 #include "../machine/reg.h"
4 #include "../machine/pte.h"
5 #include "../machine/psl.h"
6 
7 #include "../h/param.h"
8 #include "../h/systm.h"
9 #include "../h/dir.h"
10 #include "../h/user.h"
11 #include "../h/kernel.h"
12 #include "../h/map.h"
13 #include "../h/vm.h"
14 #include "../h/proc.h"
15 #include "../h/buf.h"
16 #include "../h/reboot.h"
17 #include "../h/conf.h"
18 #include "../h/inode.h"
19 #include "../h/file.h"
20 #include "../h/text.h"
21 #include "../h/clist.h"
22 #include "../h/callout.h"
23 #include "../h/cmap.h"
24 #include "../h/mbuf.h"
25 #include "../h/msgbuf.h"
26 #include "../h/nami.h"
27 #include "../h/quota.h"
28 
29 #include "../vax/frame.h"
30 #include "../vax/cons.h"
31 #include "../vax/cpu.h"
32 #include "../vax/mem.h"
33 #include "../vax/mtpr.h"
34 #include "../vax/rpb.h"
35 #include "../vaxuba/ubavar.h"
36 #include "../vaxuba/ubareg.h"
37 
38 int	icode[] =
39 {
40 	0x9f19af9f,	/* pushab [&"init",0]; pushab */
41 	0x02dd09af,	/* "/etc/init"; pushl $2 */
42 	0xbc5c5ed0,	/* movl sp,ap; chmk */
43 	0x2ffe110b,	/* $exec; brb .; "/ */
44 	0x2f637465,	/* etc/ */
45 	0x74696e69,	/* init" */
46 	0x00000000,	/* \0\0\0";  0 */
47 	0x00000014,	/* [&"init", */
48 	0x00000000,	/* 0] */
49 };
50 int	szicode = sizeof(icode);
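/*
 * icode is the user-mode bootstrap for the first process: as the
 * word-by-word comments above show, it builds an argument vector
 * pointing at "/etc/init", traps into the kernel with chmk to perform
 * the exec, and loops on a branch-to-self should the exec ever return.
 */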
51 
52 /*
53  * Declare these as initialized data so we can patch them.
54  */
55 int	nbuf = 0;
56 int	nswbuf = 0;
57 int	bufpages = 0;
58 
59 /*
60  * Machine-dependent startup code
61  */
62 startup(firstaddr)
63 	int firstaddr;
64 {
65 	register int unixsize;
66 	register unsigned i;
67 	register struct pte *pte;
68 	int mapaddr, j;
69 	register caddr_t v;
70 	int maxbufs, base, residual;
71 	extern char etext;
72 
73 	/*
74 	 * Initialize error message buffer (at end of core).
75 	 */
76 	maxmem -= btoc(sizeof (struct msgbuf));
77 	pte = msgbufmap;
78 	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
79 		*(int *)pte++ = PG_V | PG_KW | (maxmem + i);
80 	mtpr(TBIA, 1);
81 
82 	/*
83 	 * Good {morning,afternoon,evening,night}.
84 	 */
85 	printf(version);
86 	printf("real mem  = %d\n", ctob(maxmem));
87 
88 	/*
89 	 * Determine how many buffers to allocate.
90 	 * Use 10% of memory for buffer pages, with a minimum of 16 buffers.
91 	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
92 	 */
93 	maxbufs = ((SYSPTSIZE * NBPG) - (5 * (int)(&etext - 0x80000000))) /
94 	    MAXBSIZE;
95 	if (bufpages == 0)
96 		bufpages = (physmem * NBPG) / 10 / CLBYTES;
97 	if (nbuf == 0) {
98 		nbuf = bufpages / 2;
99 		if (nbuf < 16)
100 			nbuf = 16;
101 		if (nbuf > maxbufs)
102 			nbuf = maxbufs;
103 	}
104 	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
105 		bufpages = nbuf * (MAXBSIZE / CLBYTES);
106 	if (nswbuf == 0) {
107 		nswbuf = (nbuf / 2) &~ 1;	/* force even */
108 		if (nswbuf > 256)
109 			nswbuf = 256;		/* sanity */
110 	}
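	/*
	 * maxbufs above bounds nbuf by the system page table space left
	 * over after allowing five times the size of the kernel text;
	 * bufpages is then clamped so that it never exceeds what nbuf
	 * buffers of MAXBSIZE apiece could hold.
	 */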
111 
112 	/*
113 	 * Allocate space for system data structures.
114 	 * The first available real memory address is in "firstaddr".
115 	 * As pages of memory are allocated, "firstaddr" is incremented.
116 	 * The first available kernel virtual address is in "v".
117 	 * As pages of kernel virtual memory are allocated, "v" is incremented.
118 	 * An index into the kernel page table corresponding to the
119 	 * virtual memory address maintained in "v" is kept in "mapaddr".
120 	 */
121 	mapaddr = firstaddr;
122 	v = (caddr_t)(0x80000000 | (firstaddr * NBPG));
123 #define	valloc(name, type, num) \
124 	    (name) = (type *)(v); (v) = (caddr_t)((name)+(num))
125 #define	valloclim(name, type, num, lim) \
126 	    (name) = (type *)(v); (v) = (caddr_t)((lim) = ((name)+(num)))
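	/*
	 * valloc carves out space for "num" objects of "type" by advancing
	 * the kernel virtual address v; valloclim does the same and also
	 * records the address just past the last object in "lim".
	 */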
127 	valloc(buffers, char, MAXBSIZE * nbuf);
128 	base = bufpages / nbuf;
129 	residual = bufpages % nbuf;
130 	for (i = 0; i < residual; i++) {
131 		for (j = 0; j < (base + 1) * CLSIZE; j++) {
132 			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
133 			clearseg((unsigned)firstaddr);
134 			firstaddr++;
135 		}
136 		mapaddr += MAXBSIZE / NBPG;
137 	}
138 	for (i = residual; i < nbuf; i++) {
139 		for (j = 0; j < base * CLSIZE; j++) {
140 			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
141 			clearseg((unsigned)firstaddr);
142 			firstaddr++;
143 		}
144 		mapaddr += MAXBSIZE / NBPG;
145 	}
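	/*
	 * Each buffer was given MAXBSIZE of virtual space above, but real
	 * memory was mapped and cleared for only "base" clusters per buffer
	 * (one extra for the first "residual" buffers), bufpages clusters
	 * in all; the remainder of each buffer is left unmapped.
	 */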
146 	valloc(buf, struct buf, nbuf);
147 	valloc(swbuf, struct buf, nswbuf);
148 	valloclim(inode, struct inode, ninode, inodeNINODE);
149 	valloclim(file, struct file, nfile, fileNFILE);
150 	valloclim(proc, struct proc, nproc, procNPROC);
151 	valloclim(text, struct text, ntext, textNTEXT);
152 	valloc(cfree, struct cblock, nclist);
153 	valloc(callout, struct callout, ncallout);
154 	valloc(swapmap, struct map, nswapmap = nproc * 2);
155 	valloc(argmap, struct map, ARGMAPSIZE);
156 	valloc(kernelmap, struct map, nproc);
157 	valloc(mbmap, struct map, nmbclusters/4);
158 	valloc(nch, struct nch, nchsize);
159 #ifdef QUOTA
160 	valloclim(quota, struct quota, nquota, quotaNQUOTA);
161 	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
162 #endif
163 	/*
164 	 * Now allocate space for the core map.
165 	 * Allow space for all of physical memory minus the amount
166 	 * dedicated to the system. The amount of physical memory
167 	 * dedicated to the system is the total virtual memory of
168 	 * the system minus the space in the buffers which is not
169 	 * allocated real memory.
170 	 */
171 	ncmap = (physmem*NBPG - ((int)v &~ 0x80000000) +
172 		(nbuf * (MAXBSIZE - 2 * CLBYTES))) /
173 		    (NBPG*CLSIZE + sizeof (struct cmap));
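	/*
	 * Each cluster of managed memory costs NBPG*CLSIZE bytes plus one
	 * cmap entry, hence the divisor.  The nbuf*(MAXBSIZE - 2*CLBYTES)
	 * term appears to credit back buffer virtual space (already counted
	 * in v) that has no real memory behind it, on the assumption of
	 * roughly two clusters of real memory per buffer.
	 */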
174 	valloclim(cmap, struct cmap, ncmap, ecmap);
175 	if ((((int)(ecmap+1))&~0x80000000) > SYSPTSIZE*NBPG)
176 		panic("sys pt too small");
177 
178 	/*
179 	 * Clear allocated space, and make r/w entries
180 	 * for the space in the kernel map.
181 	 */
182 	unixsize = btoc((int)(ecmap+1) &~ 0x80000000);
183 	for (i = mapaddr; i < unixsize; i++) {
184 		*(int *)(&Sysmap[i]) = PG_V | PG_KW | firstaddr;
185 		clearseg((unsigned)firstaddr);
186 		firstaddr++;
187 	}
188 	if (firstaddr >= physmem - 8*UPAGES)
189 		panic("no memory");
190 	mtpr(TBIA, 1);
191 
192 	/*
193 	 * Initialize callouts
194 	 */
195 	callfree = callout;
196 	for (i = 1; i < ncallout; i++)
197 		callout[i-1].c_next = &callout[i];
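	/*
	 * This links callout[0..ncallout-2] into a free list headed by
	 * callfree; the last entry's c_next is left as zero by the page
	 * clearing done above.
	 */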
198 
199 	/*
200 	 * Initialize memory allocator and swap
201 	 * and user page table maps.
202 	 *
203 	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
204 	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
205 	 */
206 	meminit(firstaddr, maxmem);
207 	maxmem = freemem;
208 	printf("avail mem = %d\n", ctob(maxmem));
209 	printf("using %d buffers containing %d bytes of memory\n",
210 		nbuf, bufpages * CLBYTES);
211 	rminit(kernelmap, (long)USRPTSIZE, (long)1,
212 	    "usrpt", nproc);
213 	rminit(mbmap, (long)((nmbclusters - 1) * CLSIZE), (long)CLSIZE,
214 	    "mbclusters", nmbclusters/4);
215 
216 	/*
217 	 * Configure the system.
218 	 */
219 	configure();
220 
221 	/*
222 	 * Clear restart inhibit flags.
223 	 */
224 	tocons(TXDB_CWSI);
225 	tocons(TXDB_CCSI);
226 }
227 
228 #ifdef PGINPROF
229 /*
230  * Return the difference (in microseconds)
231  * between the current time and a previous
232  * time as represented by the arguments.
233  * If there is a pending clock interrupt
234  * which has not been serviced due to high
235  * ipl, return error code.
236  */
237 vmtime(otime, olbolt, oicr)
238 	register int otime, olbolt, oicr;
239 {
240 
241 	if (mfpr(ICCS)&ICCS_INT)
242 		return(-1);
243 	else
244 		return(((time.tv_sec-otime)*60 + lbolt-olbolt)*16667 + mfpr(ICR)-oicr);
245 }
246 #endif
247 
248 /*
249  * Send an interrupt to process.
250  *
251  * Stack is set up to allow sigcode stored
252  * in u. to call routine, followed by chmk
253  * to sigcleanup routine below.  After sigcleanup
254  * resets the signal mask and the stack, it
255  * returns to user who then unwinds with the
256  * rei at the bottom of sigcode.
257  */
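/*
 * The sigframe built below is what the handler finds on its stack:
 * sf_signum, sf_code and sf_scp are its arguments, sf_handler is what
 * sigcode calls through, and sf_scpcopy is a second pointer to the
 * sigcontext that survives the handler's ret so that sigcleanup can
 * locate it.
 */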
258 sendsig(p, sig, sigmask)
259 	int (*p)(), sig, sigmask;
260 {
261 	register struct sigcontext *scp;	/* known to be r11 */
262 	register int *regs;
263 	register struct sigframe {
264 		int	sf_signum;
265 		int	sf_code;
266 		struct	sigcontext *sf_scp;
267 		int	(*sf_handler)();
268 		struct	sigcontext *sf_scpcopy;
269 	} *fp;					/* known to be r9 */
270 	int oonstack;
271 
272 	regs = u.u_ar0;
273 	oonstack = u.u_onstack;
274 	scp = (struct sigcontext *)regs[SP] - 1;
275 #define	mask(s)	(1<<((s)-1))
276 	if (!u.u_onstack && (u.u_sigonstack & mask(sig))) {
277 		fp = (struct sigframe *)u.u_sigsp - 1;
278 		u.u_onstack = 1;
279 	} else
280 		fp = (struct sigframe *)scp - 1;
281 	/*
282 	 * Must build signal handler context on stack to be returned to
283 	 * so that rei instruction in sigcode will pop ps and pc
284 	 * off correct stack.  The remainder of the signal state
285 	 * used in calling the handler must be placed on the stack
286 	 * on which the handler is to operate so that the calls
287 	 * in sigcode will save the registers and such correctly.
288 	 */
289 	if (!oonstack && (int)fp <= USRSTACK - ctob(u.u_ssize))
290 		grow((unsigned)fp);
291 	;
292 #ifndef lint
293 	asm("probew $3,$20,(r9)");
294 	asm("jeql bad");
295 #else
296 	if (useracc((caddr_t)fp, sizeof (struct sigframe), 1))
297 		goto bad;
298 #endif
299 	if (!u.u_onstack && (int)scp <= USRSTACK - ctob(u.u_ssize))
300 		grow((unsigned)scp);
301 	;			/* Avoid asm() label botch */
302 #ifndef lint
303 	asm("probew $3,$20,(r11)");
304 	asm("beql bad");
305 #else
306 	if (useracc((caddr_t)scp, sizeof (struct sigcontext), 1))
307 		goto bad;
308 #endif
309 	fp->sf_signum = sig;
310 	if (sig == SIGILL || sig == SIGFPE) {
311 		fp->sf_code = u.u_code;
312 		u.u_code = 0;
313 	} else
314 		fp->sf_code = 0;
315 	fp->sf_scp = scp;
316 	fp->sf_handler = p;
317 	/*
318 	 * Duplicate the pointer to the sigcontext structure.
319 	 * This one doesn't get popped by the ret, and is used
320 	 * by sigcleanup to reset the signal state on inward return.
321 	 */
322 	fp->sf_scpcopy = scp;
323 	/* sigcontext goes on previous stack */
324 	scp->sc_onstack = oonstack;
325 	scp->sc_mask = sigmask;
326 	/* setup rei */
327 	scp->sc_sp = (int)&scp->sc_pc;
328 	scp->sc_pc = regs[PC];
329 	scp->sc_ps = regs[PS];
330 	regs[SP] = (int)fp;
331 	regs[PS] &= ~(PSL_CM|PSL_FPD);
332 	regs[PC] = (int)u.u_pcb.pcb_sigc;
333 	return;
334 
335 asm("bad:");
336 bad:
337 	/*
338 	 * Process has trashed its stack; give it an illegal
339 	 * instruction to halt it in its tracks.
340 	 */
341 	u.u_signal[SIGILL] = SIG_DFL;
342 	sig = mask(SIGILL);
343 	u.u_procp->p_sigignore &= ~sig;
344 	u.u_procp->p_sigcatch &= ~sig;
345 	u.u_procp->p_sigmask &= ~sig;
346 	psignal(u.u_procp, SIGILL);
347 }
348 
349 /*
350  * Routine to cleanup state after a signal
351  * has been taken.  Reset signal mask and
352  * stack state from context left by sendsig (above).
353  * Pop these values in preparation for rei which
354  * follows return from this routine.
355  */
356 sigcleanup()
357 {
358 	register struct sigcontext *scp;
359 
360 	scp = (struct sigcontext *)fuword((caddr_t)u.u_ar0[SP]);
361 	if ((int)scp == -1)
362 		return;
363 #ifndef lint
364 	/* only probe 12 here because that's all we need */
365 	asm("prober $3,$12,(r11)");
366 	asm("bnequ 1f; ret; 1:");
367 #else
368 	if (useracc((caddr_t)scp, sizeof (*scp), 0))
369 		return;
370 #endif
371 	u.u_onstack = scp->sc_onstack & 01;
372 	u.u_procp->p_sigmask =
373 	    scp->sc_mask &~ (mask(SIGKILL)|mask(SIGCONT)|mask(SIGSTOP));
374 	u.u_ar0[SP] = scp->sc_sp;
375 }
376 #undef mask
377 
378 #ifdef notdef
379 dorti()
380 {
381 	struct frame frame;
382 	register int sp;
383 	register int reg, mask;
384 	extern int ipcreg[];
385 
386 	(void) copyin((caddr_t)u.u_ar0[FP], (caddr_t)&frame, sizeof (frame));
387 	sp = u.u_ar0[FP] + sizeof (frame);
388 	u.u_ar0[PC] = frame.fr_savpc;
389 	u.u_ar0[FP] = frame.fr_savfp;
390 	u.u_ar0[AP] = frame.fr_savap;
391 	mask = frame.fr_mask;
392 	for (reg = 0; reg <= 11; reg++) {
393 		if (mask&1) {
394 			u.u_ar0[ipcreg[reg]] = fuword((caddr_t)sp);
395 			sp += 4;
396 		}
397 		mask >>= 1;
398 	}
399 	sp += frame.fr_spa;
400 	u.u_ar0[PS] = (u.u_ar0[PS] & 0xffff0000) | frame.fr_psw;
401 	if (frame.fr_s)
402 		sp += 4 + 4 * (fuword((caddr_t)sp) & 0xff);
403 	/* phew, now the rei */
404 	u.u_ar0[PC] = fuword((caddr_t)sp);
405 	sp += 4;
406 	u.u_ar0[PS] = fuword((caddr_t)sp);
407 	sp += 4;
408 	u.u_ar0[PS] |= PSL_USERSET;
409 	u.u_ar0[PS] &= ~PSL_USERCLR;
410 	u.u_ar0[SP] = (int)sp;
411 }
412 #endif
413 
414 /*
415  * Memenable enables corrected data reporting by the memory controllers.
416  * This runs at regular intervals, turning on the interrupt.
417  * The interrupt is turned off, per memory controller, when error
418  * reporting occurs.  Thus we report at most once per memintvl.
419  */
420 int	memintvl = MEMINTVL;
421 
422 memenable()
423 {
424 	register struct mcr *mcr;
425 	register int m;
426 
427 	for (m = 0; m < nmcr; m++) {
428 		mcr = mcraddr[m];
429 		switch (mcrtype[m]) {
430 #if VAX780
431 		case M780C:
432 			M780C_ENA(mcr);
433 			break;
434 		case M780EL:
435 			M780EL_ENA(mcr);
436 			break;
437 		case M780EU:
438 			M780EU_ENA(mcr);
439 			break;
440 #endif
441 #if VAX750
442 		case M750:
443 			M750_ENA(mcr);
444 			break;
445 #endif
446 #if VAX730
447 		case M730:
448 			M730_ENA(mcr);
449 			break;
450 #endif
451 		}
452 	}
453 	if (memintvl > 0)
454 		timeout(memenable, (caddr_t)0, memintvl*hz);
455 }
456 
457 /*
458  * Memerr is the interrupt routine for corrected read data
459  * interrupts.  It looks to see which memory controllers have
460  * unreported errors, reports them, and disables further
461  * reporting for a time on those controllers.
462  */
463 memerr()
464 {
465 	register struct mcr *mcr;
466 	register int m;
467 
468 	for (m = 0; m < nmcr; m++) {
469 		mcr = mcraddr[m];
470 		switch (mcrtype[m]) {
471 #if VAX780
472 		case M780C:
473 			if (M780C_ERR(mcr)) {
474 				printf("mcr%d: soft ecc addr %x syn %x\n",
475 				    m, M780C_ADDR(mcr), M780C_SYN(mcr));
476 #ifdef TRENDATA
477 				memlog(m, mcr);
478 #endif
479 				M780C_INH(mcr);
480 			}
481 			break;
482 
483 		case M780EL:
484 			if (M780EL_ERR(mcr)) {
485 				printf("mcr%d: soft ecc addr %x syn %x\n",
486 				    m, M780EL_ADDR(mcr), M780EL_SYN(mcr));
487 				M780EL_INH(mcr);
488 			}
489 			break;
490 
491 		case M780EU:
492 			if (M780EU_ERR(mcr)) {
493 				printf("mcr%d: soft ecc addr %x syn %x\n",
494 				    m, M780EU_ADDR(mcr), M780EU_SYN(mcr));
495 				M780EU_INH(mcr);
496 			}
497 			break;
498 #endif
499 #if VAX750
500 		case M750:
501 			if (M750_ERR(mcr)) {
502 				struct mcr amcr;
503 				amcr.mc_reg[0] = mcr->mc_reg[0];
504 				printf("mcr%d: %s",
505 				    m, (amcr.mc_reg[0] & M750_UNCORR) ?
506 				    "hard error" : "soft ecc");
507 				printf(" addr %x syn %x\n",
508 				    M750_ADDR(&amcr), M750_SYN(&amcr));
509 				M750_INH(mcr);
510 			}
511 			break;
512 #endif
513 #if VAX730
514 		case M730: {
515 			register int mcreg = mcr->mc_reg[1];
516 
517 			if (mcreg & M730_CRD) {
518 				struct mcr amcr;
519 				amcr.mc_reg[0] = mcr->mc_reg[0];
520 				printf("mcr%d: soft ecc addr %x syn %x\n",
521 				    m, M730_ADDR(&amcr), M730_SYN(&amcr));
522 				M730_INH(mcr);
523 			}
524 			break;
525 		}
526 #endif
527 		}
528 	}
529 }
530 
531 #ifdef TRENDATA
532 /*
533  * Figure out what chip to replace on Trendata boards.
534  * Assumes all your memory is Trendata or the non-Trendata
535  * memory never fails..
536  * memory never fails.
537 struct {
538 	u_char	m_syndrome;
539 	char	m_chip[4];
540 } memlogtab[] = {
541 	0x01,	"C00",	0x02,	"C01",	0x04,	"C02",	0x08,	"C03",
542 	0x10,	"C04",	0x19,	"L01",	0x1A,	"L02",	0x1C,	"L04",
543 	0x1F,	"L07",	0x20,	"C05",	0x38,	"L00",	0x3B,	"L03",
544 	0x3D,	"L05",	0x3E,	"L06",	0x40,	"C06",	0x49,	"L09",
545 	0x4A,	"L10",	0x4c,	"L12",	0x4F,	"L15",	0x51,	"L17",
546 	0x52,	"L18",	0x54,	"L20",	0x57,	"L23",	0x58,	"L24",
547 	0x5B,	"L27",	0x5D,	"L29",	0x5E,	"L30",	0x68,	"L08",
548 	0x6B,	"L11",	0x6D,	"L13",	0x6E,	"L14",	0x70,	"L16",
549 	0x73,	"L19",	0x75,	"L21",	0x76,	"L22",	0x79,	"L25",
550 	0x7A,	"L26",	0x7C,	"L28",	0x7F,	"L31",	0x80,	"C07",
551 	0x89,	"U01",	0x8A,	"U02",	0x8C,	"U04",	0x8F,	"U07",
552 	0x91,	"U09",	0x92,	"U10",	0x94,	"U12",	0x97, 	"U15",
553 	0x98,	"U16",	0x9B,	"U19",	0x9D,	"U21",	0x9E, 	"U22",
554 	0xA8,	"U00",	0xAB,	"U03",	0xAD,	"U05",	0xAE,	"U06",
555 	0xB0,	"U08",	0xB3,	"U11",	0xB5,	"U13",	0xB6,	"U14",
556 	0xB9,	"U17",	0xBA,	"U18",	0xBC,	"U20",	0xBF,	"U23",
557 	0xC1,	"U25",	0xC2,	"U26",	0xC4,	"U28",	0xC7,	"U31",
558 	0xE0,	"U24",	0xE3,	"U27",	0xE5,	"U29",	0xE6,	"U30"
559 };
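/*
 * Each entry pairs an ECC syndrome with the position of the RAM chip
 * that produces it; a syndrome matching no entry presumably reflects a
 * multi-bit error, which is what the "multiple errors, not traceable"
 * message below reports.
 */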
560 
561 memlog (m, mcr)
562 	int m;
563 	struct mcr *mcr;
564 {
565 	register i;
566 
567 	switch (mcrtype[m]) {
568 
569 #if VAX780
570 	case M780C:
571 	for (i = 0; i < (sizeof (memlogtab) / sizeof (memlogtab[0])); i++)
572 		if ((u_char)(M780C_SYN(mcr)) == memlogtab[i].m_syndrome) {
573 			printf (
574 	"mcr%d: replace %s chip in %s bank of memory board %d (0-15)\n",
575 				m,
576 				memlogtab[i].m_chip,
577 				(M780C_ADDR(mcr) & 0x8000) ? "upper" : "lower",
578 				(M780C_ADDR(mcr) >> 16));
579 			return;
580 		}
581 	printf ("mcr%d: multiple errors, not traceable\n", m);
582 	break;
583 #endif
584 	}
585 }
586 #endif
587 
588 /*
589  * Invalidate all the pte's in a cluster, one page at a time (TBIS).
590  */
591 tbiscl(v)
592 	unsigned v;
593 {
594 	register caddr_t addr;		/* must be first reg var */
595 	register int i;
596 
597 	asm(".set TBIS,58");
598 	addr = ptob(v);
599 	for (i = 0; i < CLSIZE; i++) {
600 #ifdef lint
601 		mtpr(TBIS, addr);
602 #else
603 		asm("mtpr r11,$TBIS");
604 #endif
605 		addr += NBPG;
606 	}
607 }
608 
609 int	waittime = -1;
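/*
 * waittime starts at -1 and is set to 0 once the disk sync has begun,
 * so that a panic taken during the sync itself will not try to sync
 * again.
 */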
610 
611 boot(paniced, arghowto)
612 	int paniced, arghowto;
613 {
614 	register int howto;		/* r11 == how to boot */
615 	register int devtype;		/* r10 == major of root dev */
616 
617 #ifdef lint
618 	howto = 0; devtype = 0;
619 	printf("howto %d, devtype %d\n", arghowto, devtype);
620 #endif
621 	(void) spl1();
622 	howto = arghowto;
623 	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
624 		waittime = 0;
625 		update();
626 		printf("syncing disks... ");
627 #ifdef notdef
628 		DELAY(10000000);
629 #else
630 		{ register struct buf *bp;
631 		  int iter, nbusy;
632 
633 		  for (iter = 0; iter < 20; iter++) {
634 			nbusy = 0;
635 			for (bp = &buf[nbuf]; --bp >= buf; )
636 				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
637 					nbusy++;
638 			if (nbusy == 0)
639 				break;
640 			printf("%d ", nbusy);
641 		  }
642 		}
643 #endif
644 		printf("done\n");
645 	}
646 	splx(0x1f);			/* extreme priority */
647 	devtype = major(rootdev);
648 	if (howto&RB_HALT) {
649 		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
650 		mtpr(IPL, 0x1f);
651 		for (;;)
652 			;
653 	} else {
654 		if (paniced == RB_PANIC) {
655 			doadump();		/* TXDB_BOOT's itself */
656 			/*NOTREACHED*/
657 		}
658 		tocons(TXDB_BOOT);
659 	}
660 #if defined(VAX750) || defined(VAX730)
661 	if (cpu != VAX_780)
662 		{ asm("movl r11,r5"); }		/* boot flags go in r5 */
663 #endif
664 	for (;;)
665 		asm("halt");
666 	/*NOTREACHED*/
667 }
668 
669 tocons(c)
670 {
671 
672 	while ((mfpr(TXCS)&TXCS_RDY) == 0)
673 		continue;
674 	mtpr(TXDB, c);
675 }
676 
677 int	dumpmag = 0x8fca0101;	/* magic number for savecore */
678 int	dumpsize = 0;		/* also for savecore */
679 /*
680  * Dumpsys is reached (through doadump) after memory management has
681  * been turned off and we are running on the dump stack, either when
682  * called from boot above, or by the auto-restart code.
683  */
684 dumpsys()
685 {
686 
687 	rpb.rp_flag = 1;
688 #ifdef notdef
689 	if ((minor(dumpdev)&07) != 1)
690 		return;
691 #endif
692 	dumpsize = physmem;
693 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
694 	printf("dump ");
695 	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
696 
697 	case ENXIO:
698 		printf("device bad\n");
699 		break;
700 
701 	case EFAULT:
702 		printf("device not ready\n");
703 		break;
704 
705 	case EINVAL:
706 		printf("area improper\n");
707 		break;
708 
709 	case EIO:
710 		printf("i/o error");
711 		break;
712 
713 	default:
714 		printf("succeeded");
715 		break;
716 	}
717 }
718 
719 /*
720  * Machine check error recovery code.
721  * Print out the machine check frame and then give up.
722  */
723 #if defined(VAX780) || defined(VAX750)
724 char *mc780[] = {
725 	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
726 	"cp rdtimo", 	"cp rds",	"ucode lost",	0,
727 	0,		0,		"ib tbuf par",	0,
728 	"ib rds",	"ib rd timo",	0,		"ib cache par"
729 };
730 #define	MC750_TBPAR	4
731 #endif
732 #if VAX730
733 #define	NMC730	12
734 char *mc730[] = {
735 	"tb par",	"bad retry",	"bad intr id",	"cant write ptem",
736 	"unkn mcr err",	"iib rd err",	"nxm ref",	"cp rds",
737 	"unalgn ioref",	"nonlw ioref",	"bad ioaddr",	"unalgn ubaddr",
738 };
739 #endif
740 
741 /*
742  * Frame for each cpu
743  */
744 struct mc780frame {
745 	int	mc8_bcnt;		/* byte count == 0x28 */
746 	int	mc8_summary;		/* summary parameter (as above) */
747 	int	mc8_cpues;		/* cpu error status */
748 	int	mc8_upc;		/* micro pc */
749 	int	mc8_vaviba;		/* va/viba register */
750 	int	mc8_dreg;		/* d register */
751 	int	mc8_tber0;		/* tbuf error reg 0 */
752 	int	mc8_tber1;		/* tbuf error reg 1 */
753 	int	mc8_timo;		/* timeout address divided by 4 */
754 	int	mc8_parity;		/* parity */
755 	int	mc8_sbier;		/* sbi error register */
756 	int	mc8_pc;			/* trapped pc */
757 	int	mc8_psl;		/* trapped psl */
758 };
759 struct mc750frame {
760 	int	mc5_bcnt;		/* byte count == 0x28 */
761 	int	mc5_summary;		/* summary parameter (as above) */
762 	int	mc5_va;			/* virtual address register */
763 	int	mc5_errpc;		/* error pc */
764 	int	mc5_mdr;
765 	int	mc5_svmode;		/* saved mode register */
766 	int	mc5_rdtimo;		/* read lock timeout */
767 	int	mc5_tbgpar;		/* tb group parity error register */
768 	int	mc5_cacherr;		/* cache error register */
769 	int	mc5_buserr;		/* bus error register */
770 	int	mc5_mcesr;		/* machine check status register */
771 	int	mc5_pc;			/* trapped pc */
772 	int	mc5_psl;		/* trapped psl */
773 };
774 struct mc730frame {
775 	int	mc3_bcnt;		/* byte count == 0xc */
776 	int	mc3_summary;		/* summary parameter */
777 	int	mc3_parm[2];		/* parameter 1 and 2 */
778 	int	mc3_pc;			/* trapped pc */
779 	int	mc3_psl;		/* trapped psl */
780 };
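/*
 * All three frame layouts place the summary longword second, right
 * after the byte count, which is why machinecheck() can read it through
 * a struct mc780frame pointer before knowing which cpu it is on.
 */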
781 
782 machinecheck(cmcf)
783 	caddr_t cmcf;
784 {
785 	register u_int type = ((struct mc780frame *)cmcf)->mc8_summary;
786 
787 	printf("machine check %x: ", type);
788 	switch (cpu) {
789 #if VAX780
790 	case VAX_780:
791 #endif
792 #if VAX750
793 	case VAX_750:
794 #endif
795 #if defined(VAX780) || defined(VAX750)
796 		printf("%s%s\n", mc780[type&0xf],
797 		    (type&0xf0) ? " abort" : " fault");
798 		break;
799 #endif
800 #if VAX730
801 	case VAX_730:
802 		if (type < NMC730)
803 			printf("%s", mc730[type]);
804 		printf("\n");
805 		break;
806 #endif
807 	}
808 	switch (cpu) {
809 #if VAX780
810 	case VAX_780: {
811 		register struct mc780frame *mcf = (struct mc780frame *)cmcf;
812 		register int sbifs;
813 		printf("\tcpues %x upc %x va/viba %x dreg %x tber %x %x\n",
814 		   mcf->mc8_cpues, mcf->mc8_upc, mcf->mc8_vaviba,
815 		   mcf->mc8_dreg, mcf->mc8_tber0, mcf->mc8_tber1);
816 		sbifs = mfpr(SBIFS);
817 		printf("\ttimo %x parity %x sbier %x pc %x psl %x sbifs %x\n",
818 		   mcf->mc8_timo*4, mcf->mc8_parity, mcf->mc8_sbier,
819 		   mcf->mc8_pc, mcf->mc8_psl, sbifs);
820 		/* THE FUNNY BITS IN THE FOLLOWING ARE FROM THE ``BLACK */
821 		/* BOOK'' AND SHOULD BE PUT IN AN ``sbi.h'' */
822 		mtpr(SBIFS, sbifs &~ 0x2000000);
823 		mtpr(SBIER, mfpr(SBIER) | 0x70c0);
824 		break;
825 	}
826 #endif
827 #if VAX750
828 	case VAX_750: {
829 		register struct mc750frame *mcf = (struct mc750frame *)cmcf;
830 		printf("\tva %x errpc %x mdr %x smr %x rdtimo %x tbgpar %x cacherr %x\n",
831 		    mcf->mc5_va, mcf->mc5_errpc, mcf->mc5_mdr, mcf->mc5_svmode,
832 		    mcf->mc5_rdtimo, mcf->mc5_tbgpar, mcf->mc5_cacherr);
833 		printf("\tbuserr %x mcesr %x pc %x psl %x mcsr %x\n",
834 		    mcf->mc5_buserr, mcf->mc5_mcesr, mcf->mc5_pc, mcf->mc5_psl,
835 		    mfpr(MCSR));
836 		mtpr(MCESR, 0xf);
837 		if ((mcf->mc5_mcesr&0xf) == MC750_TBPAR) {
838 			printf("tbuf par: flushing and returning\n");
839 			mtpr(TBIA, 0);
840 			return;
841 		}
842 		break;
843 		}
844 #endif
845 #if VAX730
846 	case VAX_730: {
847 		register struct mc730frame *mcf = (struct mc730frame *)cmcf;
848 		printf("params %x,%x pc %x psl %x mcesr %x\n",
849 		    mcf->mc3_parm[0], mcf->mc3_parm[1],
850 		    mcf->mc3_pc, mcf->mc3_psl, mfpr(MCESR));
851 		mtpr(MCESR, 0xf);
852 		break;
853 		}
854 #endif
855 	}
856 	memerr();
857 	panic("mchk");
858 }
859 
860 #ifdef notdef
861 microtime(tvp)
862 	struct timeval *tvp;
863 {
864 	int s = spl7();
865 
866 	tvp->tv_sec = time.tv_sec;
867 	tvp->tv_usec = (lbolt+1)*16667 + mfpr(ICR);
868 	while (tvp->tv_usec > 1000000) {
869 		tvp->tv_sec++;
870 		tvp->tv_usec -= 1000000;
871 	}
872 	splx(s);
873 }
874 #endif
875 
876 physstrat(bp, strat, prio)
877 	struct buf *bp;
878 	int (*strat)(), prio;
879 {
880 	int s;
881 
882 	(*strat)(bp);
883 	/* pageout daemon doesn't wait for pushed pages */
884 	if (bp->b_flags & B_DIRTY)
885 		return;
886 	s = spl6();
887 	while ((bp->b_flags & B_DONE) == 0)
888 		sleep((caddr_t)bp, prio);
889 	splx(s);
890 }
891