xref: /original-bsd/sys/vax/vax/machdep.c (revision 8b257aca)
1 /*
2  * Copyright (c) 1982 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)machdep.c	6.17 (Berkeley) 07/02/85
7  */
8 
9 #include "reg.h"
10 #include "pte.h"
11 #include "psl.h"
12 
13 #include "param.h"
14 #include "systm.h"
15 #include "dir.h"
16 #include "user.h"
17 #include "kernel.h"
18 #include "map.h"
19 #include "vm.h"
20 #include "proc.h"
21 #include "buf.h"
22 #include "reboot.h"
23 #include "conf.h"
24 #include "inode.h"
25 #include "file.h"
26 #include "text.h"
27 #include "clist.h"
28 #include "callout.h"
29 #include "cmap.h"
30 #include "mbuf.h"
31 #include "msgbuf.h"
32 #include "quota.h"
33 
34 #include "frame.h"
35 #include "cons.h"
36 #include "cpu.h"
37 #include "mem.h"
38 #include "mtpr.h"
39 #include "rpb.h"
40 #include "../vaxuba/ubavar.h"
41 #include "../vaxuba/ubareg.h"
42 
/*
 * Declare these as initialized data so we can patch them.
 * A value of 0 means "size automatically at boot" -- see the
 * sizing heuristics in startup() below.  NBUF/BUFPAGES may be
 * fixed at compile time instead.
 */
int	nswbuf = 0;		/* number of swap (pageout/swap i/o) buffer headers */
#ifdef	NBUF
int	nbuf = NBUF;		/* number of file i/o buffer headers */
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;	/* memory clusters dedicated to buffer data */
#else
int	bufpages = 0;
#endif
57 
/*
 * Machine-dependent startup code.
 *
 * "firstaddr" is the first free physical page-frame number beyond the
 * kernel image.  This routine carves the statically-sized kernel tables
 * out of kernel virtual memory, sizes the buffer cache and the core map,
 * validates and clears the physical pages it claims, seeds the callout
 * free list and the resource maps, autoconfigures the devices, and
 * finally clears the console restart-inhibit flags.
 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;		/* kernel size in pages (clicks) */
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j;			/* Sysmap index used for buffer mapping */
	register caddr_t v;		/* next free kernel virtual address */
	int maxbufs, base, residual;
	extern char etext;

	/*
	 * Initialize error message buffer (at end of core):
	 * steal the last physical pages and map them through
	 * msgbufmap as valid, kernel read/write.
	 */
	maxmem -= btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		*(int *)pte++ = PG_V | PG_KW | (maxmem + i);
	mtpr(TBIA, 1);			/* flush the translation buffer */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem  = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(0x80000000 | (firstaddr * NBPG));
/* valloc: carve "num" objects of "type" out of kernel VM at "v" */
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
/* valloclim: as valloc, but also record the end address in "lim" */
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloclim(inode, struct inode, ninode, inodeNINODE);
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(nch, struct nch, nchsize);
#ifdef QUOTA
	valloclim(quota, struct quota, nquota, quotaNQUOTA);
	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 *
	 * NOTE(review): physmem is used as a page/click count elsewhere
	 * (ctob(physmem) above) but is compared against a byte count
	 * here -- presumably deliberate slop in the heuristic; confirm.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * 1024))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages =
			    ((2 * 1024 * 1024) / 5 + physmem / 5) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 * Cap nbuf/bufpages so everything fits in the system page table.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ 0x80000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - 0x80000000)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ 0x80000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ 0x80000000);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Each buffer gets MAXBSIZE of virtual space; the first "residual"
	 * buffers get base+1 clusters of real memory, the rest get "base".
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);	/* page-align */
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
	for (i = 0; i < residual; i++) {
		for (j = 0; j < (base + 1) * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}
	for (i = residual; i < nbuf; i++) {
		for (j = 0; j < base * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ 0x80000000);
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 1);			/* After we just cleared it all! */

	/*
	 * Initialize callouts: chain every entry onto the free list.
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)((nmbclusters - 1) * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);

	/*
	 * Configure the system: probe and attach the devices.
	 */
	configure();

	/*
	 * Clear restart inhibit flags (console warm/cold start inhibits).
	 */
	tocons(TXDB_CWSI);
	tocons(TXDB_CCSI);
}
251 
252 #ifdef PGINPROF
253 /*
254  * Return the difference (in microseconds)
255  * between the  current time and a previous
256  * time as represented  by the arguments.
257  * If there is a pending clock interrupt
258  * which has not been serviced due to high
259  * ipl, return error code.
260  */
261 vmtime(otime, olbolt, oicr)
262 	register int otime, olbolt, oicr;
263 {
264 
265 	if (mfpr(ICCS)&ICCS_INT)
266 		return(-1);
267 	else
268 		return(((time.tv_sec-otime)*60 + lbolt-olbolt)*16667 + mfpr(ICR)-oicr);
269 }
270 #endif
271 
/*
 * Clear registers on exec.
 * Only the PC is actually set up here; the register-clearing code
 * below is disabled (and flagged as wrong by its author).
 */
setregs(entry)
	u_long entry;
{
#ifdef notdef
	register int *rp;

	/* should pass args to init on the stack */
	/* should also fix this code before using it, it's wrong */
	/* wanna clear the scb? */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	/*
	 * Start the new image at its entry point; the +2 presumably
	 * skips the 2-byte register save mask word at the start of a
	 * VAX procedure -- TODO confirm.
	 */
	u.u_ar0[PC] = entry + 2;
}
289 
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 *
 * p    -- user-mode handler to invoke
 * sig  -- signal number being delivered
 * mask -- signal mask for sigreturn to restore afterwards
 */
sendsig(p, sig, mask)
	int (*p)(), sig, mask;
{
	register struct sigcontext *scp;
	register int *regs;
	/* frame pushed on the user's stack: handler args + calls frame */
	register struct sigframe {
		int	sf_signum;		/* signal number for handler */
		int	sf_code;		/* extra code (SIGILL/SIGFPE only) */
		struct	sigcontext *sf_scp;	/* context pointer for handler */
		int	(*sf_handler)();	/* the handler itself */
		int	sf_argcount;		/* calls-style arg count for sigreturn */
		struct	sigcontext *sf_scpcopy;	/* argument to sigreturn */
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		/* deliver on the alternate signal stack */
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), 1) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		u.u_signal[SIGILL] = SIG_DFL;
		sig = sigmask(SIGILL);
		u.u_procp->p_sigignore &= ~sig;
		u.u_procp->p_sigcatch &= ~sig;
		u.u_procp->p_sigmask &= ~sig;
		psignal(u.u_procp, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	if (sig == SIGILL || sig == SIGFPE) {
		fp->sf_code = u.u_code;
		u.u_code = 0;
	} else
		fp->sf_code = 0;
	fp->sf_scp = scp;
	fp->sf_handler = p;
	/*
	 * Build the calls argument frame to be used to call sigreturn
	 */
	fp->sf_argcount = 1;
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn:
	 * save the interrupted sp/fp/ap/pc/psl and the old mask.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_ap = regs[AP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	/* Resume the process in the trampoline with the frame on its stack. */
	regs[SP] = (int)fp;
	regs[PS] &= ~(PSL_CM|PSL_FPD);	/* native mode, clear first-part-done */
	regs[PC] = (int)u.u_pcb.pcb_sigc;
	return;
}
376 
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
	struct a {
		struct sigcontext *sigcntxp;	/* user pointer to saved context */
	};
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = ((struct a *)(u.u_ap))->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
		return;		/* context unreadable; leave state untouched */
	/*
	 * Reject a PSL that: sets any must-be-zero, IPL, or
	 * interrupt-stack bits; is not user mode in both the current
	 * and previous mode fields; or, in compatibility mode, sets
	 * first-part-done or any of the arithmetic trap enables.
	 */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
	    ((scp->sc_ps & PSL_CM) &&
	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0)) {
		u.u_error = EINVAL;
		return;
	}
	/* presumably suppresses the normal syscall return-value path -- confirm */
	u.u_eosys = JUSTRETURN;
	u.u_onstack = scp->sc_onstack & 01;
	/* SIGKILL, SIGCONT and SIGSTOP may never be blocked */
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[FP] = scp->sc_fp;
	regs[AP] = scp->sc_ap;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
}
415 
/* XXX - BEGIN 4.2 COMPATIBILITY */
/*
 * Compatibility with 4.2 chmk $139 used by longjmp().
 * The old-style context pointer sits on top of the user stack;
 * only its first three fields (sc_onstack, sc_mask, sc_sp) are
 * consumed, hence only 3 ints are access-checked.
 */
osigcleanup()
{
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = (struct sigcontext *)fuword((caddr_t)regs[SP]);
	if ((int)scp == -1)
		return;		/* fuword faulted on the user stack */
	if (useracc((caddr_t)scp, 3 * sizeof (int), 0) == 0)
		return;
	u.u_onstack = scp->sc_onstack & 01;
	/* SIGKILL, SIGCONT and SIGSTOP may never be blocked */
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[SP] = scp->sc_sp;
}
/* XXX - END 4.2 COMPATIBILITY */
436 
#ifdef notdef
/*
 * (Disabled.)  Simulate a procedure return followed by an rei out
 * of the user's current calls frame: restore the saved pc/fp/ap and
 * the registers named in the entry mask, pop alignment/extra bytes,
 * then pop a pc/psl pair and sanitize the psl.
 */
dorti()
{
	struct frame frame;
	register int sp;
	register int reg, mask;
	extern int ipcreg[];

	(void) copyin((caddr_t)u.u_ar0[FP], (caddr_t)&frame, sizeof (frame));
	sp = u.u_ar0[FP] + sizeof (frame);
	u.u_ar0[PC] = frame.fr_savpc;
	u.u_ar0[FP] = frame.fr_savfp;
	u.u_ar0[AP] = frame.fr_savap;
	mask = frame.fr_mask;
	/* restore r0..r11 named in the procedure entry mask */
	for (reg = 0; reg <= 11; reg++) {
		if (mask&1) {
			u.u_ar0[ipcreg[reg]] = fuword((caddr_t)sp);
			sp += 4;
		}
		mask >>= 1;
	}
	sp += frame.fr_spa;	/* stack alignment bytes */
	u.u_ar0[PS] = (u.u_ar0[PS] & 0xffff0000) | frame.fr_psw;
	if (frame.fr_s)		/* "calls" frame: pop the argument list too */
		sp += 4 + 4 * (fuword((caddr_t)sp) & 0xff);
	/* phew, now the rei */
	u.u_ar0[PC] = fuword((caddr_t)sp);
	sp += 4;
	u.u_ar0[PS] = fuword((caddr_t)sp);
	sp += 4;
	u.u_ar0[PS] |= PSL_USERSET;	/* force user-mode bits on... */
	u.u_ar0[PS] &= ~PSL_USERCLR;	/* ...and privileged bits off */
	u.u_ar0[SP] = (int)sp;
}
#endif
472 
/*
 * Memenable enables the memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs.  Thus we report at most once per memintvl.
 */
int	memintvl = MEMINTVL;	/* re-enable interval (seconds; scaled by hz below); patchable */

memenable()
{
	register struct mcr *mcr;
	register int m;

	/* re-enable corrected-data interrupts on every controller present */
	for (m = 0; m < nmcr; m++) {
		mcr = mcraddr[m];
		switch (mcrtype[m]) {
#if VAX780
		case M780C:
			M780C_ENA(mcr);
			break;
		case M780EL:
			M780EL_ENA(mcr);
			break;
		case M780EU:
			M780EU_ENA(mcr);
			break;
#endif
#if VAX750
		case M750:
			M750_ENA(mcr);
			break;
#endif
#if VAX730
		case M730:
			M730_ENA(mcr);
			break;
#endif
		}
	}
	/* reschedule ourselves; memintvl <= 0 disables re-enabling */
	if (memintvl > 0)
		timeout(memenable, (caddr_t)0, memintvl*hz);
}
515 
/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts.  It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controllers (memenable()
 * turns reporting back on later).
 */
memerr()
{
	register struct mcr *mcr;
	register int m;

	for (m = 0; m < nmcr; m++) {
		mcr = mcraddr[m];
		switch (mcrtype[m]) {
#if VAX780
		case M780C:
			if (M780C_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780C_ADDR(mcr), M780C_SYN(mcr));
#ifdef TRENDATA
				memlog(m, mcr);
#endif
				/* inhibit further reports on this controller */
				M780C_INH(mcr);
			}
			break;

		case M780EL:
			if (M780EL_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780EL_ADDR(mcr), M780EL_SYN(mcr));
				M780EL_INH(mcr);
			}
			break;

		case M780EU:
			if (M780EU_ERR(mcr)) {
				printf("mcr%d: soft ecc addr %x syn %x\n",
				    m, M780EU_ADDR(mcr), M780EU_SYN(mcr));
				M780EU_INH(mcr);
			}
			break;
#endif
#if VAX750
		case M750:
			if (M750_ERR(mcr)) {
				/* snapshot the register before decoding it */
				struct mcr amcr;
				amcr.mc_reg[0] = mcr->mc_reg[0];
				printf("mcr%d: %s",
				    m, (amcr.mc_reg[0] & M750_UNCORR) ?
				    "hard error" : "soft ecc");
				printf(" addr %x syn %x\n",
				    M750_ADDR(&amcr), M750_SYN(&amcr));
				M750_INH(mcr);
			}
			break;
#endif
#if VAX730
		case M730: {
			struct mcr amcr;

			/*
			 * Must be careful on the 730 not to use invalid
			 * instructions in I/O space, so make a copy.
			 */
			amcr.mc_reg[0] = mcr->mc_reg[0];
			amcr.mc_reg[1] = mcr->mc_reg[1];
			if (M730_ERR(&amcr)) {
				printf("mcr%d: %s",
				    m, (amcr.mc_reg[1] & M730_UNCORR) ?
				    "hard error" : "soft ecc");
				printf(" addr %x syn %x\n",
				    M730_ADDR(&amcr), M730_SYN(&amcr));
				M730_INH(mcr);
			}
			break;
		}
#endif
		}
	}
}
596 
#ifdef TRENDATA
/*
 * Figure out what chip to replace on Trendata boards.
 * Assumes all your memory is Trendata or the non-Trendata
 * memory never fails..
 *
 * The table maps a single-bit-error ECC syndrome to the board
 * position label of the failing chip.
 */
struct {
	u_char	m_syndrome;	/* ECC syndrome byte reported by the controller */
	char	m_chip[4];	/* chip position label, e.g. "U23" */
} memlogtab[] = {
	0x01,	"C00",	0x02,	"C01",	0x04,	"C02",	0x08,	"C03",
	0x10,	"C04",	0x19,	"L01",	0x1A,	"L02",	0x1C,	"L04",
	0x1F,	"L07",	0x20,	"C05",	0x38,	"L00",	0x3B,	"L03",
	0x3D,	"L05",	0x3E,	"L06",	0x40,	"C06",	0x49,	"L09",
	0x4A,	"L10",	0x4c,	"L12",	0x4F,	"L15",	0x51,	"L17",
	0x52,	"L18",	0x54,	"L20",	0x57,	"L23",	0x58,	"L24",
	0x5B,	"L27",	0x5D,	"L29",	0x5E,	"L30",	0x68,	"L08",
	0x6B,	"L11",	0x6D,	"L13",	0x6E,	"L14",	0x70,	"L16",
	0x73,	"L19",	0x75,	"L21",	0x76,	"L22",	0x79,	"L25",
	0x7A,	"L26",	0x7C,	"L28",	0x7F,	"L31",	0x80,	"C07",
	0x89,	"U01",	0x8A,	"U02",	0x8C,	"U04",	0x8F,	"U07",
	0x91,	"U09",	0x92,	"U10",	0x94,	"U12",	0x97, 	"U15",
	0x98,	"U16",	0x9B,	"U19",	0x9D,	"U21",	0x9E, 	"U22",
	0xA8,	"U00",	0xAB,	"U03",	0xAD,	"U05",	0xAE,	"U06",
	0xB0,	"U08",	0xB3,	"U11",	0xB5,	"U13",	0xB6,	"U14",
	0xB9,	"U17",	0xBA,	"U18",	0xBC,	"U20",	0xBF,	"U23",
	0xC1,	"U25",	0xC2,	"U26",	0xC4,	"U28",	0xC7,	"U31",
	0xE0,	"U24",	0xE3,	"U27",	0xE5,	"U29",	0xE6,	"U30"
};

/*
 * Report which memory chip a corrected error corresponds to,
 * by looking the reported syndrome up in memlogtab.
 * m   -- memory controller index (used in the message only)
 * mcr -- controller registers, already known to show an error
 */
memlog (m, mcr)
	int m;
	struct mcr *mcr;
{
	register i;

	switch (mcrtype[m]) {

#if VAX780
	case M780C:
	for (i = 0; i < (sizeof (memlogtab) / sizeof (memlogtab[0])); i++)
		if ((u_char)(M780C_SYN(mcr)) == memlogtab[i].m_syndrome) {
			printf (
	"mcr%d: replace %s chip in %s bank of memory board %d (0-15)\n",
				m,
				memlogtab[i].m_chip,
				(M780C_ADDR(mcr) & 0x8000) ? "upper" : "lower",
				(M780C_ADDR(mcr) >> 16));
			return;
		}
	/* syndrome not in the table: more than one bit was in error */
	printf ("mcr%d: multiple errors, not traceable\n", m);
	break;
#endif
	}
}
#endif
653 
/*
 * Invalidate all pte's in a single cluster
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	/*
	 * The inline asm below names "addr" as r11: the compiler
	 * assigns the first register variable to r11, which is why
	 * "addr" must be declared first above.
	 */
	asm(".set TBIS,58");
	addr = ptob(v);			/* page-frame number -> byte address */
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr(TBIS, addr);
#else
		asm("mtpr r11,$TBIS");	/* invalidate one TB entry */
#endif
		addr += NBPG;
	}
}
674 
int	waittime = -1;		/* -1 until a sync has been started; >= 0 after */

/*
 * Halt or reboot the machine.
 * paniced  -- RB_PANIC when called from panic()
 * arghowto -- RB_* flags (reboot(2) / console)
 * Unless RB_NOSYNC is set (or we already synced, or the buffer
 * cache was never initialized), sync the disks and wait for busy
 * buffers to drain before halting or handing off to the console.
 */
boot(paniced, arghowto)
	int paniced, arghowto;
{
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */

#ifdef lint
	howto = 0; devtype = 0;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
	howto = arghowto;
	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		update();
		/* give the disks up to 20 passes to finish busy buffers */
		{ register struct buf *bp;
		  int iter, nbusy;

		  for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		  }
		}
		printf("done\n");
	}
	splx(0x1f);			/* extreme priority */
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		mtpr(IPL, 0x1f);
		for (;;)
			;
	} else {
		if (paniced == RB_PANIC) {
			doadump();		/* TXDB_BOOT's itself */
			/*NOTREACHED*/
		}
		tocons(TXDB_BOOT);
	}
#if defined(VAX750) || defined(VAX730)
	if (cpu != VAX_780)
		{ asm("movl r11,r5"); }		/* boot flags go in r5 */
#endif
	for (;;)
		asm("halt");
	/*NOTREACHED*/
}
731 
732 tocons(c)
733 {
734 
735 	while ((mfpr(TXCS)&TXCS_RDY) == 0)
736 		continue;
737 	mtpr(TXDB, c);
738 }
739 
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 * Dumps all of physical memory to dumpdev through its d_dump
 * entry point and reports the outcome on the console.
 */
dumpsys()
{

	rpb.rp_flag = 1;	/* note the dump in the restart parameter block */
#ifdef notdef
	if ((minor(dumpdev)&07) != 1)
		return;
#endif
	dumpsize = physmem;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	/* d_dump returns an errno describing how the dump went */
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error");
		break;

	default:
		printf("succeeded");
		break;
	}
}
781 
/*
 * Machine check error recovery code.
 * Print out the machine check frame and then give up.
 */
#if defined(VAX780) || defined(VAX750)
/* summary-type descriptions, indexed by summary<3:0> (see machinecheck) */
char *mc780[] = {
	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
	"cp rdtimo", 	"cp rds",	"ucode lost",	0,
	0,		0,		"ib tbuf par",	0,
	"ib rds",	"ib rd timo",	0,		"ib cache par"
};
#define MC750_TBERR	2		/* type code of cp tbuf par */
#define	MC750_TBPAR	4		/* tbuf par bit in mcesr */
#endif
#if VAX730
#define	NMC730	12			/* number of entries in mc730[] */
char *mc730[] = {
	"tb par",	"bad retry",	"bad intr id",	"cant write ptem",
	"unkn mcr err",	"iib rd err",	"nxm ref",	"cp rds",
	"unalgn ioref",	"nonlw ioref",	"bad ioaddr",	"unalgn ubaddr",
};
#endif

/*
 * Frame for each cpu: the machine-check stack frame layout decoded
 * by machinecheck() below.  All three start with a byte count and
 * summary word, so the 780 template can be used to fetch the summary
 * regardless of cpu type.
 */
struct mc780frame {
	int	mc8_bcnt;		/* byte count == 0x28 */
	int	mc8_summary;		/* summary parameter (as above) */
	int	mc8_cpues;		/* cpu error status */
	int	mc8_upc;		/* micro pc */
	int	mc8_vaviba;		/* va/viba register */
	int	mc8_dreg;		/* d register */
	int	mc8_tber0;		/* tbuf error reg 0 */
	int	mc8_tber1;		/* tbuf error reg 1 */
	int	mc8_timo;		/* timeout address divided by 4 */
	int	mc8_parity;		/* parity */
	int	mc8_sbier;		/* sbi error register */
	int	mc8_pc;			/* trapped pc */
	int	mc8_psl;		/* trapped psl */
};
struct mc750frame {
	int	mc5_bcnt;		/* byte count == 0x28 */
	int	mc5_summary;		/* summary parameter (as above) */
	int	mc5_va;			/* virtual address register */
	int	mc5_errpc;		/* error pc */
	int	mc5_mdr;		/* memory data register */
	int	mc5_svmode;		/* saved mode register */
	int	mc5_rdtimo;		/* read lock timeout */
	int	mc5_tbgpar;		/* tb group parity error register */
	int	mc5_cacherr;		/* cache error register */
	int	mc5_buserr;		/* bus error register */
	int	mc5_mcesr;		/* machine check status register */
	int	mc5_pc;			/* trapped pc */
	int	mc5_psl;		/* trapped psl */
};
struct mc730frame {
	int	mc3_bcnt;		/* byte count == 0xc */
	int	mc3_summary;		/* summary parameter */
	int	mc3_parm[2];		/* parameter 1 and 2 */
	int	mc3_pc;			/* trapped pc */
	int	mc3_psl;		/* trapped psl */
};
845 
/*
 * Machine check trap handler: decode and print the cpu-specific
 * machine-check frame, attempt recovery (only the 750 tbuf parity
 * case returns), then report pending memory errors and panic.
 * cmcf -- pointer to the cpu-specific machine check frame.
 */
machinecheck(cmcf)
	caddr_t cmcf;
{
	/* bcnt/summary sit at the same offsets in all three frame
	 * layouts, so the 780 template serves to fetch the summary */
	register u_int type = ((struct mc780frame *)cmcf)->mc8_summary;

	printf("machine check %x: ", type);
	/* first pass: print the symbolic description of the fault */
	switch (cpu) {
#if VAX780
	case VAX_780:
#endif
#if VAX750
	case VAX_750:
#endif
#if defined(VAX780) || defined(VAX750)
		printf("%s%s\n", mc780[type&0xf],
		    (type&0xf0) ? " abort" : " fault");
		break;
#endif
#if VAX730
	case VAX_730:
		if (type < NMC730)
			printf("%s", mc730[type]);
		printf("\n");
		break;
#endif
	}
	/* second pass: dump the full frame and clear/ack the error state */
	switch (cpu) {
#if VAX780
	case VAX_780: {
		register struct mc780frame *mcf = (struct mc780frame *)cmcf;
		register int sbifs;
		printf("\tcpues %x upc %x va/viba %x dreg %x tber %x %x\n",
		   mcf->mc8_cpues, mcf->mc8_upc, mcf->mc8_vaviba,
		   mcf->mc8_dreg, mcf->mc8_tber0, mcf->mc8_tber1);
		sbifs = mfpr(SBIFS);
		printf("\ttimo %x parity %x sbier %x pc %x psl %x sbifs %x\n",
		   mcf->mc8_timo*4, mcf->mc8_parity, mcf->mc8_sbier,
		   mcf->mc8_pc, mcf->mc8_psl, sbifs);
		/* THE FUNNY BITS IN THE FOLLOWING ARE FROM THE ``BLACK */
		/* BOOK'' AND SHOULD BE PUT IN AN ``sbi.h'' */
		mtpr(SBIFS, sbifs &~ 0x2000000);
		mtpr(SBIER, mfpr(SBIER) | 0x70c0);
		break;
	}
#endif
#if VAX750
	case VAX_750: {
		register struct mc750frame *mcf = (struct mc750frame *)cmcf;
		int mcsr = mfpr(MCSR);

		mtpr(TBIA, 0);		/* flush the translation buffer */
		mtpr(MCESR, 0xf);	/* acknowledge the machine check */
		printf("\tva %x errpc %x mdr %x smr %x rdtimo %x tbgpar %x cacherr %x\n",
		    mcf->mc5_va, mcf->mc5_errpc, mcf->mc5_mdr, mcf->mc5_svmode,
		    mcf->mc5_rdtimo, mcf->mc5_tbgpar, mcf->mc5_cacherr);
		printf("\tbuserr %x mcesr %x pc %x psl %x mcsr %x\n",
		    mcf->mc5_buserr, mcf->mc5_mcesr, mcf->mc5_pc, mcf->mc5_psl,
		    mcsr);
		/* recoverable case: tb parity, already flushed above */
		if (type == MC750_TBERR && (mcf->mc5_mcesr&0xe) == MC750_TBPAR){
			printf("tbuf par: flushing and returning\n");
			return;
		}
		break;
		}
#endif
#if VAX730
	case VAX_730: {
		register struct mc730frame *mcf = (struct mc730frame *)cmcf;
		printf("params %x,%x pc %x psl %x mcesr %x\n",
		    mcf->mc3_parm[0], mcf->mc3_parm[1],
		    mcf->mc3_pc, mcf->mc3_psl, mfpr(MCESR));
		mtpr(MCESR, 0xf);	/* acknowledge the machine check */
		break;
		}
#endif
	}
	memerr();
	panic("mchk");
}
925 
#ifdef notdef
/*
 * (Disabled.)  Fill in *tvp with the current time of day to
 * microsecond resolution: seconds from the software clock plus
 * the elapsed fraction of the current tick from the interval
 * count register, normalized so that 0 <= tv_usec < 1000000.
 */
microtime(tvp)
	struct timeval *tvp;
{
	int s = spl7();		/* block clock interrupts while sampling */

	tvp->tv_sec = time.tv_sec;
	tvp->tv_usec = (lbolt+1)*16667 + mfpr(ICR);
	/*
	 * Was ">": that left tv_usec == 1000000 (a full second)
	 * unnormalized; ">=" keeps tv_usec strictly below one second.
	 */
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	splx(s);
}
#endif
941 
942 physstrat(bp, strat, prio)
943 	struct buf *bp;
944 	int (*strat)(), prio;
945 {
946 	int s;
947 
948 	(*strat)(bp);
949 	/* pageout daemon doesn't wait for pushed pages */
950 	if (bp->b_flags & B_DIRTY)
951 		return;
952 	s = spl6();
953 	while ((bp->b_flags & B_DONE) == 0)
954 		sleep((caddr_t)bp, prio);
955 	splx(s);
956 }
957