xref: /original-bsd/sys/vax/vax/machdep.c (revision 5fd6b0d9)
1 /*
2  * Copyright (c) 1982,1986,1988 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)machdep.c	7.17 (Berkeley) 09/22/88
7  */
8 
9 #include "param.h"
10 #include "systm.h"
11 #include "dir.h"
12 #include "user.h"
13 #include "kernel.h"
14 #include "malloc.h"
15 #include "map.h"
16 #include "vm.h"
17 #include "proc.h"
18 #include "buf.h"
19 #include "reboot.h"
20 #include "conf.h"
21 #include "inode.h"
22 #include "file.h"
23 #include "text.h"
24 #include "clist.h"
25 #include "callout.h"
26 #include "cmap.h"
27 #include "mbuf.h"
28 #include "msgbuf.h"
29 #include "quota.h"
30 
31 #include "reg.h"
32 #include "pte.h"
33 #include "psl.h"
34 #include "frame.h"
35 #include "clock.h"
36 #include "cons.h"
37 #include "cpu.h"
38 #include "mem.h"
39 #include "mtpr.h"
40 #include "rpb.h"
41 #include "ka630.h"
42 #include "ka650.h"
43 
44 #include "../vaxuba/ubavar.h"
45 #include "../vaxuba/ubareg.h"
46 
/*
 * Declare these as initialized data so we can patch them.
 * A value of zero means "compute a default in startup()".
 */
int	nswbuf = 0;		/* swap I/O buffer headers; sized from nbuf */
#ifdef	NBUF
int	nbuf = NBUF;		/* buffer cache headers, from config file */
#else
int	nbuf = 0;		/* sized from bufpages in startup() */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;	/* pages of buffer cache memory, from config */
#else
int	bufpages = 0;		/* sized from physmem in startup() */
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
62 
/*
 * Machine-dependent startup code.
 *
 * Maps the kernel message buffer, announces the system, carves the
 * statically-sized system tables out of the virtual space just past
 * the loaded kernel, sizes and maps the buffer cache, builds the
 * core map, initializes the allocators, and autoconfigures the
 * machine.
 *
 * firstaddr is the first free physical page frame number past the
 * loaded kernel; it is advanced as physical pages are allocated.
 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;		/* kernel size in pages, incl. tables */
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j, n;
	register caddr_t v;		/* next free kernel virtual address */
	int maxbufs, base, residual;

	/*
	 * Initialize error message buffer (at end of core).
	 * The top pages of physical memory are removed from maxmem
	 * and mapped through msgbufmap.
	 */
	maxmem -= btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		*(int *)pte++ = PG_V | PG_KW | (maxmem + i);
	mtpr(TBIA, 0);		/* flush stale translations for the new map */
	msgbufmapped = 1;	/* printf may now log into msgbuf */

#ifdef QBA
#include "qv.h"
#if NQV > 0
	/*
	 * redirect console to qvss if it exists
	 */
	qvcons_init();
#endif
#include "qd.h"
#if NQD > 0
	/*
	 * redirect console to qdss if it exists
	 */
	qdcons_init();
#endif
#endif

#ifdef KADB
	kdb_init();		/* kernel debugger hook */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem  = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(KERNBASE | (firstaddr * NBPG));
	/*
	 * valloc carves `num' entries of `type' out of v for `name';
	 * valloclim additionally records the end address in `lim'.
	 */
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloclim(inode, struct inode, ninode, inodeNINODE);
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(namecache, struct namecache, nchsize);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef QUOTA
	valloclim(quota, struct quota, nquota, quotaNQUOTA);
	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * CLSIZE))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * CLSIZE + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 * (This ncmap is only a provisional estimate; it is redone
	 * below once bufpages is final.)
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - KERNBASE)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ KERNBASE)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ KERNBASE);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Each buffer gets MAXBSIZE of virtual space, but only the
	 * first `base' (or base+1 for the first `residual' buffers)
	 * clusters are backed by real memory.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);	/* page align */
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
	for (i = 0; i < nbuf; i++) {
		n = (i < residual ? base + 1 : base) * CLSIZE;
		for (j = 0; j < n; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;	/* skip the unbacked tail */
	}

	unixsize = btoc((int)v &~ KERNBASE);
	/* insist on some slack (8 u. areas worth) beyond the kernel */
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 0);			/* After we just cleared it all! */

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();	/* now safe to do malloc/free */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Clear restart inhibit flags.
	 */
	tocons(TXDB_CWSI);
	tocons(TXDB_CCSI);
}
282 
283 #ifdef PGINPROF
284 /*
285  * Return the difference (in microseconds)
286  * between the  current time and a previous
287  * time as represented  by the arguments.
288  * If there is a pending clock interrupt
289  * which has not been serviced due to high
290  * ipl, return error code.
291  */
292 vmtime(otime, olbolt, oicr)
293 	register int otime, olbolt, oicr;
294 {
295 
296 	if (mfpr(ICCS)&ICCS_INT)
297 		return(-1);
298 	else
299 		return(((time.tv_sec-otime)*60 + lbolt-olbolt)*16667 + mfpr(ICR)-oicr);
300 }
301 #endif
302 
/*
 * Clear registers on exec.
 * Only the pc is actually set (the register-clearing code below
 * is disabled); entry + 2 skips the two-byte register save mask
 * word at the procedure entry point.
 */
setregs(entry)
	u_long entry;
{
#ifdef notdef
	register int *rp;

	/* should pass args to init on the stack */
	/* should also fix this code before using it, it's wrong */
	/* wanna clear the scb? */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	u.u_ar0[PC] = entry + 2;
}
320 
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 *
 * p is the user's handler, sig the signal number, and mask the
 * signal mask that sigreturn will restore.
 */
sendsig(p, sig, mask)
	int (*p)(), sig, mask;
{
	register struct sigcontext *scp;
	register int *regs;
	/* layout of the frame pushed on the user's stack */
	register struct sigframe {
		int	sf_signum;		/* handler arg: signal number */
		int	sf_code;		/* handler arg: u.u_code, if any */
		struct	sigcontext *sf_scp;	/* handler arg: context */
		int	(*sf_handler)();	/* handler to invoke */
		int	sf_argcount;		/* calls frame for sigreturn */
		struct	sigcontext *sf_scpcopy;	/* sigreturn's argument */
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void)grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		u.u_signal[SIGILL] = SIG_DFL;
		sig = sigmask(SIGILL);	/* note: sig is reused as a mask here */
		u.u_procp->p_sigignore &= ~sig;
		u.u_procp->p_sigcatch &= ~sig;
		u.u_procp->p_sigmask &= ~sig;
		psignal(u.u_procp, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	if (sig == SIGILL || sig == SIGFPE) {
		fp->sf_code = u.u_code;
		u.u_code = 0;		/* code is consumed once delivered */
	} else
		fp->sf_code = 0;
	fp->sf_scp = scp;
	fp->sf_handler = p;
	/*
	 * Build the calls argument frame to be used to call sigreturn
	 */
	fp->sf_argcount = 1;
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_ap = regs[AP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	regs[SP] = (int)fp;		/* user resumes with the new frame */
	regs[PS] &= ~(PSL_CM|PSL_FPD);	/* native mode, clear first-part-done */
	regs[PC] = (int)u.u_pcb.pcb_sigc;	/* sigcode trampoline */
	return;
}
407 
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
	struct a {
		struct sigcontext *sigcntxp;	/* user address of the context */
	};
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = ((struct a *)(u.u_ap))->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return;		/* context inaccessible; change nothing */
	/*
	 * Refuse a psl with must-be-zero, ipl, or interrupt-stack bits
	 * set, one whose current/previous mode fields are not both user,
	 * or compatibility mode combined with bits invalid there.
	 */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD) ||
	    ((scp->sc_ps & PSL_CM) &&
	     (scp->sc_ps & (PSL_FPD|PSL_DV|PSL_FU|PSL_IV)) != 0)) {
		u.u_error = EINVAL;
		return;
	}
	u.u_eosys = JUSTRETURN;		/* registers are restored right here */
	u.u_onstack = scp->sc_onstack & 01;
	/* SIGKILL, SIGCONT and SIGSTOP can never be masked */
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[FP] = scp->sc_fp;
	regs[AP] = scp->sc_ap;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
}
446 
447 /* XXX - BEGIN 4.2 COMPATIBILITY */
/*
 * Compatibility with 4.2 chmk $139 used by longjmp().
 * The context pointer is fetched from the top of the user stack;
 * only 3 ints of it are access-checked, presumably covering just
 * the onstack/mask/sp fields used below -- confirm against the
 * sigcontext layout.
 */
osigcleanup()
{
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = (struct sigcontext *)fuword((caddr_t)regs[SP]);
	if ((int)scp == -1)
		return;		/* fuword faulted */
	if (useracc((caddr_t)scp, 3 * sizeof (int), B_WRITE) == 0)
		return;
	u.u_onstack = scp->sc_onstack & 01;
	/* SIGKILL, SIGCONT and SIGSTOP can never be masked */
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[SP] = scp->sc_sp;
}
466 /* XXX - END 4.2 COMPATIBILITY */
467 
468 #ifdef notdef
/*
 * Unwind one call frame plus an exception frame from the user
 * stack into the saved user registers, sanitizing the resulting
 * psl -- in effect simulating a `ret' followed by an `rei' on
 * the user's behalf.  (Currently compiled out: #ifdef notdef.)
 */
dorti()
{
	struct frame frame;
	register int sp;
	register int reg, mask;
	extern int ipcreg[];

	(void) copyin((caddr_t)u.u_ar0[FP], (caddr_t)&frame, sizeof (frame));
	sp = u.u_ar0[FP] + sizeof (frame);
	u.u_ar0[PC] = frame.fr_savpc;
	u.u_ar0[FP] = frame.fr_savfp;
	u.u_ar0[AP] = frame.fr_savap;
	mask = frame.fr_mask;
	/* pop the registers named in the frame's save mask (r0-r11) */
	for (reg = 0; reg <= 11; reg++) {
		if (mask&1) {
			u.u_ar0[ipcreg[reg]] = fuword((caddr_t)sp);
			sp += 4;
		}
		mask >>= 1;
	}
	sp += frame.fr_spa;		/* stack pointer adjustment field */
	u.u_ar0[PS] = (u.u_ar0[PS] & 0xffff0000) | frame.fr_psw;
	if (frame.fr_s)			/* `calls' frame: pop the arg list too */
		sp += 4 + 4 * (fuword((caddr_t)sp) & 0xff);
	/* phew, now the rei */
	u.u_ar0[PC] = fuword((caddr_t)sp);
	sp += 4;
	u.u_ar0[PS] = fuword((caddr_t)sp);
	sp += 4;
	/* force a psl that is legal for user mode */
	u.u_ar0[PS] |= PSL_USERSET;
	u.u_ar0[PS] &= ~PSL_USERCLR;
	if (u.u_ar0[PS] & PSL_CM)
		u.u_ar0[PS] &= ~PSL_CM_CLR;
	u.u_ar0[SP] = (int)sp;
}
504 #endif
505 
/*
 * Memenable enables memory controller corrected data reporting.
 * This runs at regular intervals, turning on the interrupt.
 * The interrupt is turned off, per memory controller, when error
 * reporting occurs.  Thus we report at most once per memintvl.
 */
int	memintvl = MEMINTVL;	/* seconds between re-enables; <= 0 disables rearm */

memenable()
{

	(*cpuops->cpu_memenable)();	/* CPU-specific enable */
	if (memintvl > 0)
		timeout(memenable, (caddr_t)0, memintvl*hz);	/* rearm */
}
521 
/*
 * Memerr is the interrupt routine for corrected read data
 * interrupts.  It looks to see which memory controllers have
 * unreported errors, reports them, and disables further
 * reporting for a time on those controller.
 */
memerr()
{

	(*cpuops->cpu_memerr)();	/* all the work is CPU-specific */
}
533 
/*
 * Invalidate all pte's in a cluster: issue a TBIS (translation
 * buffer invalidate single) for each of the CLSIZE pages of the
 * cluster whose first virtual page number is v.
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var: it gets r11,
					 * which the hand-coded mtpr below names */
	register int i;

	asm(".set TBIS,58");		/* TBIS register number for the assembler */
	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
#ifdef lint
		mtpr(TBIS, addr);	/* what the asm below really does */
#else
		asm("mtpr r11,$TBIS");
#endif
		addr += NBPG;
	}
}
554 
int	waittime = -1;		/* -1 until the disks have been synced once */

/*
 * Halt or reboot the machine.  howto is the RB_* flag word
 * (kept in r11 for the reboot path -- see vaxboot()):
 * RB_NOSYNC skips the disk sync, RB_HALT spins instead of
 * rebooting, RB_DUMP dumps core first.
 */
boot(howto)
	register int howto;		/* r11 == how to boot */
{
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	if ((howto&RB_NOSYNC)==0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;		/* don't sync twice if re-entered */
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			xumount(NODEV);
		update();

		/* wait, with increasing delays, for busy buffers to drain */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splx(0x1f);			/* extreme priority */
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		switch (cpu) {

		/* 630 can be told to halt, but how? */
#if VAX650
		case VAX_650:
			/* tell the console program to halt, not reboot */
			ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
			ka650ssc.ssc_cpmbx |= CPMB650_HALT;
			asm("halt");
#endif
		}
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		for (;;)
			;
	} else {
		if (howto & RB_DUMP)
			doadump();
		vaxboot();
	}
#ifdef lint
	devtype = devtype;
#endif
	/*NOTREACHED*/
}
623 
/*
 * Reboot after panic or via reboot system call.  Note that r11
 * and r10 must already have the proper boot values (`call by voodoo').
 */
vaxboot()
{

	switch (cpu) {

#ifdef VAX8200
	case VAX_8200:
		/*
		 * TXDB_BOOT erases memory!  Instead we set the `did
		 * a dump' flag in the rpb.
		 */
		*(int *)&Sysmap[0] &= ~PG_PROT;		/* make page writable */
		*(int *)&Sysmap[0] |= PG_KW;
		mtpr(TBIS, &rpb);			/* flush its stale pte */
		rpb.rp_flag = 1;
		break;
#endif

#ifdef VAX650
	case VAX_650:
		/* set boot-on-halt flag in "console mailbox" */
		ka650ssc.ssc_cpmbx &= ~CPMB650_HALTACT;
		ka650ssc.ssc_cpmbx |= CPMB650_REBOOT;
		break;
#endif

	default:
		tocons(TXDB_BOOT);	/* ask the console to reboot */
	}

	/*
	 * Except on 780s and 8600s, boot flags go in r5.  SBI
	 * VAXen do not care, so copy boot flags to r5 always.
	 */
	asm("movl r11,r5");
	for (;;) {
		asm("halt");		/* console takes over from here */
	}
}
667 
/*
 * Write the character/command c to the console, waiting for the
 * transmitter to become ready and applying per-CPU quirks.
 * On the KA650 every TXDB write reaches the real terminal, so
 * console commands are suppressed entirely there.
 */
tocons(c)
{
	register int oldmask;

	/* wait for the transmitter; remember TXCS for the 8600 restore */
	while (((oldmask = mfpr(TXCS)) & TXCS_RDY) == 0)
		continue;

	switch (cpu) {

#if VAX8200 || VAX780 || VAX750 || VAX730 || VAX630
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_630:
		c |= TXDB_CONS;	/* presumably flags c as a console command,
				 * not terminal data -- see rpb.h */
		break;
#endif

#if VAX8600
	case VAX_8600:
		/* select the logical console before writing */
		mtpr(TXCS, TXCS_LCONS | TXCS_WMASK);
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		break;
#endif

#if VAX650
	case VAX_650:
		/* everything is a real console terminal character on ka650 */
		return;
#endif
	}

	mtpr(TXDB, c);

#if VAX8600
	/* restore the previous 8600 console selection */
	switch (cpu) {

	case VAX_8600:
		while ((mfpr(TXCS) & TXCS_RDY) == 0)
			continue;
		mtpr(TXCS, oldmask | TXCS_WMASK);
		break;
	}
#endif
#ifdef lint
	oldmask = oldmask;
#endif
}
718 
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* pages dumped; also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.  Writes all of physical memory to
 * dumpdev at offset dumplo and reports the driver's verdict.
 */
dumpsys()
{

	rpb.rp_flag = 1;	/* note the dump in the rpb (see vaxboot) */
	msgbufmapped = 0;	/* mapping is gone; keep printf out of msgbuf */
	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumplo == 0 && bdevsw[major(dumpdev)].d_psize)
		dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) - physmem;
	if (dumplo < 0)
		dumplo = 0;
	dumpsize = physmem;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error");
		break;

	default:
		printf("succeeded");
		break;
	}
}
767 
/*
 * Machine check error recovery code.
 * Give the CPU-specific handler a chance to recover; if it
 * cannot, report memory errors and panic.
 */
machinecheck(cmcf)
	caddr_t cmcf;		/* machine check frame from the trap */
{

	if ((*cpuops->cpu_mchk)(cmcf) == MCHK_RECOVERED)
		return;
	(*cpuops->cpu_memerr)();
	panic("mchk");
}
780 
781 #if defined(VAX780) || defined(VAX750)
/*
 * These strings are shared between the 780 and 750 machine check code
 * in ka780.c and ka730.c.
 * NOTE(review): presumably indexed by the machine-check summary code;
 * zero entries are codes with no printable name -- confirm against
 * the users in ka780.c/ka730.c.
 */
char *mc780750[16] = {
	"cp read",	"ctrl str par",	"cp tbuf par",	"cp cache par",
	"cp rdtimo", 	"cp rds",	"ucode lost",	0,
	0,		0,		"ib tbuf par",	0,
	"ib rds",	"ib rd timo",	0,		"ib cache par"
};
792 #endif
793 
794 /*
795  * Return the best possible estimate of the time in the timeval
796  * to which tvp points.  We do this by reading the interval count
797  * register to determine the time remaining to the next clock tick.
798  * We must compensate for wraparound which is not yet reflected in the time
799  * (which happens when the ICR hits 0 and wraps after the splhigh(),
800  * but before the mfpr(ICR)).  Also check that this time is no less than
801  * any previously-reported time, which could happen around the time
802  * of a clock adjustment.  Just for fun, we guarantee that the time
803  * will be greater than the value obtained by a previous call.
804  */
805 microtime(tvp)
806 	register struct timeval *tvp;
807 {
808 	int s = splhigh();
809 	static struct timeval lasttime;
810 	register long t;
811 
812 	*tvp = time;
813 	t =  mfpr(ICR);
814 	if (t < -tick / 2 && (mfpr(ICCS) & ICCS_INT))
815 		t += tick;
816 	tvp->tv_usec += tick + t;
817 	if (tvp->tv_usec > 1000000) {
818 		tvp->tv_sec++;
819 		tvp->tv_usec -= 1000000;
820 	}
821 	if (tvp->tv_sec == lasttime.tv_sec &&
822 	    tvp->tv_usec <= lasttime.tv_usec &&
823 	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
824 		tvp->tv_sec++;
825 		tvp->tv_usec -= 1000000;
826 	}
827 	lasttime = *tvp;
828 	splx(s);
829 }
830 
/*
 * Set up CPU-specific state: enable the hardware cache and,
 * where one is present with a control register, the floating
 * point accelerator.
 */
initcpu()
{
	/*
	 * Enable cache.
	 */
	switch (cpu) {

#if VAX8600
	case VAX_8600:
		mtpr(CSWP, 3);
		break;
#endif
#if VAX8200
	case VAX_8200:
		mtpr(CADR, 0);
		break;
#endif
#if VAX780
	case VAX_780:
		mtpr(SBIMT, 0x200000);
		break;
#endif
#if VAX750
	case VAX_750:
		mtpr(CADR, 0);
		break;
#endif
	default:
		break;
	}

	/*
	 * Enable floating point accelerator if it exists
	 * and has control register.
	 */
	switch(cpu) {

#if VAX8600 || VAX780
	case VAX_8600:
	case VAX_780:
		if ((mfpr(ACCS) & 0xff) != 0) {
			printf("Enabling FPA\n");
			mtpr(ACCS, 0x8000);
		}
		/* FALLTHROUGH into default (harmless: it only breaks) */
#endif
	default:
		break;
	}
}
880 
/*
 * Return a reasonable approximation of the time of day register.
 * More precisely, return a number that increases by one about
 * once every ten milliseconds.
 */
todr()
{

	switch (cpu) {

#if VAX8600 || VAX8200 || VAX780 || VAX750 || VAX730 || VAX650
	case VAX_8600:
	case VAX_8200:
	case VAX_780:
	case VAX_750:
	case VAX_730:
	case VAX_650:
		return (mfpr(TODR));	/* hardware time-of-day register */
#endif

#if VAX630
	case VAX_630:
		/* XXX crude: no usable TODR; burn 10ms and count calls */
		{ static int t; DELAY(10000); return (++t); }
#endif

	default:
		panic("todr");
	}
	/* NOTREACHED */
}
912