/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, The Mach Operating System project at
 * Carnegie-Mellon University, Ralph Campbell, Sony Corp. and Kazumasa
 * Utashiro of Software Research Associates, Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)machdep.c	7.11 (Berkeley) 05/13/93
 */

/* from: Utah $Hdr: machdep.c 1.63 91/04/24$ */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/clist.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/user.h>
#include <sys/exec.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <vm/vm_kern.h>

#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/psl.h>
#include <machine/pte.h>

#include <machine/adrsmap.h>

vm_map_t buffer_map;

/* the following is used externally (sysctl_hw) */
char	machine[] = "SONY";	/* cpu "architecture" */
char	cpu_model[30];

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif
int	msgbufmapped = 0;	/* set when safe to use msgbuf */
int	maxmem;			/* max memory per process */
int	physmem;		/* max supported memory, changes to actual */
/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

struct	user *proc0paddr;
struct	proc nullproc;		/* for use by swtch_exit() */

/*
 * Do all the stuff that locore normally does before calling main().
 * Process arguments passed to us by the prom monitor.
 * Return the first page address following the system.
 */
mach_init(x_boothowto, x_unknown, x_bootdev, x_maxmem)
	int x_boothowto;
	int x_unknown;
	int x_bootdev;
	int x_maxmem;
{
	register char *cp;
	register int i;
	register unsigned firstaddr;
	register caddr_t v;
	caddr_t start;
	extern u_long bootdev;
	extern char edata[], end[];
	extern char MachUTLBMiss[], MachUTLBMissEnd[];
	extern char MachException[], MachExceptionEnd[];

	/*
	 * Save parameters into kernel work area.
	 */
	*(int *)(MACH_CACHED_TO_UNCACHED(MACH_MAXMEMSIZE_ADDR)) = x_maxmem;
	*(int *)(MACH_CACHED_TO_UNCACHED(MACH_BOOTDEV_ADDR)) = x_bootdev;
	*(int *)(MACH_CACHED_TO_UNCACHED(MACH_BOOTSW_ADDR)) = x_boothowto;

	/* clear the BSS segment */
	v = (caddr_t)pmax_round_page(end);
	bzero(edata, v - edata);

	boothowto = x_boothowto;
	bootdev = x_bootdev;
	maxmem = physmem = pmax_btop(x_maxmem);

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
#ifdef GENERIC
	boothowto |= RB_SINGLE | RB_ASKNAME;
#endif
#ifdef KADB
	boothowto |= RB_KDB;
#endif

#ifdef MFS
	/*
	 * Check to see if a mini-root was loaded into memory. It resides
	 * at the start of the next page just after the end of BSS.
	 */
	if (boothowto & RB_MINIROOT) {
		boothowto |= RB_DFLTROOT;
		v += mfs_initminiroot(v);
	}
#endif

	/*
	 * Init mapping for u page(s) for proc[0], pm_tlbpid 1.
	 */
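	/*
	 * (The loop below wires each u-area page into a fixed TLB entry at
	 * UADDR under tlbpid 1 and records the PTE in md_upte[], so proc0's
	 * u area can be referenced before the VM system is bootstrapped.)
	 */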
	start = v;
	curproc->p_addr = proc0paddr = (struct user *)v;
	curproc->p_md.md_regs = proc0paddr->u_pcb.pcb_regs;
	firstaddr = MACH_CACHED_TO_PHYS(v);
	for (i = 0; i < UPAGES; i++) {
		MachTLBWriteIndexed(i,
			(UADDR + (i << PGSHIFT)) | (1 << VMMACH_TLB_PID_SHIFT),
			curproc->p_md.md_upte[i] = firstaddr | PG_V | PG_M);
		firstaddr += NBPG;
	}
	v += UPAGES * NBPG;
	MachSetPID(1);

	/*
	 * init nullproc for swtch_exit().
	 * init mapping for u page(s), pm_tlbpid 0
	 * This could be used for an idle process.
	 */
	nullproc.p_addr = (struct user *)v;
	nullproc.p_md.md_regs = nullproc.p_addr->u_pcb.pcb_regs;
	bcopy("nullproc", nullproc.p_comm, sizeof("nullproc"));
	for (i = 0; i < UPAGES; i++) {
		nullproc.p_md.md_upte[i] = firstaddr | PG_V | PG_M;
		firstaddr += NBPG;
	}
	v += UPAGES * NBPG;

	/* clear pages for u areas */
	bzero(start, v - start);

	/*
	 * Copy down exception vector code.
	 */
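	/*
	 * (Only 0x80 bytes are available at the UTLB miss vector before the
	 * general exception vector begins, hence the size check below.)
	 */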
	if (MachUTLBMissEnd - MachUTLBMiss > 0x80)
		panic("startup: UTLB code too large");
	bcopy(MachUTLBMiss, (char *)MACH_UTLB_MISS_EXC_VEC,
		MachUTLBMissEnd - MachUTLBMiss);
	bcopy(MachException, (char *)MACH_GEN_EXC_VEC,
		MachExceptionEnd - MachException);

	/*
	 * Clear out the I and D caches.
	 */
	MachConfigCache();
	MachFlushCache();

	/*
	 * Initialize error message buffer (at end of core).
	 */
	maxmem -= btoc(sizeof (struct msgbuf));
	msgbufp = (struct msgbuf *)(MACH_PHYS_TO_CACHED(maxmem << PGSHIFT));
	msgbufmapped = 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 *
	 * These data structures are allocated here instead of cpu_startup()
	 * because physical memory is directly addressable. We don't have
	 * to map these into virtual address space.
	 */
	start = v;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
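	/*
	 * valloc(name, type, num) carves space for `num' objects of `type'
	 * out of the directly-mapped region starting at `v' and advances
	 * `v' past them; valloclim() additionally records the end address
	 * in `lim'.  For example, valloc(callout, struct callout, ncallout)
	 * below leaves `callout' pointing at ncallout contiguous callout
	 * structures (the whole region is zeroed further down).
	 */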
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * We allocate more buffer space than the BSD standard of
	 * using 10% of memory for the first 2 Meg, 5% of remaining.
	 * We just allocate a flat 10%.  Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
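	/*
	 * (For example, assuming 4 KB pages and a CLSIZE of 1, a 16 MB
	 * machine has physmem = 4096, giving bufpages = 409, nbuf = 409,
	 * and nswbuf = 204 after rounding down to an even count.)
	 */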
	if (bufpages == 0)
		bufpages = physmem / 10 / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

	/*
	 * Clear allocated memory.
	 */
	bzero(start, v - start);

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap((vm_offset_t)v);
}

/*
 * Console initialization: called early on from main,
 * before vm init or startup.  Do enough configuration
 * to choose and initialize a console.
 * XXX need something better here.
 */
#define	SCC_CONSOLE	0
#define	SW_CONSOLE	0x07
#define	SW_NWB512	0x04
#define	SW_NWB225	0x01
#define	SW_FBPOP	0x02
#define	SW_FBPOP1	0x06
#define	SW_FBPOP2	0x03
#define	SW_AUTOSEL	0x07
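/*
 * The low SW_CONSOLE bits of the DIP switch select the console device:
 * 0 means the serial console on the SCC, anything else selects the
 * bitmap display when one is configured.
 */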
consinit()
{
	extern dev_t consdev;
	extern struct tty *constty, *cn_tty, *rs_tty;
	int dipsw = (int)*(volatile u_char *)DIP_SWITCH;

#include "bm.h"
#if NBM > 0
#if defined(news3200) || defined(news3400)	/* KU:XXX */
	fbbm_probe(dipsw|2);
#else
	fbbm_probe(dipsw);
#endif
	vt100_open();
	setup_fnt();
	setup_fnt24();
#else
	dipsw &= SW_CONSOLE;
#endif

	switch (dipsw & SW_CONSOLE) {
	    case 0:
		scc_open(SCC_CONSOLE);
		consdev = makedev(1, 0);
		constty = rs_tty;
		break;

	    default:
#if NBM > 0
		consdev = makedev(22, 0);
		constty = cn_tty;
#endif
		break;
	}
	return(0);
}

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 */
cpu_startup()
{
	register unsigned i;
	register caddr_t v;
	int base, residual;
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate virtual address space for file I/O buffers.
	 * Note they are different than the array of headers, 'buf',
	 * and usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
				   &maxaddr, size, FALSE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
			&minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
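		/*
		 * (For instance, with bufpages = 100 and nbuf = 16, base is
		 * 6 and residual is 4, so the first 4 buffers get 7 pages of
		 * physical memory each and the remaining 12 get 6.)
		 */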
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
	}
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 16 * NCARGS, TRUE);
	/*
	 * Allocate a submap for physio
	 */
	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 VM_PHYS_SIZE, TRUE);

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
				   M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
			       VM_MBUF_SIZE, FALSE);
	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	configure();
}

/*
 * Set registers on exec.
 * Clear all registers except sp, pc.
 */
setregs(p, entry, retval)
	register struct proc *p;
	u_long entry;
	int retval[2];
{
	int sp = p->p_md.md_regs[SP];
	extern struct proc *machFPCurProcPtr;

	bzero((caddr_t)p->p_md.md_regs, (FSR + 1) * sizeof(int));
	p->p_md.md_regs[SP] = sp;
	p->p_md.md_regs[PC] = entry & ~3;
	p->p_md.md_regs[PS] = PSL_USERSET;
	p->p_md.md_flags &= ~MDP_FPUSED;
	if (machFPCurProcPtr == p)
		machFPCurProcPtr = (struct proc *)0;
}

/*
 * WARNING: code in locore.s assumes the layout shown for sf_signum
 * thru sf_handler so... don't screw with them!
 */
struct sigframe {
	int	sf_signum;		/* signo for handler */
	int	sf_code;		/* additional info for handler */
	struct	sigcontext *sf_scp;	/* context ptr for handler */
	sig_t	sf_handler;		/* handler addr for u_sigc */
	struct	sigcontext sf_sc;	/* actual context */
};

#ifdef DEBUG
int sigdebug = 0;
int sigpid = 0;
#define SDB_FOLLOW	0x01
#define SDB_KSTACK	0x02
#define SDB_FPSTATE	0x04
#endif

/*
 * Send an interrupt to process.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct proc *p = curproc;
	register struct sigframe *fp;
	register int *regs;
	register struct sigacts *psp = p->p_sigacts;
	int oonstack, fsize;
	struct sigcontext ksc;
	extern char sigcode[], esigcode[];

	regs = p->p_md.md_regs;
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in data space, the
	 * call to grow() is a nop, and the copyout()
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	fsize = sizeof(struct sigframe);
	if ((psp->ps_flags & SAS_ALTSTACK) &&
	    (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
					 psp->ps_sigstk.ss_size - fsize);
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else
		fp = (struct sigframe *)(regs[SP] - fsize);
	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
		(void)grow(p, (unsigned)fp);
#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) ||
	    (sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d ssp %x usp %x scp %x\n",
		       p->p_pid, sig, &oonstack, fp, &fp->sf_sc);
#endif
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	ksc.sc_onstack = oonstack;
	ksc.sc_mask = mask;
	ksc.sc_pc = regs[PC];
	ksc.sc_regs[ZERO] = 0xACEDBADE;		/* magic number */
	bcopy((caddr_t)&regs[1], (caddr_t)&ksc.sc_regs[1],
		sizeof(ksc.sc_regs) - sizeof(int));
	ksc.sc_fpused = p->p_md.md_flags & MDP_FPUSED;
	if (ksc.sc_fpused) {
		extern struct proc *machFPCurProcPtr;

		/* if FPU has current state, save it first */
		if (p == machFPCurProcPtr)
			MachSaveCurFPState(p);
		bcopy((caddr_t)&p->p_md.md_regs[F0], (caddr_t)ksc.sc_fpregs,
			sizeof(ksc.sc_fpregs));
	}
	if (copyout((caddr_t)&ksc, (caddr_t)&fp->sf_sc, sizeof(ksc))) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	regs[A0] = sig;
	regs[A1] = code;
	regs[A2] = (int)&fp->sf_sc;
	regs[A3] = (int)catcher;

	regs[PC] = (int)catcher;
	regs[SP] = (int)fp;
	/*
	 * Signal trampoline code is at base of user stack.
	 */
	regs[RA] = (int)PS_STRINGS - (esigcode - sigcode);
#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) ||
	    (sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d returns\n",
		       p->p_pid, sig);
#endif
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;
};
/* ARGSUSED */
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register int *regs;
	struct sigcontext ksc;
	int error;

	scp = uap->sigcntxp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
#endif
	regs = p->p_md.md_regs;
	/*
	 * Test and fetch the context structure.
	 * We grab it all at once for speed.
	 */
	error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof(ksc));
	if (error || ksc.sc_regs[ZERO] != 0xACEDBADE) {
#ifdef DEBUG
		if (!(sigdebug & SDB_FOLLOW))
			printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
		printf("  old sp %x ra %x pc %x\n",
			regs[SP], regs[RA], regs[PC]);
		printf("  new sp %x ra %x pc %x err %d z %x\n",
			ksc.sc_regs[SP], ksc.sc_regs[RA], ksc.sc_regs[PC],
			error, ksc.sc_regs[ZERO]);
#endif
		return (EINVAL);
	}
	scp = &ksc;
	/*
	 * Restore the user supplied information
	 */
	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	p->p_sigmask = scp->sc_mask &~ sigcantmask;
	regs[PC] = scp->sc_pc;
	bcopy((caddr_t)&scp->sc_regs[1], (caddr_t)&regs[1],
		sizeof(scp->sc_regs) - sizeof(int));
	if (scp->sc_fpused)
		bcopy((caddr_t)scp->sc_fpregs, (caddr_t)&p->p_md.md_regs[F0],
			sizeof(scp->sc_fpregs));
	return (EJUSTRETURN);
}

int	waittime = -1;

boot(howto)
	register int howto;
{

	/* take a snap shot before clobbering any registers */
	if (curproc)
		savectx(curproc->p_addr, 0);

#ifdef DEBUG
	if (panicstr)
		traceback();
#endif

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) spl0();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);
#ifdef notyet
#include "fd.h"
#if NFD > 0
		fdshutdown();
#endif
#endif
		sync(&proc0, (void *)NULL, (int *)NULL);

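		/*
		 * Wait for the buffer I/O started by the sync above to
		 * complete; give up after 20 passes with increasing delays.
		 */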
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	(void) splhigh();		/* extreme priority */
	if (howto & RB_HALT) {
		halt(howto);
		/*NOTREACHED*/
	} else {
		if (howto & RB_DUMP)
			dumpsys();
		halt(howto);
		/*NOTREACHED*/
	}
	/*NOTREACHED*/
}

halt(howto)
	int howto;
{
	if (*(volatile u_char *)DIP_SWITCH & 0x20)
		howto |= RB_HALT;
	to_monitor(howto);
	/*NOTREACHED*/
}

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;

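/*
 * Size the crash dump: dumpsize is the number of pages to dump and dumplo
 * the starting disk block on dumpdev, clipped so the dump fits on the
 * device and stays clear of the disk label area.
 */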
dumpconf()
{
	int nblks;

	dumpsize = physmem;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			dumplo = nblks - btodb(ctob(physmem));
	}
	/*
	 * Don't dump on the first CLBYTES (why CLBYTES?)
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < btodb(CLBYTES))
		dumplo = btodb(CLBYTES);
}

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{
	int error;

	msgbufmapped = 0;
	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch (error = (*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	default:
		printf("error %d\n", error);
		break;

	case 0:
		printf("succeeded\n");
	}
}

/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.  Unfortunately, we can't read the hardware registers.
 * We guarantee that the time will be greater than the value obtained by a
 * previous call.
 */
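/*
 * (If the clock has not advanced since the previous call, the microsecond
 * field is nudged one past the last value returned, carrying into tv_sec
 * when it would exceed one million, so callers always see strictly
 * increasing timestamps.)
 */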
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splclock();
	static struct timeval lasttime;

	*tvp = time;
#ifdef notdef
	tvp->tv_usec += clkread();
	while (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
#endif
	if (tvp->tv_sec == lasttime.tv_sec &&
	    tvp->tv_usec <= lasttime.tv_usec &&
	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	lasttime = *tvp;
	splx(s);
}

initcpu()
{

	/*
	 * clear LEDs
	 */
	*(char*)DEBUG_PORT = (char)DP_WRITE|DP_LED0|DP_LED1|DP_LED2|DP_LED3;

	/*
	 * clear all interrupts
	 */
	*(char*)INTCLR0 = 0;
	*(char*)INTCLR1 = 0;

	/*
	 * It is not yet time to enable the timer interrupt.
	 *
	 *	INTEN0:  PERR ABORT BERR TIMER KBD  MS    CFLT CBSY
	 *		  o     o    o     x    o    o     x    x
	 *	INTEN1:  BEEP SCC  LANCE DMA  SLOT1 SLOT3 EXT1 EXT3
	 *		  x     o    o     o    o    o     x    x
	 */

	*(char*)INTEN0 = (char) INTEN0_PERR|INTEN0_ABORT|INTEN0_BERR|
				INTEN0_KBDINT|INTEN0_MSINT;

	*(char*)INTEN1 = (char) INTEN1_SCC|INTEN1_LANCE|INTEN1_DMA|
				INTEN1_SLOT1|INTEN1_SLOT3;

	spl0();		/* safe to turn interrupts on now */
}

/*
 * Convert an ASCII string into an integer.
 */
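/*
 * Accepts optional leading blanks and tabs, one or more `-' signs, and a
 * 0x/0X (hex), 0b/0B (binary), or other leading-zero (octal) prefix
 * before the digits.
 */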
int
atoi(s)
	char *s;
{
	int c;
	unsigned base = 10, d;
	int neg = 0, val = 0;

	if (s == 0 || (c = *s++) == 0)
		goto out;

	/* skip spaces if any */
	while (c == ' ' || c == '\t')
		c = *s++;

	/* parse sign, allow more than one (compat) */
	while (c == '-') {
		neg = !neg;
		c = *s++;
	}

	/* parse base specification, if any */
	if (c == '0') {
		c = *s++;
		switch (c) {
		case 'X':
		case 'x':
			base = 16;
			break;
		case 'B':
		case 'b':
			base = 2;
			break;
		default:
			base = 8;
			break;
		}
	}

	/* parse number proper; stop at the first non-digit of the base */
	for (;;) {
		if (c >= '0' && c <= '9')
			d = c - '0';
		else if (c >= 'a' && c <= 'z')
			d = c - 'a' + 10;
		else if (c >= 'A' && c <= 'Z')
			d = c - 'A' + 10;
		else
			break;
		if (d >= base)
			break;
		val *= base;
		val += d;
		c = *s++;
	}
	if (neg)
		val = -val;
out:
	return val;
}

#ifdef CPU_SINGLE
/*
 * small ring buffers for keyboard/mouse
 */
struct ring_buf {
	u_char head;
	u_char tail;
	u_char count;
	u_char buf[13];
} ring_buf[2];
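/*
 * xputc() and xgetc() below form a small producer/consumer queue: `head'
 * is the insertion index, `tail' the removal index, and `count' tells a
 * full buffer from an empty one.  Both run at splhigh() so they can be
 * shared safely between interrupt handlers and top-half code.
 */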

xputc(c, chan)
	u_char c;
	int chan;
{
	register struct ring_buf *p = &ring_buf[chan];
	int s = splhigh();

	if (p->count >= sizeof (p->buf)) {
		(void) splx(s);
		return (-1);
	}
	p->buf[p->head] = c;
	if (++p->head >= sizeof (p->buf))
		p->head = 0;
	p->count++;
	(void) splx(s);
	return (c);
}

xgetc(chan)
	int chan;
{
	register struct ring_buf *p = &ring_buf[chan];
	int c;
	int s = splhigh();

	if (p->count == 0) {
		(void) splx(s);
		return (-1);
	}
	c = p->buf[p->tail];
	if (++p->tail >= sizeof (p->buf))
		p->tail = 0;
	p->count--;
	(void) splx(s);
	return (c);
}
#endif /* CPU_SINGLE */