1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department, The Mach Operating System project at
9  * Carnegie-Mellon University, Ralph Campbell, Sony Corp. and Kazumasa
10  * Utashiro of Software Research Associates, Inc.
11  *
12  * %sccs.include.redist.c%
13  *
14  *	@(#)machdep.c	7.4 (Berkeley) 07/10/92
15  */
16 
17 /* from: Utah $Hdr: machdep.c 1.63 91/04/24$ */
18 
19 #include "param.h"
20 #include "systm.h"
21 #include "signalvar.h"
22 #include "kernel.h"
23 #include "map.h"
24 #include "proc.h"
25 #include "buf.h"
26 #include "reboot.h"
27 #include "conf.h"
28 #include "file.h"
29 #include "clist.h"
30 #include "callout.h"
31 #include "malloc.h"
32 #include "mbuf.h"
33 #include "msgbuf.h"
34 #include "user.h"
35 #ifdef SYSVSHM
36 #include "shm.h"
37 #endif
38 
39 #include "vm/vm.h"
40 #include "vm/vm_kern.h"
41 #include "vm/vm_page.h"
42 
43 #include "../include/cpu.h"
44 #include "../include/reg.h"
45 #include "../include/psl.h"
46 #include "../include/pte.h"
47 
48 #include "../include/adrsmap.h"
49 
50 vm_map_t buffer_map;
51 
52 /*
53  * Declare these as initialized data so we can patch them.
54  */
55 int	nswbuf = 0;
56 #ifdef	NBUF
57 int	nbuf = NBUF;
58 #else
59 int	nbuf = 0;
60 #endif
61 #ifdef	BUFPAGES
62 int	bufpages = BUFPAGES;
63 #else
64 int	bufpages = 0;
65 #endif
66 int	msgbufmapped;		/* set when safe to use msgbuf */
67 int	maxmem;			/* max memory per process */
68 int	physmem;		/* max supported memory, changes to actual */
69 /*
70  * safepri is a safe priority for sleep to set for a spin-wait
71  * during autoconfiguration or after a panic.
72  */
73 int	safepri = PSL_LOWIPL;
74 
75 struct	user *proc0paddr;
76 struct	proc nullproc;		/* for use by swtch_exit() */
77 
78 /*
79  * Do all the stuff that locore normally does before calling main().
80  * Process arguments passed to us by the prom monitor.
81  * Return the first page address following the system.
82  */
mach_init(x_boothowto, x_unknown, x_bootdev, x_maxmem)
	int x_boothowto;
	int x_unknown;
	int x_bootdev;
	int x_maxmem;
88 {
89 	register char *cp;
90 	register int i;
91 	register unsigned firstaddr;
92 	register caddr_t v;
93 	caddr_t start;
94 	extern u_long bootdev;
95 	extern char edata[], end[];
96 	extern char MachUTLBMiss[], MachUTLBMissEnd[];
97 	extern char MachException[], MachExceptionEnd[];
98 #ifdef ATTR
99 	extern char *pmap_attributes;
100 #endif
101 
102 	/* clear the BSS segment */
103 	v = (caddr_t)pmax_round_page(end);
104 	bzero(edata, v - edata);
105 
106 	boothowto = x_boothowto;
107 	bootdev = x_bootdev;
108 	maxmem = physmem = pmax_btop(x_maxmem);
109 
110 	/*
111 	 * Look at arguments passed to us and compute boothowto.
112 	 */
113 #ifdef GENERIC
114 	boothowto |= RB_SINGLE | RB_ASKNAME;
115 #endif
116 #ifdef KADB
117 	boothowto |= RB_KDB;
118 #endif
119 
120 #ifdef MFS
121 	/*
122 	 * Check to see if a mini-root was loaded into memory. It resides
123 	 * at the start of the next page just after the end of BSS.
124 	 */
125 	if (boothowto & RB_MINIROOT)
126 		v += mfs_initminiroot(v);
127 #endif
128 
129 	/*
130 	 * Init mapping for u page(s) for proc[0], pm_tlbpid 1.
131 	 */
132 	start = v;
133 	curproc->p_addr = proc0paddr = (struct user *)v;
134 	curproc->p_md.md_regs = proc0paddr->u_pcb.pcb_regs;
135 	firstaddr = MACH_CACHED_TO_PHYS(v);
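	/*
	 * Wire the U area into the TLB by hand: entry i maps virtual
	 * address UADDR + i*NBPG (under TLB pid 1) to the physical pages
	 * just carved out of "v", marked valid and writable (PG_V|PG_M).
	 * The PTEs are also remembered in md_upte[] so the context
	 * switch code can reload them later.
	 */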
136 	for (i = 0; i < UPAGES; i++) {
137 		MachTLBWriteIndexed(i,
138 			(UADDR + (i << PGSHIFT)) | (1 << VMMACH_TLB_PID_SHIFT),
139 			curproc->p_md.md_upte[i] = firstaddr | PG_V | PG_M);
140 		firstaddr += NBPG;
141 	}
142 	v += UPAGES * NBPG;
143 	MachSetPID(1);
144 
145 	/*
146 	 * init nullproc for swtch_exit().
147 	 * init mapping for u page(s), pm_tlbpid 0
148 	 * This could be used for an idle process.
149 	 */
150 	nullproc.p_addr = (struct user *)v;
151 	nullproc.p_md.md_regs = ((struct user *)v)->u_pcb.pcb_regs;
152 	for (i = 0; i < UPAGES; i++) {
153 		nullproc.p_md.md_upte[i] = firstaddr | PG_V | PG_M;
154 		firstaddr += NBPG;
155 	}
156 	v += UPAGES * NBPG;
157 
158 	/* clear pages for u areas */
159 	bzero(start, v - start);
160 
161 	/*
162 	 * Copy down exception vector code.
163 	 */
164 	if (MachUTLBMissEnd - MachUTLBMiss > 0x80)
165 		panic("startup: UTLB code too large");
166 	bcopy(MachUTLBMiss, (char *)MACH_UTLB_MISS_EXC_VEC,
167 		MachUTLBMissEnd - MachUTLBMiss);
168 	bcopy(MachException, (char *)MACH_GEN_EXC_VEC,
169 		MachExceptionEnd - MachException);
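	/*
	 * The 0x80 limit enforced above comes from the MIPS vector
	 * layout: the UTLB miss vector and the general exception vector
	 * are only 0x80 bytes apart, so a larger UTLB handler would
	 * overwrite the general exception code.
	 */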
170 
171 	/*
172 	 * Clear out the I and D caches.
173 	 */
174 	MachConfigCache();
175 	MachFlushCache();
176 
177 	/*
178 	 * Initialize error message buffer (at end of core).
179 	 */
180 	maxmem -= btoc(sizeof (struct msgbuf));
181 	msgbufp = (struct msgbuf *)(MACH_PHYS_TO_CACHED(maxmem << PGSHIFT));
182 	msgbufmapped = 1;
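	/*
	 * The message buffer occupies the last physical page(s);
	 * trimming maxmem keeps the VM system from ever handing that
	 * memory out again.
	 */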
183 
184 	/*
185 	 * Allocate space for system data structures.
186 	 * The first available kernel virtual address is in "v".
187 	 * As pages of kernel virtual memory are allocated, "v" is incremented.
188 	 *
189 	 * These data structures are allocated here instead of cpu_startup()
190 	 * because physical memory is directly addressable. We don't have
191 	 * to map these into virtual address space.
192 	 */
193 	start = v;
194 
195 #define	valloc(name, type, num) \
196 	    (name) = (type *)v; v = (caddr_t)((name)+(num))
197 #define	valloclim(name, type, num, lim) \
198 	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
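	/*
	 * Each valloc() carves one table out of the linear region at "v"
	 * and advances v past it, e.g. valloc(buf, struct buf, nbuf)
	 * expands to
	 *	buf = (struct buf *)v; v = (caddr_t)(buf + nbuf);
	 * valloclim() additionally records the address just past the
	 * table in "lim".
	 */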
199 	valloc(cfree, struct cblock, nclist);
200 	valloc(callout, struct callout, ncallout);
201 	valloc(swapmap, struct map, nswapmap = maxproc * 2);
202 #ifdef SYSVSHM
203 	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
204 #endif
205 #ifdef ATTR
206 	/* this is allocated here just to save a few bytes */
207 	valloc(pmap_attributes, char, physmem);
208 #endif
209 
210 	/*
211 	 * Determine how many buffers to allocate.
212 	 * We allocate more buffer space than the BSD standard of
213 	 * using 10% of memory for the first 2 Meg, 5% of remaining.
	 * We just allocate a flat 10%.  Ensure a minimum of 16 buffers.
215 	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
216 	 */
217 	if (bufpages == 0)
218 		bufpages = physmem / 10 / CLSIZE;
219 	if (nbuf == 0) {
220 		nbuf = bufpages;
221 		if (nbuf < 16)
222 			nbuf = 16;
223 	}
224 	if (nswbuf == 0) {
225 		nswbuf = (nbuf / 2) &~ 1;	/* force even */
226 		if (nswbuf > 256)
227 			nswbuf = 256;		/* sanity */
228 	}
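	/*
	 * Example (assuming 4K pages and a CLSIZE of 1): a 16 Meg machine
	 * has physmem = 4096 pages, giving bufpages = 409, nbuf = 409 and
	 * nswbuf = 204, i.e. roughly 1.6 Meg of buffer cache.
	 */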
229 	valloc(swbuf, struct buf, nswbuf);
230 	valloc(buf, struct buf, nbuf);
231 
232 	/*
233 	 * Clear allocated memory.
234 	 */
235 	v = (caddr_t)pmax_round_page(v);
236 	bzero(start, v - start);
237 
238 	/*
239 	 * Initialize the virtual memory system.
240 	 */
241 	pmap_bootstrap((vm_offset_t)MACH_CACHED_TO_PHYS(v));
242 }
243 
244 /*
245  * Console initialization: called early on from main,
246  * before vm init or startup.  Do enough configuration
247  * to choose and initialize a console.
248  * XXX need something better here.
249  */
250 #define	SCC_CONSOLE	0
251 #define	SW_CONSOLE	0x07
252 #define	SW_NWB512	0x04
253 #define	SW_NWB225	0x01
254 #define	SW_FBPOP	0x02
255 #define	SW_FBPOP1	0x06
256 #define	SW_FBPOP2	0x03
257 #define	SW_AUTOSEL	0x07
258 consinit()
259 {
260 	extern dev_t consdev;
261 	extern struct tty *constty, *cn_tty, *rs_tty;
262 	int dipsw = (int)*(volatile u_char *)DIP_SWITCH;
263 
264 #include "bm.h"
265 #if NBM > 0
266 #ifdef news3200
267 	fbbm_probe(dipsw|2);
268 #else
269 	fbbm_probe(dipsw);
270 #endif
271 	vt100_open();
272 	setup_fnt();
273 	setup_fnt24();
274 #else
275 	dipsw &= SW_CONSOLE;
276 #endif
277 
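	/*
	 * The low three bits of the DIP switch select the console:
	 * 0 means the serial console on SCC channel 0, anything else
	 * uses the bitmap display when one is configured.
	 */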
278 	switch (dipsw & SW_CONSOLE) {
279 	    case 0:
280 		scc_open(SCC_CONSOLE);
281 		consdev = makedev(1, 0);
282 		constty = rs_tty;
283 		break;
284 
285 	    default:
286 #if NBM > 0
287 		consdev = makedev(22, 0);
288 		constty = cn_tty;
289 #endif
290 		break;
291 	}
	return (0);
293 }
294 
295 /*
296  * cpu_startup: allocate memory for variable-sized tables,
297  * initialize cpu, and do autoconfiguration.
298  */
299 cpu_startup()
300 {
301 	register unsigned i;
302 	register caddr_t v;
303 	int base, residual;
304 	extern long Usrptsize;
305 	extern struct map *useriomap;
306 #ifdef DEBUG
307 	extern int pmapdebug;
308 	int opmapdebug = pmapdebug;
309 #endif
310 	vm_offset_t minaddr, maxaddr;
311 	vm_size_t size;
312 
313 #ifdef DEBUG
314 	pmapdebug = 0;
315 #endif
316 
317 	/*
318 	 * Good {morning,afternoon,evening,night}.
319 	 */
320 	printf(version);
321 	printf("real mem = %d\n", ctob(physmem));
322 
323 	/*
324 	 * Allocate virtual address space for file I/O buffers.
325 	 * Note they are different than the array of headers, 'buf',
326 	 * and usually occupy more virtual memory than physical.
327 	 */
328 	size = MAXBSIZE * nbuf;
329 	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
330 				   &maxaddr, size, FALSE);
331 	minaddr = (vm_offset_t)buffers;
332 	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
333 			&minaddr, size, FALSE) != KERN_SUCCESS)
334 		panic("startup: cannot allocate buffers");
335 	base = bufpages / nbuf;
336 	residual = bufpages % nbuf;
337 	for (i = 0; i < nbuf; i++) {
338 		vm_size_t curbufsize;
339 		vm_offset_t curbuf;
340 
341 		/*
342 		 * First <residual> buffers get (base+1) physical pages
343 		 * allocated for them.  The rest get (base) physical pages.
344 		 *
345 		 * The rest of each buffer occupies virtual space,
346 		 * but has no physical memory allocated for it.
347 		 */
348 		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
349 		curbufsize = CLBYTES * (i < residual ? base+1 : base);
350 		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
351 		vm_map_simplify(buffer_map, curbuf);
352 	}
353 	/*
354 	 * Allocate a submap for exec arguments.  This map effectively
355 	 * limits the number of processes exec'ing at any time.
356 	 */
357 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
358 				 16*NCARGS, TRUE);
359 	/*
360 	 * Allocate a submap for physio
361 	 */
362 	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
363 				 VM_PHYS_SIZE, TRUE);
364 
365 	/*
366 	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
367 	 * we use the more space efficient malloc in place of kmem_alloc.
368 	 */
369 	mclrefcnt = malloc(NMBCLUSTERS + CLBYTES/MCLBYTES, M_MBUF, M_NOWAIT);
370 	bzero(mclrefcnt, NMBCLUSTERS + CLBYTES/MCLBYTES);
371 	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
372 			       VM_MBUF_SIZE, FALSE);
373 	/*
374 	 * Initialize callouts
375 	 */
376 	callfree = callout;
377 	for (i = 1; i < ncallout; i++)
378 		callout[i-1].c_next = &callout[i];
379 	callout[i-1].c_next = NULL;
380 
381 #ifdef DEBUG
382 	pmapdebug = opmapdebug;
383 #endif
384 	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
385 	printf("using %d buffers containing %d bytes of memory\n",
386 		nbuf, bufpages * CLBYTES);
387 	/*
388 	 * Set up CPU-specific registers, cache, etc.
389 	 */
390 	initcpu();
391 
392 	/*
393 	 * Set up buffers, so they can be used to read disk labels.
394 	 */
395 	bufinit();
396 
397 	/*
398 	 * Configure the system.
399 	 */
400 	configure();
401 }
402 
403 /*
404  * Set registers on exec.
405  * Clear all registers except sp, pc.
406  */
407 setregs(p, entry, retval)
408 	register struct proc *p;
409 	u_long entry;
410 	int retval[2];
411 {
412 	int sp = p->p_md.md_regs[SP];
413 	extern struct proc *machFPCurProcPtr;
414 
415 	bzero((caddr_t)p->p_md.md_regs, (FSR + 1) * sizeof(int));
416 	p->p_md.md_regs[SP] = sp;
417 	p->p_md.md_regs[PC] = entry;
418 	p->p_md.md_regs[PS] = PSL_USERSET;
	p->p_md.md_flags &= ~MDP_FPUSED;
420 	if (machFPCurProcPtr == p)
421 		machFPCurProcPtr = (struct proc *)0;
422 }
423 
424 /*
425  * WARNING: code in locore.s assumes the layout shown for sf_signum
426  * thru sf_handler so... don't screw with them!
427  */
428 struct sigframe {
429 	int	sf_signum;		/* signo for handler */
430 	int	sf_code;		/* additional info for handler */
431 	struct	sigcontext *sf_scp;	/* context ptr for handler */
432 	sig_t	sf_handler;		/* handler addr for u_sigc */
433 };
434 
435 #ifdef DEBUG
436 int sigdebug = 0;
437 int sigpid = 0;
438 #define SDB_FOLLOW	0x01
439 #define SDB_KSTACK	0x02
440 #define SDB_FPSTATE	0x04
441 #endif
442 
443 /*
444  * Send an interrupt to process.
445  */
446 void
447 sendsig(catcher, sig, mask, code)
448 	sig_t catcher;
449 	int sig, mask;
450 	unsigned code;
451 {
452 	register struct proc *p = curproc;
453 	register struct sigframe *fp;
454 	register struct sigacts *psp = p->p_sigacts;
455 	register struct sigcontext *scp;
456 	register int *regs;
457 	int oonstack, fsize;
458 	struct sigcontext ksc;
459 
460 	regs = p->p_md.md_regs;
461 	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
462 	/*
463 	 * Allocate and validate space for the signal handler
464 	 * context. Note that if the stack is in data space, the
465 	 * call to grow() is a nop, and the copyout()
466 	 * will fail if the process has not already allocated
467 	 * the space with a `brk'.
468 	 */
469 	if ((psp->ps_flags & SAS_ALTSTACK) &&
470 	    (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
471 	    (psp->ps_sigonstack & sigmask(sig))) {
472 		scp = (struct sigcontext *)(psp->ps_sigstk.ss_base +
473 		    psp->ps_sigstk.ss_size) - 1;
474 		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
475 	} else
476 		scp = (struct sigcontext *)regs[SP] - 1;
477 	fp = (struct sigframe *)scp - 1;
478 	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
479 		(void)grow(p, (unsigned)fp);
480 	/*
481 	 * Build the signal context to be used by sigreturn.
482 	 */
483 	ksc.sc_onstack = oonstack;
484 	ksc.sc_mask = mask;
485 	ksc.sc_pc = regs[PC];
486 	ksc.sc_regs[ZERO] = 0xACEDBADE;		/* magic number */
487 	bcopy((caddr_t)&regs[1], (caddr_t)&ksc.sc_regs[1],
488 		sizeof(ksc.sc_regs) - sizeof(int));
489 	ksc.sc_fpused = p->p_md.md_flags & MDP_FPUSED;
490 	if (ksc.sc_fpused) {
491 		extern struct proc *machFPCurProcPtr;
492 
493 		/* if FPU has current state, save it first */
494 		if (p == machFPCurProcPtr)
495 			MachSaveCurFPState(p);
496 		bcopy((caddr_t)&p->p_md.md_regs[F0], (caddr_t)ksc.sc_fpregs,
497 			sizeof(ksc.sc_fpregs));
498 	}
499 	if (copyout((caddr_t)&ksc, (caddr_t)scp, sizeof(ksc))) {
500 		/*
501 		 * Process has trashed its stack; give it an illegal
502 		 * instruction to halt it in its tracks.
503 		 */
504 		SIGACTION(p, SIGILL) = SIG_DFL;
505 		sig = sigmask(SIGILL);
506 		p->p_sigignore &= ~sig;
507 		p->p_sigcatch &= ~sig;
508 		p->p_sigmask &= ~sig;
509 		psignal(p, SIGILL);
510 		return;
511 	}
512 	/*
513 	 * Build the argument list for the signal handler.
514 	 */
515 	regs[A0] = sig;
516 	regs[A1] = code;
517 	regs[A2] = (int)scp;
518 	regs[A3] = (int)catcher;
519 
520 	regs[PC] = (int)catcher;
521 	regs[SP] = (int)fp;
522 	regs[RA] = KERNBASE;	/* this causes a trap which we interpret as
523 				 * meaning "do a sigreturn". */
524 #ifdef DEBUG
525 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
526 		printf("sendsig(%d): sig %d ssp %x usp %x scp %x\n",
527 		       p->p_pid, sig, &oonstack, fp, fp->sf_scp);
528 #endif
529 }
530 
531 /*
532  * System call to cleanup state after a signal
533  * has been taken.  Reset signal mask and
534  * stack state from context left by sendsig (above).
535  * Return to previous pc and psl as specified by
536  * context left by sendsig. Check carefully to
537  * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
539  * a machine fault.
540  */
541 struct sigreturn_args {
542 	struct sigcontext *sigcntxp;
543 };
544 /* ARGSUSED */
545 sigreturn(p, uap, retval)
546 	struct proc *p;
547 	struct sigreturn_args *uap;
548 	int *retval;
549 {
550 	register struct sigcontext *scp;
551 	register int *regs;
552 	struct sigcontext ksc;
553 	int error;
554 
560 	scp = uap->sigcntxp;
561 #ifdef DEBUG
562 	if (sigdebug & SDB_FOLLOW)
563 		printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
564 #endif
565 	regs = p->p_md.md_regs;
566 	/*
567 	 * Test and fetch the context structure.
568 	 * We grab it all at once for speed.
569 	 */
570 	error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof(ksc));
571 	if (error != 0 || ksc.sc_regs[ZERO] != 0xACEDBADE ||
572 	    (unsigned)ksc.sc_regs[SP] < (unsigned)regs[SP]) {
573 #ifdef DEBUG
574 		if (!(sigdebug & SDB_FOLLOW))
575 			printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
576 		printf("  old sp %x ra %x pc %x\n",
577 			regs[SP], regs[RA], regs[PC]);
578 		printf("  new sp %x ra %x pc %x err %d z %x\n",
579 			ksc.sc_regs[SP], ksc.sc_regs[RA], ksc.sc_regs[PC],
580 			error, ksc.sc_regs[ZERO]);
581 #endif
582 		if (regs[PC] == KERNBASE) {
583 			int sig;
584 
585 			/*
586 			 * Process has trashed its stack; give it an illegal
587 			 * instruction to halt it in its tracks.
588 			 */
589 			SIGACTION(p, SIGILL) = SIG_DFL;
590 			sig = sigmask(SIGILL);
591 			p->p_sigignore &= ~sig;
592 			p->p_sigcatch &= ~sig;
593 			p->p_sigmask &= ~sig;
594 			psignal(p, SIGILL);
595 		}
596 		return (EINVAL);
597 	}
598 	/*
599 	 * Restore the user supplied information
600 	 */
	if (ksc.sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	p->p_sigmask = ksc.sc_mask &~ sigcantmask;
606 	regs[PC] = ksc.sc_pc;
607 	bcopy((caddr_t)&ksc.sc_regs[1], (caddr_t)&regs[1],
608 		sizeof(ksc.sc_regs) - sizeof(int));
609 	ksc.sc_fpused = p->p_md.md_flags & MDP_FPUSED;
610 	if (ksc.sc_fpused)
611 		bcopy((caddr_t)ksc.sc_fpregs, (caddr_t)&p->p_md.md_regs[F0],
612 			sizeof(ksc.sc_fpregs));
613 	return (EJUSTRETURN);
614 }
615 
616 int	waittime = -1;
617 
618 boot(howto)
619 	register int howto;
620 {
621 
622 	/* take a snap shot before clobbering any registers */
623 	if (curproc)
624 		savectx(curproc->p_addr, 0);
625 
626 	howto |= RB_HALT; /* XXX */
627 	boothowto = howto;
628 	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
629 		register struct buf *bp;
630 		int iter, nbusy;
631 
632 		waittime = 0;
633 		(void) spl0();
634 		printf("syncing disks... ");
635 		/*
636 		 * Release vnodes held by texts before sync.
637 		 */
638 		if (panicstr == 0)
639 			vnode_pager_umount(NULL);
640 #ifdef notyet
641 #include "fd.h"
642 #if NFD > 0
643 		fdshutdown();
644 #endif
645 #endif
646 		sync(&proc0, (void *)NULL, (int *)NULL);
647 
648 		for (iter = 0; iter < 20; iter++) {
649 			nbusy = 0;
650 			for (bp = &buf[nbuf]; --bp >= buf; )
651 				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
652 					nbusy++;
653 			if (nbusy == 0)
654 				break;
655 			printf("%d ", nbusy);
656 			DELAY(40000 * iter);
657 		}
658 		if (nbusy)
659 			printf("giving up\n");
660 		else
661 			printf("done\n");
662 		/*
663 		 * If we've been adjusting the clock, the todr
664 		 * will be out of synch; adjust it now.
665 		 */
666 		resettodr();
667 	}
668 	(void) splhigh();		/* extreme priority */
669 	if (howto & RB_HALT) {
670 		halt(howto);
671 		/*NOTREACHED*/
672 	} else {
673 		if (howto & RB_DUMP)
674 			dumpsys();
675 		halt(howto);
676 		/*NOTREACHED*/
677 	}
678 	/*NOTREACHED*/
679 }
680 
681 halt(howto)
682 	int howto;
683 {
684 	if (*(volatile u_char *)DIP_SWITCH & 0x20)
685 		howto |= RB_HALT;
686 	to_monitor(howto);
687 	/*NOTREACHED*/
688 }
689 
690 int	dumpmag = 0x8fca0101;	/* magic number for savecore */
691 int	dumpsize = 0;		/* also for savecore */
692 long	dumplo = 0;
693 
694 dumpconf()
695 {
696 	int nblks;
697 
698 	dumpsize = physmem;
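	/*
	 * If the dump device is too small, clip dumpsize to what fits
	 * past dumplo; otherwise place dumplo so that the dump ends at
	 * the last block of the device.
	 */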
699 	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
700 		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
701 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
702 			dumpsize = btoc(dbtob(nblks - dumplo));
703 		else if (dumplo == 0)
704 			dumplo = nblks - btodb(ctob(physmem));
705 	}
706 	/*
707 	 * Don't dump on the first CLBYTES (why CLBYTES?)
708 	 * in case the dump device includes a disk label.
709 	 */
710 	if (dumplo < btodb(CLBYTES))
711 		dumplo = btodb(CLBYTES);
712 }
713 
714 /*
715  * Doadump comes here after turning off memory management and
716  * getting on the dump stack, either when called above, or by
717  * the auto-restart code.
718  */
719 dumpsys()
720 {
721 	int error;
722 
723 	msgbufmapped = 0;
724 	if (dumpdev == NODEV)
725 		return;
726 	/*
727 	 * For dumps during autoconfiguration,
728 	 * if dump device has already configured...
729 	 */
730 	if (dumpsize == 0)
731 		dumpconf();
732 	if (dumplo < 0)
733 		return;
734 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
735 	printf("dump ");
736 	switch (error = (*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
737 
738 	case ENXIO:
739 		printf("device bad\n");
740 		break;
741 
742 	case EFAULT:
743 		printf("device not ready\n");
744 		break;
745 
746 	case EINVAL:
747 		printf("area improper\n");
748 		break;
749 
750 	case EIO:
751 		printf("i/o error\n");
752 		break;
753 
754 	default:
755 		printf("error %d\n", error);
756 		break;
757 
758 	case 0:
759 		printf("succeeded\n");
760 	}
761 }
762 
763 /*
764  * Return the best possible estimate of the time in the timeval
765  * to which tvp points.  Unfortunately, we can't read the hardware registers.
766  * We guarantee that the time will be greater than the value obtained by a
767  * previous call.
768  */
769 microtime(tvp)
770 	register struct timeval *tvp;
771 {
772 	int s = splclock();
773 	static struct timeval lasttime;
774 
775 	*tvp = time;
776 #ifdef notdef
777 	tvp->tv_usec += clkread();
	while (tvp->tv_usec >= 1000000) {
779 		tvp->tv_sec++;
780 		tvp->tv_usec -= 1000000;
781 	}
782 #endif
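	/*
	 * If the clock has not advanced since the last caller, bump the
	 * microseconds by one so successive calls never return the same
	 * or a decreasing value.
	 */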
783 	if (tvp->tv_sec == lasttime.tv_sec &&
784 	    tvp->tv_usec <= lasttime.tv_usec &&
	    (tvp->tv_usec = lasttime.tv_usec + 1) >= 1000000) {
786 		tvp->tv_sec++;
787 		tvp->tv_usec -= 1000000;
788 	}
789 	lasttime = *tvp;
790 	splx(s);
791 }
792 
793 initcpu()
794 {
795 
796 	/*
797 	 * clear LEDs
798 	 */
799 	*(char*)DEBUG_PORT = (char)DP_WRITE|DP_LED0|DP_LED1|DP_LED2|DP_LED3;
800 
801 	/*
802 	 * clear all interrupts
803 	 */
804 	*(char*)INTCLR0 = 0;
805 	*(char*)INTCLR1 = 0;
806 
807 	/*
	 * It is not yet time to enable the timer.
809 	 *
810 	 *	INTEN0:  PERR ABORT BERR TIMER KBD  MS    CFLT CBSY
811 	 *		  o     o    o     x    o    o     x    x
812 	 *	INTEN1:  BEEP SCC  LANCE DMA  SLOT1 SLOT3 EXT1 EXT3
813 	 *		  x     o    o     o    o    o     x    x
814 	 */
815 
816 	*(char*)INTEN0 = (char) INTEN0_PERR|INTEN0_ABORT|INTEN0_BERR|
817 				INTEN0_KBDINT|INTEN0_MSINT;
818 
819 	*(char*)INTEN1 = (char) INTEN1_SCC|INTEN1_LANCE|INTEN1_DMA|
820 				INTEN1_SLOT1|INTEN1_SLOT3;
821 
822 	spl0();		/* safe to turn interrupts on now */
823 }
824 
825 /*
826  * Convert an ASCII string into an integer.
827  */
828 int
829 atoi(s)
830 	char *s;
831 {
832 	int c;
833 	unsigned base = 10, d;
834 	int neg = 0, val = 0;
835 
836 	if (s == 0 || (c = *s++) == 0)
837 		goto out;
838 
839 	/* skip spaces if any */
840 	while (c == ' ' || c == '\t')
841 		c = *s++;
842 
843 	/* parse sign, allow more than one (compat) */
844 	while (c == '-') {
845 		neg = !neg;
846 		c = *s++;
847 	}
848 
	/* parse base specification, if any */
	if (c == '0') {
		c = *s++;
		switch (c) {
		case 'X':
		case 'x':
			base = 16;
			c = *s++;	/* consume the base character */
			break;
		case 'B':
		case 'b':
			base = 2;
			c = *s++;	/* consume the base character */
			break;
		default:
			base = 8;
			break;
		}
	}
866 
	/* parse number proper */
	for (;;) {
		if (c >= '0' && c <= '9')
			d = c - '0';
		else if (c >= 'a' && c <= 'z')
			d = c - 'a' + 10;
		else if (c >= 'A' && c <= 'Z')
			d = c - 'A' + 10;
		else
			break;
		if (d >= base)		/* not a digit in this base */
			break;
		val *= base;
		val += d;
		c = *s++;
	}
881 	if (neg)
882 		val = -val;
883 out:
884 	return val;
885 }
886 
887 #ifdef CPU_SINGLE
888 /*
889  * small ring buffers for keyboard/mouse
890  */
891 struct ring_buf {
892 	u_char head;
893 	u_char tail;
894 	u_char count;
895 	u_char buf[13];
896 } ring_buf[2];
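/*
 * head is the producer index, tail the consumer index and count the
 * number of characters queued; there is one buffer per input channel.
 * xputc() and xgetc() run at splhigh() so the buffers can safely be
 * filled and drained from interrupt level.
 */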
897 
898 xputc(c, chan)
899 	u_char c;
900 	int chan;
901 {
902 	register struct ring_buf *p = &ring_buf[chan];
903 	int s = splhigh();
904 
905 	if (p->count >= sizeof (p->buf)) {
906 		(void) splx(s);
907 		return (-1);
908 	}
909 	p->buf[p->head] = c;
910 	if (++p->head >= sizeof (p->buf))
911 		p->head = 0;
912 	p->count++;
913 	(void) splx(s);
914 	return (c);
915 }
916 
917 xgetc(chan)
918 	int chan;
919 {
920 	register struct ring_buf *p = &ring_buf[chan];
921 	int c;
922 	int s = splhigh();
923 
924 	if (p->count == 0) {
925 		(void) splx(s);
926 		return (-1);
927 	}
928 	c = p->buf[p->tail];
929 	if (++p->tail >= sizeof (p->buf))
930 		p->tail = 0;
931 	p->count--;
932 	(void) splx(s);
933 	return (c);
934 }
935 #endif /* CPU_SINGLE */
936 
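/*
 * Busy-wait for roughly "time" units; cpuspeed scales the count so the
 * loop takes about the same real time regardless of the CPU clock rate.
 */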
937 _delay(time)
938 	register int time;
939 {
940 	extern int cpuspeed;
941 
942 	time *= cpuspeed;
	while (time--)
944 		;
945 }
946