/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)machdep.c	7.5 (Berkeley) 04/20/93
 *
 * from: $Header: machdep.c,v 1.40 93/04/20 11:16:12 torek Exp $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/map.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/clist.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#include <sys/exec.h>

#include <machine/autoconf.h>
#include <machine/frame.h>
#include <machine/cpu.h>

#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/cache.h>
#include <sparc/sparc/vaddrs.h>

vm_map_t buffer_map;
extern vm_offset_t avail_end;

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif
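/*
 * The explicit zero initializers are presumably what keeps these counters
 * in the data segment rather than bss, so the values can be patched in the
 * kernel binary before cpu_startup() sizes the buffer cache.
 */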

int	physmem;

extern struct msgbuf msgbuf;
struct	msgbuf *msgbufp = &msgbuf;
int	msgbufmapped = 1;	/* message buffer is always mapped */

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int   safepri = 0;

caddr_t allocsys();

/*
 * Machine-dependent startup code
 */
cpu_startup()
{
	register unsigned i;
	register caddr_t v;
	register int sz;
	int base, residual;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	physmem = btoc(avail_end);
	printf("real mem = %d\n", avail_end);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys((caddr_t)0);
	if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");
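	/*
	 * (The first allocsys() call above, with a null base, only measures
	 * the total table size; the second hands out the real addresses, so
	 * the two results must agree exactly.)
	 */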

	/*
	 * Now allocate buffers proper.  They are different from the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
	    &maxaddr, size, FALSE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
			&minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
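		/*
		 * For example (hypothetical numbers): with bufpages = 10
		 * and nbuf = 4, base = 2 and residual = 2, so buffers 0-1
		 * get 3 pages each and buffers 2-3 get 2 pages each,
		 * accounting for all 10 pages.
		 */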
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
	}
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, TRUE);
	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.
	 */
	phys_map = vm_map_create(kernel_pmap, DVMA_BASE, DVMA_END, 1);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an odd size,
	 * we use the more space-efficient malloc in place of kmem_alloc.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
				   M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
			       VM_MBUF_SIZE, FALSE);
	/*
	 * Initialize callouts
	 */
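	/*
	 * Thread callout[0..ncallout-1] into a NULL-terminated free list
	 * headed by callfree; timeout() presumably draws entries from here.
	 */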
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	configure();

	/*
	 * Turn on the cache (do after configuration due to a bug in
	 * some versions of the SPARC chips -- this info from Gilmore).
	 */
	cache_enable();
}

/*
 * Allocate space for system data structures.  We are given
 * a starting virtual address and we return a final virtual
 * address; along the way we set each data structure pointer.
 *
 * You call allocsys() with 0 to find out how much space we want,
 * allocate that much and fill it with zeroes, and then call
 * allocsys() again with the correct base virtual address.
 */
caddr_t
allocsys(v)
	register caddr_t v;
{

#define	valloc(name, type, num) \
	    v = (caddr_t)(((name) = (type *)v) + (num))
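	/*
	 * For example, valloc(buf, struct buf, nbuf) expands to
	 *	v = (caddr_t)(((buf) = (struct buf *)v) + (nbuf));
	 * i.e. it points the array at the current address and advances v
	 * past nbuf elements, so a pass with v == 0 simply totals the sizes.
	 */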
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate (enough to
	 * hold 5% of total physical memory, but at least 16).
	 * Allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		bufpages = (physmem / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	return (v);
}

/*
 * Set up registers on exec.
 *
 * XXX this entire mess must be fixed
 */
/* ARGSUSED */
setregs(p, entry, retval)
	register struct proc *p;
	u_long entry;
	int retval[2];
{
	register struct trapframe *tf = p->p_md.md_tf;
	register struct fpstate *fs;
	register int psr, sp;

	/*
	 * The syscall will ``return'' to npc or %g7 or %g2; set them all.
	 * Set the rest of the registers to 0 except for %o6 (stack pointer,
	 * built in exec()) and psr (retain CWP and PSR_S bits).
	 */
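	/*
	 * (That is, the normal successful-syscall return path resumes at
	 * npc rather than pc, and SunOS-style startup code may instead
	 * jump through %g2 or %g7, so all three get the entry point.)
	 */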
	psr = tf->tf_psr & (PSR_S | PSR_CWP);
	sp = tf->tf_out[6];
	if ((fs = p->p_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		if (p == fpproc) {
			savefpstate(fs);
			fpproc = NULL;
		}
		free((void *)fs, M_SUBPROC);
		p->p_md.md_fpstate = NULL;
	}
	bzero((caddr_t)tf, sizeof *tf);
	tf->tf_psr = psr;
	tf->tf_global[2] = tf->tf_global[7] = tf->tf_npc = entry & ~3;
	tf->tf_out[6] = sp;
	retval[1] = 0;
}

#ifdef DEBUG
int sigdebug = 0;
int sigpid = 0;
#define SDB_FOLLOW	0x01
#define SDB_KSTACK	0x02
#define SDB_FPSTATE	0x04
#endif

struct sigframe {
	int	sf_signo;		/* signal number */
	int	sf_code;		/* code */
#ifdef COMPAT_SUNOS
	struct	sigcontext *sf_scp;	/* points to user addr of sigcontext */
#else
	int	sf_xxx;			/* placeholder */
#endif
	int	sf_addr;		/* SunOS compat, always 0 for now */
	struct	sigcontext sf_sc;	/* actual sigcontext */
};

/*
 * Send an interrupt to process.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct proc *p = curproc;
	register struct sigacts *psp = p->p_sigacts;
	register struct sigframe *fp;
	register struct trapframe *tf;
	register int addr, oonstack, oldsp, newsp;
	struct sigframe sf;
	extern char sigcode[], esigcode[];
#define	szsigcode	(esigcode - sigcode)
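/*
 * sigcode/esigcode delimit the signal trampoline copied out to the top of
 * the user stack in exec() (see the address computation below), so
 * szsigcode is simply the trampoline's length in bytes.
 */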

	tf = p->p_md.md_tf;
	oldsp = tf->tf_out[6];
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
					 psp->ps_sigstk.ss_size);
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else
		fp = (struct sigframe *)oldsp;
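	/*
	 * Back up by one struct sigframe and round down to an 8-byte
	 * boundary, the alignment SPARC requires of stack addresses.
	 */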
	fp = (struct sigframe *)((int)(fp - 1) & ~7);

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: %s[%d] sig %d newusp %x scp %x\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc);
#endif
	/*
	 * Now set up the signal frame.  We build it in kernel space
	 * and then copy it out.  We probably ought to just build it
	 * directly in user space....
	 */
	sf.sf_signo = sig;
	sf.sf_code = code;
#ifdef COMPAT_SUNOS
	sf.sf_scp = &fp->sf_sc;
#endif
	sf.sf_addr = 0;			/* XXX */

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = oonstack;
	sf.sf_sc.sc_mask = mask;
	sf.sf_sc.sc_sp = oldsp;
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
	sf.sf_sc.sc_psr = tf->tf_psr;
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
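	/*
	 * Sketch of the resulting user stack, low to high addresses:
	 * at newsp, a struct rwindow (the handler's register save area,
	 * whose rw_in[6] is pointed at oldsp below so frame pointers still
	 * chain); at fp, the struct sigframe holding signo, code and the
	 * sigcontext; at oldsp, the frames in use when the signal hit.
	 */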
	newsp = (int)fp - sizeof(struct rwindow);
	write_user_windows();
	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
	    suword(&((struct rwindow *)newsp)->rw_in[6], oldsp)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
#endif
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig: %s[%d] sig %d scp %x\n",
		       p->p_comm, p->p_pid, sig, &fp->sf_sc);
#endif
	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
#ifdef COMPAT_SUNOS
	if (psp->ps_usertramp & sigmask(sig)) {
		addr = (int)catcher;	/* user does his own trampolining */
	} else
#endif
	{
		addr = USRSTACK - sizeof(struct ps_strings) - szsigcode;
		tf->tf_global[1] = (int)catcher;
	}
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = newsp;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: about to return to catcher\n");
#endif
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above),
 * and return to the given trap frame (if there is one).
 * Check carefully to make sure that the user has not
 * modified the state to gain improper privileges or to cause
 * a machine fault.
 */
/* ARGSUSED */
struct sigreturn_args {
	struct sigcontext *scp;
};
sigreturn(p, uap, retval)
	register struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct trapframe *tf;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(p))
		sigexit(p, SIGILL);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: %s[%d], scp %x\n",
		    p->p_comm, p->p_pid, uap->scp);
#endif
	scp = uap->scp;
	if ((int)scp & 3 || useracc((caddr_t)scp, sizeof *scp, B_WRITE) == 0)
		return (EINVAL);
	tf = p->p_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
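	/*
	 * (Restoring the supervisor, CWP or PIL fields wholesale would let
	 * a process elevate its privileges, and an unaligned pc/npc would
	 * fault on return; hence only the condition codes are taken and
	 * the alignment is checked.)
	 */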
	if (((scp->sc_pc | scp->sc_npc) & 3) != 0)
		return (EINVAL);
	/* take only psr ICC field */
	tf->tf_psr = (tf->tf_psr & ~PSR_ICC) | (scp->sc_psr & PSR_ICC);
	tf->tf_pc = scp->sc_pc;
	tf->tf_npc = scp->sc_npc;
	tf->tf_global[1] = scp->sc_g1;
	tf->tf_out[0] = scp->sc_o0;
	tf->tf_out[6] = scp->sc_sp;
	if (scp->sc_onstack & 1)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	p->p_sigmask = scp->sc_mask & ~sigcantmask;
	return (EJUSTRETURN);
}

int	waittime = -1;

boot(howto)
	register int howto;
{
	int i;
	static char str[4];	/* room for "-sd\0" */
	extern volatile void romhalt(void);
	extern volatile void romboot(char *);

	fb_unblank();
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0 && rootfs) {
		register struct buf *bp;
		int iter, nbusy;
#if 1
		extern struct proc proc0;

		/* protect against curproc->p_stats.foo refs in sync()   XXX */
		if (curproc == NULL)
			curproc = &proc0;
#endif
		waittime = 0;
		(void) spl0();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			vnode_pager_umount((struct mount *)NULL);
		sync(&proc0, (void *)NULL, (int *)NULL);

		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	(void) splhigh();		/* ??? */
	if (howto & RB_HALT) {
		printf("halted\n\n");
		romhalt();
	}
	if (howto & RB_DUMP)
		dumpsys();
	printf("rebooting\n\n");
	i = 1;
	if (howto & RB_SINGLE)
		str[i++] = 's';
	if (howto & RB_KDB)
		str[i++] = 'd';
	if (i > 1) {
		str[0] = '-';
		str[i] = 0;
	} else
		str[0] = 0;
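	/*
	 * e.g. RB_SINGLE yields "-s", RB_KDB "-d", both together "-sd";
	 * a plain reboot passes the PROM an empty string.
	 */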
	romboot(str);
	/*NOTREACHED*/
}

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;

dumpconf()
{
	int nblks;

	dumpsize = physmem;
#define DUMPMMU
#ifdef DUMPMMU
#define NPMEG 128
	/*
	 * savecore views the image in units of pages (i.e., dumpsize is in
	 * pages) so we round the two mmu entities into page-sized chunks.
	 * The PMEGs (32kB) and the segment table (512 bytes plus padding)
	 * are appended to the end of the crash dump.
	 */
	dumpsize += btoc(sizeof(((struct kpmap *)0)->pm_rsegmap)) +
		btoc(NPMEG * NPTESG * sizeof(int));
#endif
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		/*
		 * Don't dump on the first CLBYTES (why CLBYTES?)
		 * in case the dump device includes a disk label.
		 */
		if (dumplo < btodb(CLBYTES))
			dumplo = btodb(CLBYTES);

		/*
		 * If dumpsize is too big for the partition, truncate it.
		 * Otherwise, put the dump at the end of the partition
		 * by making dumplo as large as possible.
		 */
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo + ctod(dumpsize) > nblks)
			dumplo = nblks - ctod(dumpsize);
	}
}

#ifdef DUMPMMU
/* XXX */
#include <machine/ctlreg.h>
#define	getpte(va)		lda(va, ASI_PTE)
#define	setsegmap(va, pmeg)	stba(va, ASI_SEGMAP, pmeg)

/*
 * Write the mmu contents to the dump device.
 * This gets appended to the end of a crash dump since
 * there is no in-core copy of kernel memory mappings.
 */
int
dumpmmu(blkno)
	register daddr_t blkno;
{
	register int (*dump)(/*dev_t, daddr_t, caddr_t, int*/);
	register int pmeg;
	register int addr;	/* unused kernel virtual address */
	register int i;
	register int *pte, *ptend;
	register int error;
	register struct kpmap *kpmap = &kernel_pmap_store;
	int buffer[dbtob(1) / sizeof(int)];
	extern int seginval;	/* from pmap.c */


	dump = bdevsw[major(dumpdev)].d_dump;

	/*
	 * dump page table entries
	 *
	 * We dump each pmeg in order (by pmeg number).  Since the MMU
	 * translates a virtual address through its segment's pmeg, we
	 * iterate over the pmegs by pointing one unused segment slot in
	 * the MMU at each pmeg in turn.  That fixed segment's base is
	 * used as the virtual address argument to getpte().
	 */
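	/*
	 * (Each pmeg holds NPTESG page table entries; with the borrowed
	 * segment mapped to pmeg N, getpte(addr + i * NBPG) reads entry i
	 * of that pmeg.)
	 */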

	/* First find an unused virtual segment. */
	i = NKSEG;
	while (kpmap->pm_rsegmap[--i] != seginval)
		if (i <= 0)
			return (-1);
	/*
	 * Compute the base address corresponding to the unused segment.
	 * Note that the kernel segments start after all the user segments
	 * so we must account for this offset.
	 */
	addr = VSTOVA(i + NUSEG);
	/*
	 * Go through the pmegs and dump each one.
	 */
	pte = buffer;
	ptend = &buffer[sizeof(buffer) / sizeof(buffer[0])];
	for (pmeg = 0; pmeg < NPMEG; ++pmeg) {
		register int va = addr;

		setsegmap(addr, pmeg);
		i = NPTESG;
		do {
			*pte++ = getpte(va);
			if (pte >= ptend) {
				/*
				 * Note that we'll dump the last block
				 * the last time through the loops because
				 * all the PMEGs occupy 32KB which is
				 * a multiple of the block size.
				 */
				error = (*dump)(dumpdev, blkno,
						(caddr_t)buffer,
						dbtob(1));
				if (error != 0)
					return (error);
				++blkno;
				pte = buffer;
			}
			va += NBPG;
		} while (--i > 0);
	}
	setsegmap(addr, seginval);

	/*
	 * dump (512 byte) segment map
	 * XXX assume it's a multiple of the block size
	 */
	error = (*dump)(dumpdev, blkno, (caddr_t)kpmap->pm_rsegmap,
			sizeof(kpmap->pm_rsegmap));
	return (error);
}
#endif

#define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
static vm_offset_t dumpspace;

caddr_t
reserve_dumppages(p)
	caddr_t p;
{

	dumpspace = (vm_offset_t)p;
	return (p + BYTES_PER_DUMP);
}

/*
 * Write a crash dump.
 */
dumpsys()
{
	register unsigned bytes, i, n;
	register int maddr, psize;
	register daddr_t blkno;
	register int (*dump)(/*dev_t, daddr_t, caddr_t, int, int*/);
	int error = 0;

	if (dumpdev == NODEV)
		return;
	/* copy registers to memory */
	snapshot(cpcb);
	/*
	 * For dumps during autoconfiguration: if the dump device has
	 * already been configured but dumpsize is still unset, compute
	 * the dump parameters now.
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	bytes = physmem << PGSHIFT;
	maddr = 0;
	blkno = dumplo;
	dump = bdevsw[major(dumpdev)].d_dump;
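	/*
	 * Dump physical memory BYTES_PER_DUMP bytes at a time: map each
	 * chunk read-only at dumpspace (set aside by reserve_dumppages()),
	 * write it out, then advance the physical address and block number.
	 */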
	for (i = 0; i < bytes; i += n) {
		n = bytes - i;
		if (n > BYTES_PER_DUMP)
			 n = BYTES_PER_DUMP;
#ifdef DEBUG
		/* print out how many MBs we have dumped */
		if (i && (i % (1024*1024)) == 0)
			printf("%d ", i / (1024*1024));
#endif
		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
		error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, (int)n);
		if (error)
			break;
		maddr += n;
		blkno += btodb(n);
	}
#ifdef DUMPMMU
	if (!error)
		error = dumpmmu(blkno);
#endif
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}

/*
 * Map an I/O device given physical address and size in bytes, e.g.,
 *
 *	mydev = (struct mydev *)mapdev(myioaddr, 0, sizeof(struct mydev));
 *
 * See also machine/autoconf.h.
 */
void *
mapdev(phys, virt, size)
	register void *phys;
	register int virt, size;
{
	register vm_offset_t v;
	register void *ret;
	static vm_offset_t iobase = IODEV_BASE;

	size = round_page(size);
	if (virt)
		v = trunc_page(virt);
	else {
		v = iobase;
		iobase += size;
		if (iobase > IODEV_END)	/* unlikely */
			panic("mapdev");
	}
	ret = (void *)v;
	phys = (void *)trunc_page(phys);
	do {
		pmap_enter(kernel_pmap, v,
		    (vm_offset_t)phys | PMAP_OBIO | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 1);
		v += PAGE_SIZE;
		phys += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);
	return (ret);
}
823