1 /*	$NetBSD: machdep.c,v 1.305 2011/01/14 02:06:31 rmind Exp $ */
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1992, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This software was developed by the Computer Systems Engineering group
38  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
39  * contributed to Berkeley.
40  *
41  * All advertising materials mentioning features or use of this software
42  * must display the following acknowledgement:
43  *	This product includes software developed by the University of
44  *	California, Lawrence Berkeley Laboratory.
45  *
46  * Redistribution and use in source and binary forms, with or without
47  * modification, are permitted provided that the following conditions
48  * are met:
49  * 1. Redistributions of source code must retain the above copyright
50  *    notice, this list of conditions and the following disclaimer.
51  * 2. Redistributions in binary form must reproduce the above copyright
52  *    notice, this list of conditions and the following disclaimer in the
53  *    documentation and/or other materials provided with the distribution.
54  * 3. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
71  */
72 
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.305 2011/01/14 02:06:31 rmind Exp $");
75 
76 #include "opt_compat_netbsd.h"
77 #include "opt_compat_sunos.h"
78 #include "opt_sparc_arch.h"
79 #include "opt_modular.h"
80 #include "opt_multiprocessor.h"
81 
82 #include <sys/param.h>
83 #include <sys/signal.h>
84 #include <sys/signalvar.h>
85 #include <sys/proc.h>
86 #include <sys/extent.h>
87 #include <sys/savar.h>
88 #include <sys/cpu.h>
89 #include <sys/buf.h>
90 #include <sys/device.h>
91 #include <sys/reboot.h>
92 #include <sys/systm.h>
93 #include <sys/kernel.h>
94 #include <sys/conf.h>
95 #include <sys/file.h>
96 #include <sys/malloc.h>
97 #include <sys/mbuf.h>
98 #include <sys/mount.h>
99 #include <sys/msgbuf.h>
100 #include <sys/syscallargs.h>
101 #include <sys/exec.h>
102 #include <sys/exec_aout.h>
103 #include <sys/ucontext.h>
104 #include <sys/simplelock.h>
105 #include <sys/module.h>
106 #include <sys/mutex.h>
107 
108 #include <uvm/uvm.h>		/* we use uvm.kernel_object */
109 
110 #include <sys/sysctl.h>
111 
112 #ifdef COMPAT_13
113 #include <compat/sys/signal.h>
114 #include <compat/sys/signalvar.h>
115 #endif
116 
117 #define _SPARC_BUS_DMA_PRIVATE
118 #include <machine/autoconf.h>
119 #include <machine/bus.h>
120 #include <machine/frame.h>
121 #include <machine/cpu.h>
122 #include <machine/pcb.h>
123 #include <machine/pmap.h>
124 #include <machine/oldmon.h>
125 #include <machine/bsd_openprom.h>
126 #include <machine/bootinfo.h>
127 
128 #include <sparc/sparc/asm.h>
129 #include <sparc/sparc/cache.h>
130 #include <sparc/sparc/vaddrs.h>
131 #include <sparc/sparc/cpuvar.h>
132 
133 #include "fb.h"
134 #include "power.h"
135 
136 #if NPOWER > 0
137 #include <sparc/dev/power.h>
138 #endif
139 
140 extern paddr_t avail_end;
141 
142 int	physmem;
143 
144 kmutex_t fpu_mtx;
145 
146 /*
147  * safepri is a safe priority for sleep to set for a spin-wait
148  * during autoconfiguration or after a panic.
149  */
150 int   safepri = 0;
151 
152 /*
153  * dvmamap24 is used to manage DVMA memory for devices that have the upper
154  * eight address bits wired to all-ones (e.g. `le' and `ie')
155  * eight address bits wired to all-ones (e.g. `le' and `ie').
156 struct extent *dvmamap24;
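
/*
 * Illustrative sketch (not code used by this file): a driver constrained
 * to 24-bit DVMA would carve space out of this extent much like
 * sun4_dmamap_load() below does, and give it back on unload:
 *
 *	u_long dva;
 *	if (extent_alloc(dvmamap24, len, PAGE_SIZE, 0, EX_NOWAIT, &dva))
 *		return (ENOMEM);
 *	...
 *	(void)extent_free(dvmamap24, dva, len, EX_NOWAIT);
 */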
157 
158 void	dumpsys(void);
159 void	stackdump(void);
160 
161 /*
162  * Machine-dependent startup code
163  */
164 void
165 cpu_startup(void)
166 {
167 #ifdef DEBUG
168 	extern int pmapdebug;
169 	int opmapdebug = pmapdebug;
170 #endif
171 	struct pcb *pcb;
172 	vsize_t size;
173 	paddr_t pa;
174 	char pbuf[9];
175 
176 #ifdef DEBUG
177 	pmapdebug = 0;
178 #endif
179 
180 	/* XXX */
181 	pcb = lwp_getpcb(&lwp0);
182 	if (pcb && pcb->pcb_psr == 0)
183 		pcb->pcb_psr = getpsr();
184 
185 	/*
186 	 * Re-map the message buffer from its temporary address
187 	 * at KERNBASE to MSGBUF_VA.
188 	 */
189 #if !defined(MSGBUFSIZE) || MSGBUFSIZE <= 8192
190 	/*
191 	 * We use the free page(s) in front of the kernel load address.
192 	 */
193 	size = 8192;
194 
195 	/* Get physical address of the message buffer */
196 	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);
197 
198 	/* Invalidate the current mapping at KERNBASE. */
199 	pmap_kremove((vaddr_t)KERNBASE, size);
200 	pmap_update(pmap_kernel());
201 
202 	/* Enter the new mapping */
203 	pmap_map(MSGBUF_VA, pa, pa + size, VM_PROT_READ|VM_PROT_WRITE);
204 
205 	/*
206 	 * Re-initialize the message buffer.
207 	 */
208 	initmsgbuf((void *)MSGBUF_VA, size);
209 #else /* MSGBUFSIZE */
210 	{
211 	struct pglist mlist;
212 	struct vm_page *m;
213 	vaddr_t va0, va;
214 
215 	/*
216 	 * We use the free page(s) in front of the kernel load address,
217 	 * and then allocate some more.
218 	 */
219 	size = round_page(MSGBUFSIZE);
220 
221 	/* Get physical address of the first 8192-byte chunk of the message buffer */
222 	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);
223 
224 	/* Allocate additional physical pages */
225 	if (uvm_pglistalloc(size - 8192,
226 			    vm_first_phys, vm_first_phys+vm_num_phys,
227 			    0, 0, &mlist, 1, 0) != 0)
228 		panic("cpu_startup: no memory for message buffer");
229 
230 	/* Invalidate the current mapping at KERNBASE. */
231 	pmap_kremove((vaddr_t)KERNBASE, 8192);
232 	pmap_update(pmap_kernel());
233 
234 	/* Allocate virtual memory space */
235 	va0 = va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
236 	if (va == 0)
237 		panic("cpu_startup: no virtual memory for message buffer");
238 
239 	/* Map first 8192 */
240 	while (va < va0 + 8192) {
241 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
242 		pa += PAGE_SIZE;
243 		va += PAGE_SIZE;
244 	}
245 	pmap_update(pmap_kernel());
246 
247 	/* Map the rest of the pages */
248 	TAILQ_FOREACH(m, &mlist, pageq.queue) {
249 		if (va >= va0 + size)
250 			panic("cpu_startup: message buffer size botch");
251 		pa = VM_PAGE_TO_PHYS(m);
252 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
253 		va += PAGE_SIZE;
254 	}
255 	pmap_update(pmap_kernel());
256 
257 	/*
258 	 * Re-initialize the message buffer.
259 	 */
260 	initmsgbuf((void *)va0, size);
261 	}
262 #endif /* MSGBUFSIZE */
263 
264 	/*
265 	 * Good {morning,afternoon,evening,night}.
266 	 */
267 	printf("%s%s", copyright, version);
268 	/*identifycpu();*/
269 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
270 	printf("total memory = %s\n", pbuf);
271 
272 	/*
273 	 * Tune buffer cache variables based on the capabilities of the MMU,
274 	 * cutting down the VM space allocated for the buffer cache where it
275 	 * could otherwise lead to an MMU resource shortage.
276 	 */
277 	if (CPU_ISSUN4 || CPU_ISSUN4C) {
278 		/* Clip UBC windows */
279 		if (cpuinfo.mmu_nsegment <= 128) {
280 			/*
281 			 * ubc_nwins and ubc_winshift control the amount
282 			 * of VM used by the UBC. Normally, this VM is
283 			 * not wired in the kernel map, hence non-locked
284 			 * `PMEGs' (see pmap.c) are used for this space.
285 			 * We still limit possible fragmentation to prevent
286 			 * the occasional wired UBC mappings from tying up
287 			 * too many PMEGs.
288 			 *
289 			 * Set the upper limit to 9 segments (default
290 			 * winshift = 13).
291 			 */
292 			ubc_nwins = 512;
293 
294 			/*
295 			 * buf_setvalimit() allocates a submap for buffer
296 			 * allocation. We use it to limit the number of locked
297 			 * `PMEGs' (see pmap.c) dedicated to the buffer cache.
298 			 *
299 			 * Set the upper limit to 12 segments (3MB), which
300 			 * corresponds approximately to the size of the
301 			 * traditional 5% rule (assuming a maximum 64MB of
302 			 * memory in small sun4c machines).
303 			 */
304 			buf_setvalimit(12 * 256*1024);
305 		}
306 
307 		/* Clip max data & stack to avoid running into the MMU hole */
308 #if MAXDSIZ > 256*1024*1024
309 		maxdmap = 256*1024*1024;
310 #endif
311 #if MAXSSIZ > 256*1024*1024
312 		maxsmap = 256*1024*1024;
313 #endif
314 	}
315 
316 	if (CPU_ISSUN4 || CPU_ISSUN4C) {
317 		/*
318 		 * Allocate DMA map for 24-bit devices (le, ie)
319 		 * [dvma_base - dvma_end] is for VME devices.
320 		 */
321 		dvmamap24 = extent_create("dvmamap24",
322 					  D24_DVMA_BASE, D24_DVMA_END,
323 					  M_DEVBUF, 0, 0, EX_NOWAIT);
324 		if (dvmamap24 == NULL)
325 			panic("unable to allocate DVMA map");
326 	}
327 
328 #ifdef DEBUG
329 	pmapdebug = opmapdebug;
330 #endif
331 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
332 	printf("avail memory = %s\n", pbuf);
333 
334 	pmap_redzone();
335 
336 	mutex_init(&fpu_mtx, MUTEX_DEFAULT, IPL_SCHED);
337 }
338 
339 /*
340  * Set up registers on exec.
341  *
342  * XXX this entire mess must be fixed
343  */
344 /* ARGSUSED */
345 void
346 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
347 {
348 	struct trapframe *tf = l->l_md.md_tf;
349 	struct fpstate *fs;
350 	int psr;
351 
352 	/* Don't allow unaligned data references by default */
353 	l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN;
354 
355 	/*
356 	 * Set the registers to 0 except for:
357 	 *	%o6: stack pointer, built in exec()
358 	 *	%psr: (retain CWP and PSR_S bits)
359 	 *	%g1: address of p->p_psstr (used by crt0)
360 	 *	%pc,%npc: entry point of program
361 	 */
362 	psr = tf->tf_psr & (PSR_S | PSR_CWP);
363 	if ((fs = l->l_md.md_fpstate) != NULL) {
364 		struct cpu_info *cpi;
365 		int s;
366 		/*
367 		 * We hold an FPU state.  If we own *some* FPU chip state
368 		 * we must get rid of it, and the only way to do that is
369 		 * to save it.  In any case, get rid of our FPU state.
370 		 */
371 		FPU_LOCK(s);
372 		if ((cpi = l->l_md.md_fpu) != NULL) {
373 			if (cpi->fplwp != l)
374 				panic("FPU(%d): fplwp %p",
375 					cpi->ci_cpuid, cpi->fplwp);
376 			if (l == cpuinfo.fplwp)
377 				savefpstate(fs);
378 #if defined(MULTIPROCESSOR)
379 			else
380 				XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid);
381 #endif
382 			cpi->fplwp = NULL;
383 		}
384 		l->l_md.md_fpu = NULL;
385 		FPU_UNLOCK(s);
386 		free((void *)fs, M_SUBPROC);
387 		l->l_md.md_fpstate = NULL;
388 	}
389 	memset((void *)tf, 0, sizeof *tf);
390 	tf->tf_psr = psr;
391 	tf->tf_global[1] = (int)l->l_proc->p_psstr;
392 	tf->tf_pc = pack->ep_entry & ~3;
393 	tf->tf_npc = tf->tf_pc + 4;
394 	stack -= sizeof(struct rwindow);
395 	tf->tf_out[6] = stack;
396 }
397 
398 #ifdef DEBUG
399 int sigdebug = 0;
400 int sigpid = 0;
401 #define SDB_FOLLOW	0x01
402 #define SDB_KSTACK	0x02
403 #define SDB_FPSTATE	0x04
404 #endif
405 
406 /*
407  * machine dependent system variables.
408  */
409 static int
410 sysctl_machdep_boot(SYSCTLFN_ARGS)
411 {
412 	struct sysctlnode node = *rnode;
413 	struct btinfo_kernelfile *bi_file;
414 	const char *cp;
415 
416 
417 	switch (node.sysctl_num) {
418 	case CPU_BOOTED_KERNEL:
419 		if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL)
420 			cp = bi_file->name;
421 		else
422 			cp = prom_getbootfile();
423 		if (cp != NULL && cp[0] == '\0')
424 			cp = "netbsd";
425 		break;
426 	case CPU_BOOTED_DEVICE:
427 		cp = prom_getbootpath();
428 		break;
429 	case CPU_BOOT_ARGS:
430 		cp = prom_getbootargs();
431 		break;
432 	default:
433 		return (EINVAL);
434 	}
435 
436 	if (cp == NULL || cp[0] == '\0')
437 		return (ENOENT);
438 
439 	node.sysctl_data = __UNCONST(cp);
440 	node.sysctl_size = strlen(cp) + 1;
441 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
442 }
443 
444 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
445 {
446 
447 	sysctl_createv(clog, 0, NULL, NULL,
448 		       CTLFLAG_PERMANENT,
449 		       CTLTYPE_NODE, "machdep", NULL,
450 		       NULL, 0, NULL, 0,
451 		       CTL_MACHDEP, CTL_EOL);
452 
453 	sysctl_createv(clog, 0, NULL, NULL,
454 		       CTLFLAG_PERMANENT,
455 		       CTLTYPE_STRING, "booted_kernel", NULL,
456 		       sysctl_machdep_boot, 0, NULL, 0,
457 		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
458 	sysctl_createv(clog, 0, NULL, NULL,
459 		       CTLFLAG_PERMANENT,
460 		       CTLTYPE_STRING, "booted_device", NULL,
461 		       sysctl_machdep_boot, 0, NULL, 0,
462 		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
463 	sysctl_createv(clog, 0, NULL, NULL,
464 		       CTLFLAG_PERMANENT,
465 		       CTLTYPE_STRING, "boot_args", NULL,
466 		       sysctl_machdep_boot, 0, NULL, 0,
467 		       CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL);
468 	sysctl_createv(clog, 0, NULL, NULL,
469 		       CTLFLAG_PERMANENT,
470 		       CTLTYPE_INT, "cpu_arch", NULL,
471 		       NULL, 0, &cpu_arch, 0,
472 		       CTL_MACHDEP, CPU_ARCH, CTL_EOL);
473 }
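
/*
 * Usage note (added for illustration): the nodes registered above appear
 * to userland as machdep.booted_kernel, machdep.booted_device,
 * machdep.boot_args and machdep.cpu_arch; a query such as
 * `sysctl machdep.boot_args' lands in sysctl_machdep_boot() with
 * node.sysctl_num == CPU_BOOT_ARGS.
 */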
474 
475 /*
476  * Send a signal to the process.
477  */
478 struct sigframe {
479 	siginfo_t sf_si;
480 	ucontext_t sf_uc;
481 };
482 
483 void
484 sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
485 {
486 	struct lwp *l = curlwp;
487 	struct proc *p = l->l_proc;
488 	struct sigacts *ps = p->p_sigacts;
489 	struct trapframe *tf;
490 	ucontext_t uc;
491 	struct sigframe *fp;
492 	u_int onstack, oldsp, newsp;
493 	u_int catcher;
494 	int sig, error;
495 	size_t ucsz;
496 
497 	sig = ksi->ksi_signo;
498 
499 	tf = l->l_md.md_tf;
500 	oldsp = tf->tf_out[6];
501 
502 	/*
503 	 * Compute new user stack addresses, subtract off
504 	 * one signal frame, and align.
505 	 */
506 	onstack =
507 	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
508 	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
509 
510 	if (onstack)
511 		fp = (struct sigframe *)
512 			((char *)l->l_sigstk.ss_sp +
513 				  l->l_sigstk.ss_size);
514 	else
515 		fp = (struct sigframe *)oldsp;
516 
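	/*
	 * Carve out room for one signal frame and round the address
	 * down to an 8-byte boundary; SPARC stack frames must be
	 * doubleword-aligned.
	 */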
517 	fp = (struct sigframe *)((int)(fp - 1) & ~7);
518 
519 #ifdef DEBUG
520 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
521 		printf("sendsig: %s[%d] sig %d newusp %p si %p uc %p\n",
522 		    p->p_comm, p->p_pid, sig, fp, &fp->sf_si, &fp->sf_uc);
523 #endif
524 
525 	/*
526 	 * Build the signal context to be used by sigreturn.
527 	 */
528 	uc.uc_flags = _UC_SIGMASK |
529 		((l->l_sigstk.ss_flags & SS_ONSTACK)
530 			? _UC_SETSTACK : _UC_CLRSTACK);
531 	uc.uc_sigmask = *mask;
532 	uc.uc_link = l->l_ctxlink;
533 	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));
534 
535 	/*
536 	 * Now copy the stack contents out to user space.
537 	 * We need to make sure that when we start the signal handler,
538 	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
539 	 * joins seamlessly with the frame it was in when the signal occurred,
540 	 * so that the debugger and _longjmp code can back up through it.
541 	 * Since we're calling the handler directly, allocate a full size
542 	 * C stack frame.
543 	 */
544 	sendsig_reset(l, sig);
545 	mutex_exit(p->p_lock);
546 	newsp = (int)fp - sizeof(struct frame);
547 	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
548 	ucsz = (int)&uc.__uc_pad - (int)&uc;
549 	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
550 	    copyout(&uc, &fp->sf_uc, ucsz) ||
551 	    suword(&((struct rwindow *)newsp)->rw_in[6], oldsp));
552 	mutex_enter(p->p_lock);
553 
554 	if (error) {
555 		/*
556 		 * Process has trashed its stack; give it an illegal
557 		 * instruction to halt it in its tracks.
558 		 */
559 #ifdef DEBUG
560 		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
561 			printf("sendsig: window save or copyout error\n");
562 #endif
563 		sigexit(l, SIGILL);
564 		/* NOTREACHED */
565 	}
566 
567 	switch (ps->sa_sigdesc[sig].sd_vers) {
568 	default:
569 		/* Unsupported trampoline version; kill the process. */
570 		sigexit(l, SIGILL);
571 	case 2:
572 		/*
573 		 * Arrange to continue execution at the user's handler.
574 		 * It needs a new stack pointer, a return address and
575 		 * three arguments: (signo, siginfo *, ucontext *).
576 		 */
577 		catcher = (u_int)SIGACTION(p, sig).sa_handler;
578 		tf->tf_pc = catcher;
579 		tf->tf_npc = catcher + 4;
580 		tf->tf_out[0] = sig;
581 		tf->tf_out[1] = (int)&fp->sf_si;
582 		tf->tf_out[2] = (int)&fp->sf_uc;
583 		tf->tf_out[6] = newsp;
584 		tf->tf_out[7] = (int)ps->sa_sigdesc[sig].sd_tramp - 8;
585 		break;
586 	}
587 
588 	/* Remember that we're now on the signal stack. */
589 	if (onstack)
590 		l->l_sigstk.ss_flags |= SS_ONSTACK;
591 
592 #ifdef DEBUG
593 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
594 		printf("sendsig: about to return to catcher\n");
595 #endif
596 }
597 
598 /*
599  * cpu_upcall:
600  *
601  *	Send an upcall to userland.
602  */
603 void
604 cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted,
605 	   void *sas, void *ap, void *sp, sa_upcall_t upcall)
606 {
607 	struct trapframe *tf;
608 	vaddr_t addr;
609 
610 	tf = l->l_md.md_tf;
611 	addr = (vaddr_t) upcall;
612 
613 	/* Arguments to the upcall... */
614 	tf->tf_out[0] = type;
615 	tf->tf_out[1] = (vaddr_t) sas;
616 	tf->tf_out[2] = nevents;
617 	tf->tf_out[3] = ninterrupted;
618 	tf->tf_out[4] = (vaddr_t) ap;
619 
620 	/*
621 	 * Ensure the stack is double-word aligned, and provide a
622 	 * C call frame.
623 	 */
624 	sp = (void *)(((vaddr_t)sp & ~0x7) - CCFSZ);
625 
626 	/* Arrange to begin execution at the upcall handler. */
627 	tf->tf_pc = addr;
628 	tf->tf_npc = addr + 4;
629 	tf->tf_out[6] = (vaddr_t) sp;
630 	tf->tf_out[7] = -1;		/* "you lose" if upcall returns */
631 }
632 
633 void
634 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
635 {
636 	struct trapframe *tf = (struct trapframe *)l->l_md.md_tf;
637 	__greg_t *r = mcp->__gregs;
638 #ifdef FPU_CONTEXT
639 	__fpregset_t *f = &mcp->__fpregs;
640 	struct fpstate *fps = l->l_md.md_fpstate;
641 #endif
642 
643 	/*
644 	 * Put the stack in a consistent state before we whack away
645 	 * at it.  Note that write_user_windows may just dump the
646 	 * registers into the pcb; we need them in the process's memory.
647 	 */
648 	write_user_windows();
649 	if ((l->l_flag & LW_SA_SWITCHING) == 0 && rwindow_save(l)) {
650 		mutex_enter(l->l_proc->p_lock);
651 		sigexit(l, SIGILL);
652 	}
653 
654 	/*
655 	 * Get the general purpose registers
656 	 */
657 	r[_REG_PSR] = tf->tf_psr;
658 	r[_REG_PC] = tf->tf_pc;
659 	r[_REG_nPC] = tf->tf_npc;
660 	r[_REG_Y] = tf->tf_y;
661 	r[_REG_G1] = tf->tf_global[1];
662 	r[_REG_G2] = tf->tf_global[2];
663 	r[_REG_G3] = tf->tf_global[3];
664 	r[_REG_G4] = tf->tf_global[4];
665 	r[_REG_G5] = tf->tf_global[5];
666 	r[_REG_G6] = tf->tf_global[6];
667 	r[_REG_G7] = tf->tf_global[7];
668 	r[_REG_O0] = tf->tf_out[0];
669 	r[_REG_O1] = tf->tf_out[1];
670 	r[_REG_O2] = tf->tf_out[2];
671 	r[_REG_O3] = tf->tf_out[3];
672 	r[_REG_O4] = tf->tf_out[4];
673 	r[_REG_O5] = tf->tf_out[5];
674 	r[_REG_O6] = tf->tf_out[6];
675 	r[_REG_O7] = tf->tf_out[7];
676 
677 	*flags |= _UC_CPU;
678 
679 #ifdef FPU_CONTEXT
680 	/*
681 	 * Get the floating point registers
682 	 */
683 	memcpy(f->__fpu_regs, fps->fs_regs, sizeof(fps->fs_regs));
684 	f->__fp_nqsize = sizeof(struct fp_qentry);
685 	f->__fp_nqel = fps->fs_qsize;
686 	f->__fp_fsr = fps->fs_fsr;
687 	if (f->__fp_q != NULL) {
688 		size_t sz = f->__fp_nqel * f->__fp_nqsize;
689 		if (sz > sizeof(fps->fs_queue)) {
690 #ifdef DIAGNOSTIC
691 			printf("getcontext: fp_queue too large\n");
692 #endif
693 			return;
694 		}
695 		if (copyout(fps->fs_queue, f->__fp_q, sz) != 0) {
696 #ifdef DIAGNOSTIC
697 			printf("getcontext: copy of fp_queue failed\n");
699 #endif
700 			return;
701 		}
702 	}
703 	f->__fp_busy = 0;	/* XXX: How do we determine that? */
704 	*flags |= _UC_FPU;
705 #endif
706 
707 	return;
708 }
709 
710 /*
711  * Restore the machine context specified.
712  * Return to previous pc and psl as specified by
713  * context left by sendsig. Check carefully to
714  * make sure that the user has not modified the
715  * psl to gain improper privileges or to cause
716  * a machine fault.
717  * This is almost like sigreturn() and it shows.
718  */
719 int
720 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
721 {
722 	struct trapframe *tf;
723 	const __greg_t *r = mcp->__gregs;
724 	struct proc *p = l->l_proc;
725 #ifdef FPU_CONTEXT
726 	__fpregset_t *f = &mcp->__fpregs;
727 	struct fpstate *fps = l->l_md.md_fpstate;
728 #endif
729 
730 	write_user_windows();
731 	if (rwindow_save(l)) {
732 		mutex_enter(p->p_lock);
733 		sigexit(l, SIGILL);
734 	}
735 
736 #ifdef DEBUG
737 	if (sigdebug & SDB_FOLLOW)
738 		printf("__setmcontext: %s[%d], __mcontext %p\n",
739 		    l->l_proc->p_comm, l->l_proc->p_pid, mcp);
740 #endif
741 
742 	if (flags & _UC_CPU) {
743 		/* Restore register context. */
744 		tf = (struct trapframe *)l->l_md.md_tf;
745 
746 		/*
747 		 * Only the icc bits in the psr are used, so it need not be
748 		 * verified.  pc and npc must be multiples of 4.  This is all
749 		 * that is required; if it holds, just do it.
750 		 */
751 		if (((r[_REG_PC] | r[_REG_nPC]) & 3) != 0) {
752 			printf("pc or npc are not multiples of 4!\n");
753 			return (EINVAL);
754 		}
755 
756 		/* take only psr ICC field */
757 		tf->tf_psr = (tf->tf_psr & ~PSR_ICC) |
758 		    (r[_REG_PSR] & PSR_ICC);
759 		tf->tf_pc = r[_REG_PC];
760 		tf->tf_npc = r[_REG_nPC];
761 		tf->tf_y = r[_REG_Y];
762 
763 		/* Restore everything */
764 		tf->tf_global[1] = r[_REG_G1];
765 		tf->tf_global[2] = r[_REG_G2];
766 		tf->tf_global[3] = r[_REG_G3];
767 		tf->tf_global[4] = r[_REG_G4];
768 		tf->tf_global[5] = r[_REG_G5];
769 		tf->tf_global[6] = r[_REG_G6];
770 		tf->tf_global[7] = r[_REG_G7];
771 
772 		tf->tf_out[0] = r[_REG_O0];
773 		tf->tf_out[1] = r[_REG_O1];
774 		tf->tf_out[2] = r[_REG_O2];
775 		tf->tf_out[3] = r[_REG_O3];
776 		tf->tf_out[4] = r[_REG_O4];
777 		tf->tf_out[5] = r[_REG_O5];
778 		tf->tf_out[6] = r[_REG_O6];
779 		tf->tf_out[7] = r[_REG_O7];
780 	}
781 
782 #ifdef FPU_CONTEXT
783 	if (flags & _UC_FPU) {
784 		/*
785 		 * Set the floating point registers
786 		 */
787 		int error;
788 		size_t sz = f->__fp_nqel * f->__fp_nqsize;
789 		if (sz > sizeof(fps->fs_queue)) {
790 #ifdef DIAGNOSTIC
791 			printf("setmcontext: fp_queue too large\n");
792 #endif
793 			return (EINVAL);
794 		}
795 		memcpy(fps->fs_regs, f->__fpu_regs, sizeof(fps->fs_regs));
796 		fps->fs_qsize = f->__fp_nqel;
797 		fps->fs_fsr = f->__fp_fsr;
798 		if (f->__fp_q != NULL) {
799 			if ((error = copyin(f->__fp_q, fps->fs_queue, sz)) != 0) {
800 #ifdef DIAGNOSTIC
801 				printf("setmcontext: fp_queue copy failed\n");
802 #endif
803 				return (error);
804 			}
805 		}
806 	}
807 #endif
808 
809 	mutex_enter(p->p_lock);
810 	if (flags & _UC_SETSTACK)
811 		l->l_sigstk.ss_flags |= SS_ONSTACK;
812 	if (flags & _UC_CLRSTACK)
813 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
814 	mutex_exit(p->p_lock);
815 
816 	return (0);
817 }
818 
819 int	waittime = -1;
820 
821 void
822 cpu_reboot(int howto, char *user_boot_string)
823 {
824 	int i;
825 	char opts[4];
826 	static char str[128];
827 
828 	/* If system is cold, just halt. */
829 	if (cold) {
830 		howto |= RB_HALT;
831 		goto haltsys;
832 	}
833 
834 #if NFB > 0
835 	fb_unblank();
836 #endif
837 	boothowto = howto;
838 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
839 		extern struct lwp lwp0;
840 
841 		/* XXX protect against curlwp->p_stats.foo refs in sync() */
842 		if (curlwp == NULL)
843 			curlwp = &lwp0;
844 		waittime = 0;
845 		vfs_shutdown();
846 
847 		/*
848 		 * If we've been adjusting the clock, the todr
849 		 * will be out of synch; adjust it now.
850 	 * resettodr() will do this only if inittodr()
851 		 * has already been called.
852 		 */
853 		resettodr();
854 	}
855 
856 	/* Disable interrupts, but still allow IPIs on MP systems */
857 	if (sparc_ncpus > 1)
858 		(void)splsched();
859 	else
860 		(void)splhigh();
861 
862 #if defined(MULTIPROCESSOR)
863 	/* Direct system interrupts to this CPU, since dump uses polled I/O */
864 	if (CPU_ISSUN4M)
865 		*((u_int *)ICR_ITR) = cpuinfo.mid - 8;
866 #endif
867 
868 	/* If rebooting and a dump is requested, do it. */
869 #if 0
870 	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
871 #else
872 	if (howto & RB_DUMP)
873 #endif
874 		dumpsys();
875 
876  haltsys:
877 
878 	/* Run any shutdown hooks. */
879 	doshutdownhooks();
880 
881 	pmf_system_shutdown(boothowto);
882 
883 	/* If powerdown was requested, do it. */
884 	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
885 		prom_interpret("power-off");
886 #if NPOWER > 0
887 		/* Fall back on `power' device if the PROM can't do it */
888 		powerdown();
889 #endif
890 		printf("WARNING: powerdown not supported\n");
891 		/*
892 		 * RB_POWERDOWN implies RB_HALT... fall into it...
893 		 */
894 	}
895 
896 	if (howto & RB_HALT) {
897 #if defined(MULTIPROCESSOR)
898 		mp_halt_cpus();
899 		printf("cpu%d halted\n\n", cpu_number());
900 #else
901 		printf("halted\n\n");
902 #endif
903 		prom_halt();
904 	}
905 
906 	printf("rebooting\n\n");
907 
908 	i = 1;
909 	if (howto & RB_SINGLE)
910 		opts[i++] = 's';
911 	if (howto & RB_KDB)
912 		opts[i++] = 'd';
913 	opts[i] = '\0';
914 	opts[0] = (i > 1) ? '-' : '\0';
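	/*
	 * E.g. RB_SINGLE|RB_KDB yields "-sd", which is appended below
	 * to the boot string handed to the PROM.
	 */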
915 
916 	if (user_boot_string && *user_boot_string) {
917 		i = strlen(user_boot_string);
918 		if (i > sizeof(str) - sizeof(opts) - 1)
919 			prom_boot(user_boot_string);	/* XXX */
920 		memcpy(str, user_boot_string, i);
921 		if (opts[0] != '\0')
922 			str[i] = ' ';
923 	}
924 	strcat(str, opts);
925 	prom_boot(str);
926 	/*NOTREACHED*/
927 }
928 
929 uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
930 int	dumpsize = 0;		/* also for savecore */
931 long	dumplo = 0;
932 
933 void
934 cpu_dumpconf(void)
935 {
936 	const struct bdevsw *bdev;
937 	int nblks, dumpblks;
938 
939 	if (dumpdev == NODEV)
940 		return;
941 	bdev = bdevsw_lookup(dumpdev);
942 	if (bdev == NULL || bdev->d_psize == NULL)
943 		return;
944 
945 	nblks = (*bdev->d_psize)(dumpdev);
946 
947 	dumpblks = ctod(physmem) + pmap_dumpsize();
948 	if (dumpblks > (nblks - ctod(1)))
949 		/*
950 		 * dump size is too big for the partition.
951 		 * Note, we safeguard a click at the front for a
952 		 * possible disk label.
953 		 */
954 		return;
955 
956 	/* Put the dump at the end of the partition */
957 	dumplo = nblks - dumpblks;
958 
959 	/*
960 	 * savecore(8) expects dumpsize to be the number of pages
961 	 * of actual core dumped (i.e. excluding the MMU stuff).
962 	 */
963 	dumpsize = physmem;
964 }
965 
966 #define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
967 static vaddr_t dumpspace;
968 
969 void *
970 reserve_dumppages(void *p)
971 {
972 
973 	dumpspace = (vaddr_t)p;
974 	return ((char *)p + BYTES_PER_DUMP);
975 }
976 
977 /*
978  * Write a crash dump.
979  */
980 void
981 dumpsys(void)
982 {
983 	const struct bdevsw *bdev;
984 	int psize;
985 	daddr_t blkno;
986 	int (*dump)(dev_t, daddr_t, void *, size_t);
987 	int error = 0;
988 	struct memarr *mp;
989 	int nmem;
990 	extern struct memarr pmemarr[];
991 	extern int npmemarr;
992 
993 	/* copy registers to memory */
994 	snapshot(cpuinfo.curpcb);
995 	stackdump();
996 
997 	if (dumpdev == NODEV)
998 		return;
999 	bdev = bdevsw_lookup(dumpdev);
1000 	if (bdev == NULL || bdev->d_psize == NULL)
1001 		return;
1002 
1003 	/*
1004 	 * For dumps during autoconfiguration,
1005 	 * if the dump device has already been configured...
1006 	 */
1007 	if (dumpsize == 0)
1008 		cpu_dumpconf();
1009 	if (dumplo <= 0) {
1010 		printf("\ndump to dev %u,%u not possible\n",
1011 		    major(dumpdev), minor(dumpdev));
1012 		return;
1013 	}
1014 	printf("\ndumping to dev %u,%u offset %ld\n",
1015 	    major(dumpdev), minor(dumpdev), dumplo);
1016 
1017 	psize = (*bdev->d_psize)(dumpdev);
1018 	printf("dump ");
1019 	if (psize == -1) {
1020 		printf("area unavailable\n");
1021 		return;
1022 	}
1023 	blkno = dumplo;
1024 	dump = bdev->d_dump;
1025 
1026 	error = pmap_dumpmmu(dump, blkno);
1027 	blkno += pmap_dumpsize();
1028 
1029 	for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
1030 		unsigned i = 0, n;
1031 		int maddr = mp->addr;
1032 
1033 		if (maddr == 0) {
1034 			/* Skip first page at physical address 0 */
1035 			maddr += PAGE_SIZE;
1036 			i += PAGE_SIZE;
1037 			blkno += btodb(PAGE_SIZE);
1038 		}
1039 
1040 		for (; i < mp->len; i += n) {
1041 			n = mp->len - i;
1042 			if (n > BYTES_PER_DUMP)
1043 				 n = BYTES_PER_DUMP;
1044 
1045 			/* print out how many MBs we have dumped */
1046 			if (i && (i % (1024*1024)) == 0)
1047 				printf_nolog("%d ", i / (1024*1024));
1048 
1049 			(void) pmap_map(dumpspace, maddr, maddr + n,
1050 					VM_PROT_READ);
1051 			error = (*dump)(dumpdev, blkno,
1052 					(void *)dumpspace, (int)n);
1053 			pmap_kremove(dumpspace, n);
1054 			pmap_update(pmap_kernel());
1055 			if (error)
1056 				break;
1057 			maddr += n;
1058 			blkno += btodb(n);
1059 		}
1060 	}
1061 
1062 	switch (error) {
1063 
1064 	case ENXIO:
1065 		printf("device bad\n");
1066 		break;
1067 
1068 	case EFAULT:
1069 		printf("device not ready\n");
1070 		break;
1071 
1072 	case EINVAL:
1073 		printf("area improper\n");
1074 		break;
1075 
1076 	case EIO:
1077 		printf("i/o error\n");
1078 		break;
1079 
1080 	case 0:
1081 		printf("succeeded\n");
1082 		break;
1083 
1084 	default:
1085 		printf("error %d\n", error);
1086 		break;
1087 	}
1088 }
1089 
1090 /*
1091  * Get the frame pointer and dump the stack as best we can; don't
1092  * leave the current stack page.
1093  */
1094 void
1095 stackdump(void)
1096 {
1097 	struct frame *fp = getfp(), *sfp;
1098 
1099 	sfp = fp;
1100 	printf("Frame pointer is at %p\n", fp);
1101 	printf("Call traceback:\n");
1102 	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
1103 		printf("  pc = 0x%x  args = (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp = %p\n",
1104 		    fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
1105 		    fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6],
1106 		    fp->fr_fp);
1107 		fp = fp->fr_fp;
1108 	}
1109 }
1110 
1111 int
1112 cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
1113 {
1114 
1115 	return (ENOEXEC);
1116 }
1117 
1118 #if defined(SUN4)
1119 void
1120 oldmon_w_trace(u_long va)
1121 {
1122 	struct cpu_info * const ci = curcpu();
1123 	u_long stop;
1124 	struct frame *fp;
1125 
1126 	printf("curlwp = %p, pid %d\n", curlwp, curproc->p_pid);
1127 
1128 	printf("uvm: cpu%u: swtch %"PRIu64", trap %"PRIu64", sys %"PRIu64", "
1129 	    "intr %"PRIu64", soft %"PRIu64", faults %"PRIu64"\n",
1130 	    cpu_index(ci), ci->ci_data.cpu_nswtch, ci->ci_data.cpu_ntrap,
1131 	    ci->ci_data.cpu_nsyscall, ci->ci_data.cpu_nintr,
1132 	    ci->ci_data.cpu_nsoft, ci->ci_data.cpu_nfault);
1133 	write_user_windows();
1134 
1135 #define round_up(x) (( (x) + (PAGE_SIZE-1) ) & (~(PAGE_SIZE-1)) )
1136 
1137 	printf("\nstack trace with sp = 0x%lx\n", va);
1138 	stop = round_up(va);
1139 	printf("stop at 0x%lx\n", stop);
1140 	fp = (struct frame *) va;
1141 	while (round_up((u_long) fp) == stop) {
1142 		printf("  0x%x(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp %p\n", fp->fr_pc,
1143 		    fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
1144 		    fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6], fp->fr_fp);
1145 		fp = fp->fr_fp;
1146 		if (fp == NULL)
1147 			break;
1148 	}
1149 	printf("end of stack trace\n");
1150 }
1151 
1152 void
1153 oldmon_w_cmd(u_long va, char *ar)
1154 {
1155 	switch (*ar) {
1156 	case '\0':
1157 		switch (va) {
1158 		case 0:
1159 			panic("g0 panic");
1160 		case 4:
1161 			printf("w: case 4\n");
1162 			break;
1163 		default:
1164 			printf("w: unknown case %ld\n", va);
1165 			break;
1166 		}
1167 		break;
1168 	case 't':
1169 		oldmon_w_trace(va);
1170 		break;
1171 	default:
1172 		printf("w: arg not allowed\n");
1173 	}
1174 }
1175 
1176 int
1177 ldcontrolb(void *addr)
1178 {
1179 	struct pcb *xpcb;
1180 	u_long saveonfault;
1181 	int res;
1182 	int s;
1183 
1184 	if (CPU_ISSUN4M || CPU_ISSUN4D) {
1185 		printf("warning: ldcontrolb called on sun4m/sun4d\n");
1186 		return 0;
1187 	}
1188 
1189 	s = splhigh();
1190 	xpcb = lwp_getpcb(curlwp);
1191 
1192 	saveonfault = (u_long)xpcb->pcb_onfault;
1193 	res = xldcontrolb(addr, xpcb);
1194 	xpcb->pcb_onfault = (void *)saveonfault;
1195 
1196 	splx(s);
1197 	return (res);
1198 }
1199 #endif /* SUN4 */
1200 
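/*
 * Zero `l' bytes at `vb' using halfword (16-bit) stores: a stray
 * leading or trailing byte is cleared separately so the middle loop
 * runs on an aligned region.
 */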
1201 void
1202 wzero(void *vb, u_int l)
1203 {
1204 	u_char *b = vb;
1205 	u_char *be = b + l;
1206 	u_short *sp;
1207 
1208 	if (l == 0)
1209 		return;
1210 
1211 	/* front, */
1212 	if ((u_long)b & 1)
1213 		*b++ = 0;
1214 
1215 	/* back, */
1216 	if (b != be && ((u_long)be & 1) != 0) {
1217 		be--;
1218 		*be = 0;
1219 	}
1220 
1221 	/* and middle. */
1222 	sp = (u_short *)b;
1223 	while (sp != (u_short *)be)
1224 		*sp++ = 0;
1225 }
1226 
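/*
 * Copy `l' bytes from `vb1' to `vb2' using halfword loads from an
 * aligned source; when the destination is odd-aligned, each halfword
 * is stored as two single bytes instead.
 */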
1227 void
1228 wcopy(const void *vb1, void *vb2, u_int l)
1229 {
1230 	const u_char *b1e, *b1 = vb1;
1231 	u_char *b2 = vb2;
1232 	const u_short *sp;
1233 	int bstore = 0;
1234 
1235 	if (l == 0)
1236 		return;
1237 
1238 	/* front, */
1239 	if ((u_long)b1 & 1) {
1240 		*b2++ = *b1++;
1241 		l--;
1242 	}
1243 
1244 	/* middle, */
1245 	sp = (const u_short *)b1;
1246 	b1e = b1 + l;
1247 	if (l & 1)
1248 		b1e--;
1249 	bstore = (u_long)b2 & 1;
1250 
1251 	while (sp < (const u_short *)b1e) {
1252 		if (bstore) {
1253 			b2[1] = *sp & 0xff;
1254 			b2[0] = *sp >> 8;
1255 		} else
1256 			*((short *)b2) = *sp;
1257 		sp++;
1258 		b2 += 2;
1259 	}
1260 
1261 	/* and back. */
1262 	if (l & 1)
1263 		*b2 = *b1e;
1264 }
1265 
1266 #ifdef MODULAR
1267 void
1268 module_init_md(void)
1269 {
1270 }
1271 #endif
1272 
1273 /*
1274  * Common function for DMA map creation.  May be called by bus-specific
1275  * DMA map creation functions.
1276  */
1277 int
1278 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
1279 		   bus_size_t maxsegsz, bus_size_t boundary, int flags,
1280 		   bus_dmamap_t *dmamp)
1281 {
1282 	struct sparc_bus_dmamap *map;
1283 	void *mapstore;
1284 	size_t mapsize;
1285 
1286 	/*
1287 	 * Allocate and initialize the DMA map.  The end of the map
1288 	 * is a variable-sized array of segments, so we allocate enough
1289 	 * room for them in one shot.
1290 	 *
1291 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
1292 	 * of ALLOCNOW notifies others that we've reserved these resources,
1293 	 * and they are not to be freed.
1294 	 *
1295 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
1296 	 * the (nsegments - 1).
1297 	 */
1298 	mapsize = sizeof(struct sparc_bus_dmamap) +
1299 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
1300 	if ((mapstore = malloc(mapsize, M_DMAMAP,
1301 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1302 		return (ENOMEM);
1303 
1304 	memset(mapstore, 0, mapsize);
1305 	map = (struct sparc_bus_dmamap *)mapstore;
1306 	map->_dm_size = size;
1307 	map->_dm_segcnt = nsegments;
1308 	map->_dm_maxmaxsegsz = maxsegsz;
1309 	map->_dm_boundary = boundary;
1310 	map->_dm_align = PAGE_SIZE;
1311 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
1312 	map->dm_maxsegsz = maxsegsz;
1313 	map->dm_mapsize = 0;		/* no valid mappings */
1314 	map->dm_nsegs = 0;
1315 
1316 	*dmamp = map;
1317 	return (0);
1318 }
1319 
1320 /*
1321  * Common function for DMA map destruction.  May be called by bus-specific
1322  * DMA map destruction functions.
1323  */
1324 void
1325 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
1326 {
1327 
1328 	free(map, M_DMAMAP);
1329 }
1330 
1331 /*
1332  * Like _bus_dmamap_load(), but for mbufs.
1333  */
1334 int
1335 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
1336 		      struct mbuf *m, int flags)
1337 {
1338 
1339 	panic("_bus_dmamap_load_mbuf: not implemented");
1340 }
1341 
1342 /*
1343  * Like _bus_dmamap_load(), but for uios.
1344  */
1345 int
1346 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
1347 		     struct uio *uio, int flags)
1348 {
1349 
1350 	panic("_bus_dmamap_load_uio: not implemented");
1351 }
1352 
1353 /*
1354  * Like _bus_dmamap_load(), but for raw memory allocated with
1355  * bus_dmamem_alloc().
1356  */
1357 int
1358 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
1359 		     bus_dma_segment_t *segs, int nsegs, bus_size_t size,
1360 		     int flags)
1361 {
1362 
1363 	panic("_bus_dmamap_load_raw: not implemented");
1364 }
1365 
1366 /*
1367  * Common function for DMA map synchronization.  May be called
1368  * by bus-specific DMA map synchronization functions.
1369  */
1370 void
1371 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
1372 		 bus_addr_t offset, bus_size_t len, int ops)
1373 {
1374 }
1375 
1376 /*
1377  * Common function for DMA-safe memory allocation.  May be called
1378  * by bus-specific DMA memory allocation functions.
1379  */
1380 int
1381 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
1382 		  bus_size_t alignment, bus_size_t boundary,
1383 		  bus_dma_segment_t *segs, int nsegs, int *rsegs,
1384 		  int flags)
1385 {
1386 	vaddr_t low, high;
1387 	struct pglist *mlist;
1388 	int error;
1389 
1390 	/* Always round the size. */
1391 	size = round_page(size);
1392 	low = vm_first_phys;
1393 	high = vm_first_phys + vm_num_phys - PAGE_SIZE;
1394 
1395 	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
1396 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1397 		return (ENOMEM);
1398 
1399 	/*
1400 	 * Allocate pages from the VM system.
1401 	 */
1402 	error = uvm_pglistalloc(size, low, high, 0, 0,
1403 				mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1404 	if (error)
1405 		return (error);
1406 
1407 	/*
1408 	 * Simply keep a pointer around to the linked list, so
1409 	 * bus_dmamap_free() can return it.
1410 	 *
1411 	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
1412 	 * ARE IN OUR CUSTODY.
1413 	 */
1414 	segs[0]._ds_mlist = mlist;
1415 
1416 	/*
1417 	 * We now have physical pages, but no DVMA addresses yet. These
1418 	 * will be allocated in bus_dmamap_load*() routines. Hence we
1419 	 * save any alignment and boundary requirements in this DMA
1420 	 * segment.
1421 	 */
1422 	segs[0].ds_addr = 0;
1423 	segs[0].ds_len = 0;
1424 	segs[0]._ds_va = 0;
1425 	*rsegs = 1;
1426 	return (0);
1427 }
1428 
1429 /*
1430  * Common function for freeing DMA-safe memory.  May be called by
1431  * bus-specific DMA memory free functions.
1432  */
1433 void
1434 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1435 {
1436 
1437 	if (nsegs != 1)
1438 		panic("bus_dmamem_free: nsegs = %d", nsegs);
1439 
1440 	/*
1441 	 * Return the list of pages back to the VM system.
1442 	 */
1443 	uvm_pglistfree(segs[0]._ds_mlist);
1444 	free(segs[0]._ds_mlist, M_DEVBUF);
1445 }
1446 
1447 /*
1448  * Common function for unmapping DMA-safe memory.  May be called by
1449  * bus-specific DMA memory unmapping functions.
1450  */
1451 void
1452 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1453 {
1454 
1455 #ifdef DIAGNOSTIC
1456 	if ((u_long)kva & PAGE_MASK)
1457 		panic("_bus_dmamem_unmap");
1458 #endif
1459 
1460 	size = round_page(size);
1461 	pmap_kremove((vaddr_t)kva, size);
1462 	pmap_update(pmap_kernel());
1463 	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1464 }
1465 
1466 /*
1467  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
1468  * bus-specific DMA mmap(2)'ing functions.
1469  */
1470 paddr_t
1471 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1472 		 off_t off, int prot, int flags)
1473 {
1474 
1475 	panic("_bus_dmamem_mmap: not implemented");
1476 }
1477 
1478 /*
1479  * Utility to allocate an aligned kernel virtual address range
1480  */
1481 vaddr_t
1482 _bus_dma_valloc_skewed(size_t size, u_long boundary, u_long align, u_long skew)
1483 {
1484 	size_t oversize;
1485 	vaddr_t va, sva;
1486 
1487 	/*
1488 	 * Find a region of kernel virtual addresses that is aligned
1489 	 * to the given address modulo the requested alignment, i.e.
1490 	 *
1491 	 *	(va - skew) == 0 mod align
1492 	 *
1493 	 * The following conditions apply to the arguments:
1494 	 *
1495 	 *	- `size' must be a multiple of the VM page size
1496 	 *	- `align' must be a power of two
1497 	 *	   and greater than or equal to the VM page size
1498 	 *	- `skew' must be smaller than `align'
1499 	 *	- `size' must be smaller than `boundary'
1500 	 */
1501 
1502 #ifdef DIAGNOSTIC
1503 	if ((size & PAGE_MASK) != 0)
1504 		panic("_bus_dma_valloc_skewed: invalid size %lx", size);
1505 	if ((align & PAGE_MASK) != 0)
1506 		panic("_bus_dma_valloc_skewed: invalid alignment %lx", align);
1507 	if (align < skew)
1508 		panic("_bus_dma_valloc_skewed: align %lx < skew %lx",
1509 			align, skew);
1510 #endif
1511 
1512 	/* XXX - Implement this! */
1513 	if (boundary) {
1514 		printf("_bus_dma_valloc_skewed: "
1515 			"boundary check not implemented\n");
1516 		return (0);
1517 	}
1518 
1519 	/*
1520 	 * First, find a region large enough to contain any aligned chunk
1521 	 */
1522 	oversize = size + align - PAGE_SIZE;
1523 	sva = vm_map_min(kernel_map);
1524 	if (uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET,
1525 	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
1526 	    UVM_ADV_RANDOM, UVM_FLAG_NOWAIT)))
1527 		return (0);
1528 
1529 	/*
1530 	 * Compute start of aligned region
1531 	 */
1532 	va = sva;
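	/*
	 * The mask expression below advances `va' to the next address
	 * congruent to `skew' modulo `align'.  Worked example
	 * (illustrative numbers): align = 0x10000, skew = 0x2000 and
	 * va = 0x30345000 give va = 0x30352000, and
	 * (0x30352000 - 0x2000) is a multiple of 0x10000.
	 */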
1533 	va += (skew + align - va) & (align - 1);
1534 
1535 	/*
1536 	 * Return excess virtual addresses
1537 	 */
1538 	if (va != sva)
1539 		(void)uvm_unmap(kernel_map, sva, va);
1540 	if (va + size != sva + oversize)
1541 		(void)uvm_unmap(kernel_map, va + size, sva + oversize);
1542 
1543 	return (va);
1544 }
1545 
1546 /* sun4/sun4c DMA map functions */
1547 int	sun4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
1548 				bus_size_t, struct proc *, int);
1549 int	sun4_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
1550 				bus_dma_segment_t *, int, bus_size_t, int);
1551 void	sun4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
1552 int	sun4_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
1553 				int, size_t, void **, int);
1554 
1555 /*
1556  * sun4/sun4c: load DMA map with a linear buffer.
1557  */
1558 int
1559 sun4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
1560 		 void *buf, bus_size_t buflen,
1561 		 struct proc *p, int flags)
1562 {
1563 	bus_size_t sgsize;
1564 	vaddr_t va = (vaddr_t)buf;
1565 	int pagesz = PAGE_SIZE;
1566 	vaddr_t dva;
1567 	pmap_t pmap;
1568 
1569 	/*
1570 	 * Make sure that on error condition we return "no valid mappings".
1571 	 */
1572 	map->dm_nsegs = 0;
1573 
1574 	if (buflen > map->_dm_size)
1575 		return (EINVAL);
1576 
1577 	cache_flush(buf, buflen);
1578 
1579 	if ((map->_dm_flags & BUS_DMA_24BIT) == 0) {
1580 		/*
1581 		 * XXX Need to implement "don't DMA across this boundary".
1582 		 */
1583 		if (map->_dm_boundary != 0) {
1584 			bus_addr_t baddr;
1585 
1586 			/* Calculate first boundary line after `buf' */
1587 			baddr = ((bus_addr_t)va + map->_dm_boundary) &
1588 					-map->_dm_boundary;
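			/*
			 * (Negating the boundary yields the proper mask
			 * because DMA boundaries are powers of two.)
			 */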
1589 
1590 			/*
1591 			 * If the requested segment crosses the boundary,
1592 			 * we can't grant a direct map. For now, steal some
1593 			 * space from the `24BIT' map instead.
1594 			 *
1595 			 * (XXX - no overflow detection here)
1596 			 */
1597 			if (buflen > (baddr - (bus_addr_t)va))
1598 				goto no_fit;
1599 		}
1600 		map->dm_mapsize = buflen;
1601 		map->dm_nsegs = 1;
1602 		map->dm_segs[0].ds_addr = (bus_addr_t)va;
1603 		map->dm_segs[0].ds_len = buflen;
1604 		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
1605 		return (0);
1606 	}
1607 
1608 no_fit:
1609 	sgsize = round_page(buflen + (va & (pagesz - 1)));
1610 
1611 	if (extent_alloc(dvmamap24, sgsize, pagesz, map->_dm_boundary,
1612 			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
1613 			 &dva) != 0) {
1614 		return (ENOMEM);
1615 	}
1616 
1617 	/*
1618 	 * We always use just one segment.
1619 	 */
1620 	map->dm_mapsize = buflen;
1621 	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
1622 	map->dm_segs[0].ds_len = buflen;
1623 	map->dm_segs[0]._ds_sgsize = sgsize;
1624 
1625 	if (p != NULL)
1626 		pmap = p->p_vmspace->vm_map.pmap;
1627 	else
1628 		pmap = pmap_kernel();
1629 
1630 	for (; buflen > 0; ) {
1631 		paddr_t pa;
1632 
1633 		/*
1634 		 * Get the physical address for this page.
1635 		 */
1636 		(void) pmap_extract(pmap, va, &pa);
1637 
1638 		/*
1639 		 * Compute the segment size, and adjust counts.
1640 		 */
1641 		sgsize = pagesz - (va & (pagesz - 1));
1642 		if (buflen < sgsize)
1643 			sgsize = buflen;
1644 
1645 #ifdef notyet
1646 #if defined(SUN4)
1647 		if (have_iocache)
1648 			pa |= PG_IOC;
1649 #endif
1650 #endif
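		/*
		 * Enter the page at its DVMA address; PMAP_NC makes the
		 * mapping non-cacheable so CPU and device views stay
		 * coherent, and `pa & -pagesz' truncates to a page
		 * boundary.
		 */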
1651 		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
1652 		    VM_PROT_READ | VM_PROT_WRITE, 0);
1653 
1654 		dva += pagesz;
1655 		va += sgsize;
1656 		buflen -= sgsize;
1657 	}
1658 	pmap_update(pmap_kernel());
1659 
1660 	map->dm_nsegs = 1;
1661 	return (0);
1662 }
1663 
1664 /*
1665  * Like _bus_dmamap_load(), but for raw memory allocated with
1666  * bus_dmamem_alloc().
1667  */
1668 int
1669 sun4_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
1670 		     bus_dma_segment_t *segs, int nsegs, bus_size_t size,
1671 		     int flags)
1672 {
1673 	struct vm_page *m;
1674 	paddr_t pa;
1675 	vaddr_t dva;
1676 	bus_size_t sgsize;
1677 	struct pglist *mlist;
1678 	int pagesz = PAGE_SIZE;
1679 	int error;
1680 
1681 	map->dm_nsegs = 0;
1682 	sgsize = (size + pagesz - 1) & -pagesz;
1683 
1684 	/* Allocate DVMA addresses */
1685 	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
1686 		error = extent_alloc(dvmamap24, sgsize, pagesz,
1687 					map->_dm_boundary,
1688 					(flags & BUS_DMA_NOWAIT) == 0
1689 						? EX_WAITOK : EX_NOWAIT,
1690 					&dva);
1691 		if (error)
1692 			return (error);
1693 	} else {
1694 		/* Any properly aligned virtual address will do */
1695 		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
1696 					     pagesz, 0);
1697 		if (dva == 0)
1698 			return (ENOMEM);
1699 	}
1700 
1701 	map->dm_segs[0].ds_addr = dva;
1702 	map->dm_segs[0].ds_len = size;
1703 	map->dm_segs[0]._ds_sgsize = sgsize;
1704 
1705 	/* Map physical pages into IOMMU */
1706 	mlist = segs[0]._ds_mlist;
1707 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
1708 		if (sgsize == 0)
1709 			panic("sun4_dmamap_load_raw: size botch");
1710 		pa = VM_PAGE_TO_PHYS(m);
1711 #ifdef notyet
1712 #if defined(SUN4)
1713 		if (have_iocache)
1714 			pa |= PG_IOC;
1715 #endif
1716 #endif
1717 		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
1718 		    VM_PROT_READ | VM_PROT_WRITE, 0);
1719 
1720 		dva += pagesz;
1721 		sgsize -= pagesz;
1722 	}
1723 	pmap_update(pmap_kernel());
1724 
1725 	map->dm_nsegs = 1;
1726 	map->dm_mapsize = size;
1727 
1728 	return (0);
1729 }
1730 
1731 /*
1732  * sun4/sun4c function for unloading a DMA map.
1733  */
1734 void
1735 sun4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
1736 {
1737 	bus_dma_segment_t *segs = map->dm_segs;
1738 	int nsegs = map->dm_nsegs;
1739 	int flags = map->_dm_flags;
1740 	vaddr_t dva;
1741 	bus_size_t len;
1742 	int i, s, error;
1743 
1744 	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
1745 
1746 	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
1747 		/* Nothing to release */
1748 		map->dm_mapsize = 0;
1749 		map->dm_nsegs = 0;
1750 		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
1751 		return;
1752 	}
1753 
1754 	for (i = 0; i < nsegs; i++) {
1755 		dva = segs[i].ds_addr & -PAGE_SIZE;
1756 		len = segs[i]._ds_sgsize;
1757 
1758 		pmap_kremove(dva, len);
1759 
1760 		if ((flags & BUS_DMA_24BIT) != 0) {
1761 			s = splhigh();
1762 			error = extent_free(dvmamap24, dva, len, EX_NOWAIT);
1763 			splx(s);
1764 			if (error != 0)
1765 				printf("warning: %ld bytes of DVMA space lost\n", len);
1766 		} else {
1767 			uvm_unmap(kernel_map, dva, dva + len);
1768 		}
1769 	}
1770 	pmap_update(pmap_kernel());
1771 
1772 	/* Mark the mappings as invalid. */
1773 	map->dm_mapsize = 0;
1774 	map->dm_nsegs = 0;
1775 }
1776 
1777 /*
1778  * Common function for mapping DMA-safe memory.  May be called by
1779  * bus-specific DMA memory map functions.
1780  */
1781 int
1782 sun4_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1783 		size_t size, void **kvap, int flags)
1784 {
1785 	struct vm_page *m;
1786 	vaddr_t va;
1787 	struct pglist *mlist;
1788 	const uvm_flag_t kmflags =
1789 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
1790 
1791 	if (nsegs != 1)
1792 		panic("sun4_dmamem_map: nsegs = %d", nsegs);
1793 
1794 	size = round_page(size);
1795 
1796 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
1797 	if (va == 0)
1798 		return (ENOMEM);
1799 
1800 	segs[0]._ds_va = va;
1801 	*kvap = (void *)va;
1802 
1803 	mlist = segs[0]._ds_mlist;
1804 	TAILQ_FOREACH(m, mlist, pageq.queue) {
1805 		paddr_t pa;
1806 
1807 		if (size == 0)
1808 			panic("sun4_dmamem_map: size botch");
1809 
1810 		pa = VM_PAGE_TO_PHYS(m);
1811 		pmap_kenter_pa(va, pa | PMAP_NC,
1812 		    VM_PROT_READ | VM_PROT_WRITE, 0);
1813 
1814 		va += PAGE_SIZE;
1815 		size -= PAGE_SIZE;
1816 	}
1817 	pmap_update(pmap_kernel());
1818 
1819 	return (0);
1820 }
1821 
1822 
1823 struct sparc_bus_dma_tag mainbus_dma_tag = {
1824 	NULL,
1825 	_bus_dmamap_create,
1826 	_bus_dmamap_destroy,
1827 	sun4_dmamap_load,
1828 	_bus_dmamap_load_mbuf,
1829 	_bus_dmamap_load_uio,
1830 	sun4_dmamap_load_raw,
1831 	sun4_dmamap_unload,
1832 	_bus_dmamap_sync,
1833 
1834 	_bus_dmamem_alloc,
1835 	_bus_dmamem_free,
1836 	sun4_dmamem_map,
1837 	_bus_dmamem_unmap,
1838 	_bus_dmamem_mmap
1839 };
1840 
1841 
1842 /*
1843  * Base bus space handlers.
1844  */
1845 static int	sparc_bus_map(bus_space_tag_t, bus_addr_t,
1846 				    bus_size_t, int, vaddr_t,
1847 				    bus_space_handle_t *);
1848 static int	sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t,
1849 				     bus_size_t);
1850 static int	sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t,
1851 					 bus_size_t, bus_size_t,
1852 					 bus_space_handle_t *);
1853 static paddr_t	sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t,
1854 				    int, int);
1855 static void	*sparc_mainbus_intr_establish(bus_space_tag_t, int, int,
1856 						   int (*)(void *),
1857 						   void *,
1858 						   void (*)(void));
1859 static void     sparc_bus_barrier(bus_space_tag_t, bus_space_handle_t,
1860 					bus_size_t, bus_size_t, int);
1861 
1862 /*
1863  * Allocate a new bus tag and have it inherit the methods of the
1864  * given parent.
1865  */
1866 bus_space_tag_t
1867 bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
1868 {
1869 	struct sparc_bus_space_tag *sbt;
1870 
1871 	sbt = malloc(sizeof(struct sparc_bus_space_tag),
1872 		     M_DEVBUF, M_NOWAIT|M_ZERO);
1873 	if (sbt == NULL)
1874 		return (NULL);
1875 
1876 	if (parent) {
1877 		memcpy(sbt, parent, sizeof(*sbt));
1878 		sbt->parent = parent;
1879 		sbt->ranges = NULL;
1880 		sbt->nranges = 0;
1881 	}
1882 
1883 	sbt->cookie = cookie;
1884 	return (sbt);
1885 }
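
/*
 * Sketch of typical use by a bus attachment (`ma', `sc' and
 * `mybus_bus_map' are illustrative names): allocate a derived tag and
 * override only the methods that need bus-specific handling:
 *
 *	bus_space_tag_t sbt = bus_space_tag_alloc(ma->ma_bustag, sc);
 *	if (sbt != NULL)
 *		sbt->sparc_bus_map = mybus_bus_map;
 */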
1886 
1887 /*
1888  * Generic routine to translate an address using OpenPROM `ranges'.
1889  */
1890 int
1891 bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
1892     bus_addr_t *bap)
1893 {
1894 	int i, space = BUS_ADDR_IOSPACE(*bap);
1895 
1896 	for (i = 0; i < nranges; i++) {
1897 		struct openprom_range *rp = &ranges[i];
1898 
1899 		if (rp->or_child_space != space)
1900 			continue;
1901 
1902 		/* We've found the connection to the parent bus. */
1903 		*bap = BUS_ADDR(rp->or_parent_space,
1904 		    rp->or_parent_base + BUS_ADDR_PADDR(*bap));
1905 		return (0);
1906 	}
1907 
1908 	return (EINVAL);
1909 }
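
/*
 * For example (illustrative numbers): given a range entry with
 * or_child_space = 1, or_parent_space = 0 and
 * or_parent_base = 0x10000000, the child address BUS_ADDR(1, 0x4000)
 * translates to BUS_ADDR(0, 0x10004000).
 */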
1910 
1911 static int
1912 sparc_bus_map_iodev(bus_space_tag_t t, bus_addr_t ba, bus_size_t size, int flags,
1913 	      vaddr_t va, bus_space_handle_t *hp)
1914 {
1915 	vaddr_t v;
1916 	paddr_t pa;
1917 	unsigned int pmtype;
1918 	bus_space_tag_t pt;
1919 static	vaddr_t iobase;
1920 
1921 	/*
1922 	 * This base class bus map function knows about address range
1923 	 * translation so bus drivers that need no other special
1924 	 * handling can just keep this method in their tags.
1925 	 *
1926 	 * We expect to resolve range translations iteratively, but allow
1927 	 * for recursion just in case.
1928 	 */
1929 	while ((pt = t->parent) != NULL) {
1930 		if (t->ranges != NULL) {
1931 			int error;
1932 
1933 			if ((error = bus_space_translate_address_generic(
1934 					t->ranges, t->nranges, &ba)) != 0)
1935 				return (error);
1936 		}
1937 		if (pt->sparc_bus_map != sparc_bus_map)
1938 			return (bus_space_map2(pt, ba, size, flags, va, hp));
1939 		t = pt;
1940 	}
1941 
1942 	if (iobase == 0)
1943 		iobase = IODEV_BASE;
1944 
1945 	size = round_page(size);
1946 	if (size == 0) {
1947 		printf("sparc_bus_map: zero size\n");
1948 		return (EINVAL);
1949 	}
1950 
1951 	if (va)
1952 		v = trunc_page(va);
1953 	else {
1954 		v = iobase;
1955 		iobase += size;
1956 		if (iobase > IODEV_END)	/* unlikely */
1957 			panic("sparc_bus_map: iobase=0x%lx", iobase);
1958 	}
1959 
1960 	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
1961 	pa = BUS_ADDR_PADDR(ba);
1962 
1963 	/* note: preserve page offset */
1964 	*hp = (bus_space_handle_t)(v | ((u_long)pa & PGOFSET));
1965 
1966 	pa = trunc_page(pa);
1967 	do {
1968 		pmap_kenter_pa(v, pa | pmtype | PMAP_NC,
1969 		    VM_PROT_READ | VM_PROT_WRITE, 0);
1970 		v += PAGE_SIZE;
1971 		pa += PAGE_SIZE;
1972 	} while ((size -= PAGE_SIZE) > 0);
1973 
1974 	pmap_update(pmap_kernel());
1975 	return (0);
1976 }
1977 
1978 static int
1979 sparc_bus_map_large(bus_space_tag_t t, bus_addr_t ba,
1980 		    bus_size_t size, int flags, bus_space_handle_t *hp)
1981 {
1982 	vaddr_t v = 0;
1983 
1984 	if (uvm_map(kernel_map, &v, size, NULL, 0, PAGE_SIZE,
1985 	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_NORMAL,
1986 			0)) == 0) {
1987 		return sparc_bus_map_iodev(t, ba, size, flags, v, hp);
1988 	}
1989 	return -1;
1990 }
1991 
1992 int
1993 sparc_bus_map(bus_space_tag_t t, bus_addr_t ba,
1994 		    bus_size_t size, int flags, vaddr_t va,
1995 		    bus_space_handle_t *hp)
1996 {
1997 
1998 	if (flags & BUS_SPACE_MAP_LARGE) {
1999 		return sparc_bus_map_large(t, ba, size, flags, hp);
2000 	} else
2001 		return sparc_bus_map_iodev(t, ba, size, flags, va, hp);
2002 
2003 }
2004 
2005 int
2006 sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
2007 {
2008 	vaddr_t va = trunc_page((vaddr_t)bh);
2009 
2010 	/*
2011 	 * XXX
2012 	 * mappings with BUS_SPACE_MAP_LARGE need additional care here;
2013 	 * we can just check whether the VA is in the IODEV range
2014 	 */
2015 
2016 	pmap_kremove(va, round_page(size));
2017 	pmap_update(pmap_kernel());
2018 	return (0);
2019 }
2020 
2021 int
2022 sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
2023 		    bus_size_t offset, bus_size_t size,
2024 		    bus_space_handle_t *nhandlep)
2025 {
2026 
2027 	*nhandlep = handle + offset;
2028 	return (0);
2029 }
2030 
2031 paddr_t
2032 sparc_bus_mmap(bus_space_tag_t t, bus_addr_t ba, off_t off,
2033 	       int prot, int flags)
2034 {
2035 	u_int pmtype;
2036 	paddr_t pa;
2037 	bus_space_tag_t pt;
2038 
2039 	/*
2040 	 * Base class bus mmap function; see also sparc_bus_map
2041 	 */
2042 	while ((pt = t->parent) != NULL) {
2043 		if (t->ranges != NULL) {
2044 			int error;
2045 
2046 			if ((error = bus_space_translate_address_generic(
2047 					t->ranges, t->nranges, &ba)) != 0)
2048 				return (-1);
2049 		}
2050 		if (pt->sparc_bus_mmap != sparc_bus_mmap)
2051 			return (bus_space_mmap(pt, ba, off, prot, flags));
2052 		t = pt;
2053 	}
2054 
2055 	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
2056 	pa = trunc_page(BUS_ADDR_PADDR(ba) + off);
2057 
2058 	return (paddr_t)(pa | pmtype | PMAP_NC);
2059 }
2060 
2061 /*
2062  * Establish a temporary bus mapping for device probing.
2063  */
2064 int
2065 bus_space_probe(bus_space_tag_t tag, bus_addr_t paddr, bus_size_t size,
2066 		size_t offset, int flags,
2067 		int (*callback)(void *, void *), void *arg)
2068 {
2069 	bus_space_handle_t bh;
2070 	void *tmp;
2071 	int result;
2072 
2073 	if (bus_space_map2(tag, paddr, size, flags, TMPMAP_VA, &bh) != 0)
2074 		return (0);
2075 
2076 	tmp = (void *)bh;
2077 	result = (probeget((char *)tmp + offset, size) != -1);
2078 	if (result && callback != NULL)
2079 		result = (*callback)(tmp, arg);
2080 	bus_space_unmap(tag, bh, size);
2081 	return (result);
2082 }
2083 
2084 
2085 void *
2086 sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
2087 			     int (*handler)(void *), void *arg,
2088 			     void (*fastvec)(void))
2089 {
2090 	struct intrhand *ih;
2091 
2092 	ih = (struct intrhand *)
2093 		malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
2094 	if (ih == NULL)
2095 		return (NULL);
2096 
2097 	ih->ih_fun = handler;
2098 	ih->ih_arg = arg;
2099 	intr_establish(pil, level, ih, fastvec, false);
2100 	return (ih);
2101 }
2102 
2103 static void sparc_bus_barrier(bus_space_tag_t t, bus_space_handle_t h,
2104 			bus_size_t offset, bus_size_t size, int flags)
2105 {
2106 
2107 	/* No default barrier action defined */
2108 	return;
2109 }
2110 
2111 static uint8_t
2112 sparc_bus_space_read_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
2113 {
2114 
2115 	return bus_space_read_1_real(t, h, o);
2116 }
2117 
2118 static uint16_t
2119 sparc_bus_space_read_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
2120 {
2121 
2122 	return bus_space_read_2_real(t, h, o);
2123 }
2124 
2125 static uint32_t
2126 sparc_bus_space_read_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
2127 {
2128 
2129 	return bus_space_read_4_real(t, h, o);
2130 }
2131 
2132 static uint64_t
2133 sparc_bus_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
2134 {
2135 
2136 	return bus_space_read_8_real(t, h, o);
2137 }
2138 
2139 static void
2140 sparc_bus_space_write_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2141 			uint8_t v)
2142 {
2143 
2144 	bus_space_write_1_real(t, h, o, v);
2145 }
2146 
2147 static void
2148 sparc_bus_space_write_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2149 			uint16_t v)
2150 {
2151 
2152 	bus_space_write_2_real(t, h, o, v);
2153 }
2154 
2155 static void
2156 sparc_bus_space_write_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2157 			uint32_t v)
2158 {
2159 
2160 	bus_space_write_4_real(t, h, o, v);
2161 }
2162 
2163 static void
2164 sparc_bus_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2165 			uint64_t v)
2166 {
2167 
2168 	bus_space_write_8_real(t, h, o, v);
2169 }
2170 
2171 struct sparc_bus_space_tag mainbus_space_tag = {
2172 	NULL,				/* cookie */
2173 	NULL,				/* parent bus tag */
2174 	NULL,				/* ranges */
2175 	0,				/* nranges */
2176 	sparc_bus_map,			/* bus_space_map */
2177 	sparc_bus_unmap,		/* bus_space_unmap */
2178 	sparc_bus_subregion,		/* bus_space_subregion */
2179 	sparc_bus_barrier,		/* bus_space_barrier */
2180 	sparc_bus_mmap,			/* bus_space_mmap */
2181 	sparc_mainbus_intr_establish,	/* bus_intr_establish */
2182 
2183 	sparc_bus_space_read_1,		/* bus_space_read_1 */
2184 	sparc_bus_space_read_2,		/* bus_space_read_2 */
2185 	sparc_bus_space_read_4,		/* bus_space_read_4 */
2186 	sparc_bus_space_read_8,		/* bus_space_read_8 */
2187 	sparc_bus_space_write_1,	/* bus_space_write_1 */
2188 	sparc_bus_space_write_2,	/* bus_space_write_2 */
2189 	sparc_bus_space_write_4,	/* bus_space_write_4 */
2190 	sparc_bus_space_write_8		/* bus_space_write_8 */
2191 };
2192