xref: /netbsd/sys/arch/sparc/sparc/machdep.c (revision 41ba3596)
1 /*	$NetBSD: machdep.c,v 1.316 2012/02/12 16:34:10 matt Exp $ */
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1992, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This software was developed by the Computer Systems Engineering group
38  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
39  * contributed to Berkeley.
40  *
41  * All advertising materials mentioning features or use of this software
42  * must display the following acknowledgement:
43  *	This product includes software developed by the University of
44  *	California, Lawrence Berkeley Laboratory.
45  *
46  * Redistribution and use in source and binary forms, with or without
47  * modification, are permitted provided that the following conditions
48  * are met:
49  * 1. Redistributions of source code must retain the above copyright
50  *    notice, this list of conditions and the following disclaimer.
51  * 2. Redistributions in binary form must reproduce the above copyright
52  *    notice, this list of conditions and the following disclaimer in the
53  *    documentation and/or other materials provided with the distribution.
54  * 3. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
71  */
72 
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.316 2012/02/12 16:34:10 matt Exp $");
75 
76 #include "opt_compat_netbsd.h"
77 #include "opt_compat_sunos.h"
78 #include "opt_sparc_arch.h"
79 #include "opt_modular.h"
80 #include "opt_multiprocessor.h"
81 
82 #include <sys/param.h>
83 #include <sys/signal.h>
84 #include <sys/signalvar.h>
85 #include <sys/proc.h>
86 #include <sys/extent.h>
87 #include <sys/savar.h>
88 #include <sys/cpu.h>
89 #include <sys/buf.h>
90 #include <sys/device.h>
91 #include <sys/reboot.h>
92 #include <sys/systm.h>
93 #include <sys/kernel.h>
94 #include <sys/conf.h>
95 #include <sys/file.h>
96 #include <sys/malloc.h>
97 #include <sys/mbuf.h>
98 #include <sys/mount.h>
99 #include <sys/msgbuf.h>
100 #include <sys/syscallargs.h>
101 #include <sys/exec.h>
102 #include <sys/exec_aout.h>
103 #include <sys/ucontext.h>
104 #include <sys/simplelock.h>
105 #include <sys/module.h>
106 #include <sys/mutex.h>
107 
108 #include <dev/mm.h>
109 
110 #include <uvm/uvm.h>		/* we use uvm.kernel_object */
111 
112 #include <sys/sysctl.h>
113 
114 #ifdef COMPAT_13
115 #include <compat/sys/signal.h>
116 #include <compat/sys/signalvar.h>
117 #endif
118 
119 #define _SPARC_BUS_DMA_PRIVATE
120 #include <machine/autoconf.h>
121 #include <sys/bus.h>
122 #include <machine/frame.h>
123 #include <machine/cpu.h>
124 #include <machine/pcb.h>
125 #include <machine/pmap.h>
126 #include <machine/oldmon.h>
127 #include <machine/bsd_openprom.h>
128 #include <machine/bootinfo.h>
129 #include <machine/eeprom.h>
130 
131 #include <sparc/sparc/asm.h>
132 #include <sparc/sparc/cache.h>
133 #include <sparc/sparc/vaddrs.h>
134 #include <sparc/sparc/cpuvar.h>
135 
136 #include "fb.h"
137 #include "power.h"
138 
139 #if NPOWER > 0
140 #include <sparc/dev/power.h>
141 #endif
142 
/* NOTE(review): presumably the end of available physical memory; defined in pmap — confirm. */
extern paddr_t avail_end;

/* Machine memory size in pages (clicks); ctob(physmem) converts to bytes (see cpu_startup). */
int	physmem;

/* Serializes FPU chip-state ownership changes; initialized in cpu_startup(), used via FPU_LOCK/FPU_UNLOCK in setregs(). */
kmutex_t fpu_mtx;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int   safepri = 0;

/*
 * dvmamap24 is used to manage DVMA memory for devices that have the upper
 * eight address bits wired to all-ones (e.g. `le' and `ie')
 */
struct extent *dvmamap24;

/* Forward declarations for routines defined later in this file. */
void	dumpsys(void);
void	stackdump(void);
163 
/*
 * Machine-dependent startup code.
 *
 * Runs once at boot, after the VM system is up:
 *  - relocates the kernel message buffer from its temporary mapping
 *    at KERNBASE to its permanent home and re-initializes it,
 *  - prints the banner and memory totals,
 *  - on sun4/sun4c, clips UBC/buffer-cache and data/stack limits to
 *    avoid exhausting scarce MMU resources (PMEGs),
 *  - creates the 24-bit DVMA extent map used by `le'/`ie' devices,
 *  - initializes the FPU ownership mutex.
 */
void
cpu_startup(void)
{
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	struct pcb *pcb;
	vsize_t size;
	paddr_t pa;
	char pbuf[9];

#ifdef DEBUG
	/* Silence pmap debugging while we juggle mappings below. */
	pmapdebug = 0;
#endif

	/* XXX */
	pcb = lwp_getpcb(&lwp0);
	if (pcb && pcb->pcb_psr == 0)
		pcb->pcb_psr = getpsr();

	/*
	 * Re-map the message buffer from its temporary address
	 * at KERNBASE to MSGBUF_VA.
	 */
#if !defined(MSGBUFSIZE) || MSGBUFSIZE <= 8192
	/*
	 * We use the free page(s) in front of the kernel load address.
	 */
	size = 8192;

	/* Get physical address of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, size);
	pmap_update(pmap_kernel());

	/* Enter the new mapping */
	pmap_map(MSGBUF_VA, pa, pa + size, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * Re-initialize the message buffer.
	 */
	initmsgbuf((void *)MSGBUF_VA, size);
#else /* MSGBUFSIZE */
	{
	struct pglist mlist;
	struct vm_page *m;
	vaddr_t va0, va;

	/*
	 * We use the free page(s) in front of the kernel load address,
	 * and then allocate some more.
	 */
	size = round_page(MSGBUFSIZE);

	/* Get physical address of first 8192 chunk of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);

	/* Allocate additional physical pages */
	if (uvm_pglistalloc(size - 8192,
			    vm_first_phys, vm_first_phys+vm_num_phys,
			    0, 0, &mlist, 1, 0) != 0)
		panic("cpu_start: no memory for message buffer");

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, 8192);
	pmap_update(pmap_kernel());

	/* Allocate virtual memory space */
	va0 = va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("cpu_start: no virtual memory for message buffer");

	/* Map first 8192 */
	while (va < va0 + 8192) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/* Map the rest of the pages */
	TAILQ_FOREACH(m, &mlist ,pageq.queue) {
		if (va >= va0 + size)
			panic("cpu_start: memory buffer size botch");
		pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/*
	 * Re-initialize the message buffer.
	 */
	initmsgbuf((void *)va0, size);
	}
#endif /* MSGBUFSIZE */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	/*identifycpu();*/
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Tune buffer cache variables based on the capabilities of the MMU
	 * to cut down on VM space allocated for the buffer caches that
	 * would lead to MMU resource shortage.
	 */
	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Clip UBC windows */
		if (cpuinfo.mmu_nsegment <= 128) {
			/*
			 * ubc_nwins and ubc_winshift control the amount
			 * of VM used by the UBC. Normally, this VM is
			 * not wired in the kernel map, hence non-locked
			 * `PMEGs' (see pmap.c) are used for this space.
			 * We still limit possible fragmentation to prevent
			 * the occasional wired UBC mappings from tying up
			 * too many PMEGs.
			 *
			 * Set the upper limit to 9 segments (default
			 * winshift = 13).
			 */
			ubc_nwins = 512;

			/*
			 * buf_setvalimit() allocates a submap for buffer
			 * allocation. We use it to limit the number of locked
			 * `PMEGs' (see pmap.c) dedicated to the buffer cache.
			 *
			 * Set the upper limit to 12 segments (3MB), which
			 * corresponds approximately to the size of the
			 * traditional 5% rule (assuming a maximum 64MB of
			 * memory in small sun4c machines).
			 */
			buf_setvalimit(12 * 256*1024);
		}

		/* Clip max data & stack to avoid running into the MMU hole */
#if MAXDSIZ > 256*1024*1024
		maxdmap = 256*1024*1024;
#endif
#if MAXSSIZ > 256*1024*1024
		maxsmap = 256*1024*1024;
#endif
	}

	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/*
		 * Allocate DMA map for 24-bit devices (le, ie)
		 * [dvma_base - dvma_end] is for VME devices..
		 */
		dvmamap24 = extent_create("dvmamap24",
					  D24_DVMA_BASE, D24_DVMA_END,
					  0, 0, EX_NOWAIT);
		if (dvmamap24 == NULL)
			panic("unable to allocate DVMA map");
	}

#ifdef DEBUG
	/* Restore the pmap debugging level saved on entry. */
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/* Establish the pmap red zone; see pmap_redzone() in pmap.c. */
	pmap_redzone();

	mutex_init(&fpu_mtx, MUTEX_DEFAULT, IPL_SCHED);
}
341 
/*
 * Set up registers on exec.
 *
 * Clears the lwp's trapframe except for the pieces the new image
 * needs (%psr CWP+S bits, %sp, %g1 = p_psstrp for crt0, %pc/%npc =
 * program entry point), and discards any FPU state the lwp holds —
 * saving live chip state first (locally or via cross-call) so the
 * FPU can be released safely.
 *
 * XXX this entire mess must be fixed
 */
/* ARGSUSED */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe *tf = l->l_md.md_tf;
	struct fpstate *fs;
	int psr;

	/* Don't allow unaligned data references by default */
	l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN;

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec())
	 *	%psr: (retain CWP and PSR_S bits)
	 *	%g1: p->p_psstrp (used by crt0)
	 *	%pc,%npc: entry point of program
	 */
	psr = tf->tf_psr & (PSR_S | PSR_CWP);
	if ((fs = l->l_md.md_fpstate) != NULL) {
		struct cpu_info *cpi;
		int s;
		/*
		 * We hold an FPU state.  If we own *some* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		FPU_LOCK(s);
		if ((cpi = l->l_md.md_fpu) != NULL) {
			if (cpi->fplwp != l)
				panic("FPU(%d): fplwp %p",
					cpi->ci_cpuid, cpi->fplwp);
			if (l == cpuinfo.fplwp)
				savefpstate(fs);
#if defined(MULTIPROCESSOR)
			else
				/* Ask the CPU that holds our state to save it. */
				XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
			cpi->fplwp = NULL;
		}
		l->l_md.md_fpu = NULL;
		FPU_UNLOCK(s);
		kmem_free(fs, sizeof(struct fpstate));
		l->l_md.md_fpstate = NULL;
	}
	memset((void *)tf, 0, sizeof *tf);
	tf->tf_psr = psr;
	tf->tf_global[1] = l->l_proc->p_psstrp;
	tf->tf_pc = pack->ep_entry & ~3;
	tf->tf_npc = tf->tf_pc + 4;
	/* Reserve an initial register-save area on the user stack. */
	stack -= sizeof(struct rwindow);
	tf->tf_out[6] = stack;
}
400 
#ifdef DEBUG
/* Signal-delivery debugging knobs (DEBUG kernels only). */
int sigdebug = 0;	/* bitmask of SDB_* flags below */
int sigpid = 0;		/* if non-zero, restrict debug output to this pid */
#define SDB_FOLLOW	0x01	/* trace sendsig/setmcontext entry */
#define SDB_KSTACK	0x02	/* report signal frame addresses */
#define SDB_FPSTATE	0x04	/* FPU state handling (no users in this file) */
#endif
408 
409 /*
410  * machine dependent system variables.
411  */
412 static int
413 sysctl_machdep_boot(SYSCTLFN_ARGS)
414 {
415 	struct sysctlnode node = *rnode;
416 	struct btinfo_kernelfile *bi_file;
417 	const char *cp;
418 
419 
420 	switch (node.sysctl_num) {
421 	case CPU_BOOTED_KERNEL:
422 		if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL)
423 			cp = bi_file->name;
424 		else
425 			cp = prom_getbootfile();
426 		if (cp != NULL && cp[0] == '\0')
427 			cp = "netbsd";
428 		break;
429 	case CPU_BOOTED_DEVICE:
430 		cp = prom_getbootpath();
431 		break;
432 	case CPU_BOOT_ARGS:
433 		cp = prom_getbootargs();
434 		break;
435 	default:
436 		return (EINVAL);
437 	}
438 
439 	if (cp == NULL || cp[0] == '\0')
440 		return (ENOENT);
441 
442 	node.sysctl_data = __UNCONST(cp);
443 	node.sysctl_size = strlen(cp) + 1;
444 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
445 }
446 
/*
 * Register the machdep sysctl subtree: booted_kernel, booted_device
 * and boot_args (all served by sysctl_machdep_boot() above) plus the
 * read-only cpu_arch integer.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* Parent node: machdep */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	/* machdep.booted_kernel: path of the kernel that was booted */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	/* machdep.booted_device: PROM path of the boot device */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	/* machdep.boot_args: arguments passed at boot */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "boot_args", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL);
	/* machdep.cpu_arch: SPARC architecture version number */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "cpu_arch", NULL,
		       NULL, 0, &cpu_arch, 0,
		       CTL_MACHDEP, CPU_ARCH, CTL_EOL);
}
477 
/*
 * Send an interrupt to process.
 *
 * Layout of the signal frame pushed on the user stack by
 * sendsig_siginfo() below; the handler receives pointers to
 * both members, and sigreturn consumes sf_uc.
 */
struct sigframe {
	siginfo_t sf_si;	/* signal information */
	ucontext_t sf_uc;	/* saved user context */
};
485 
/*
 * sendsig_siginfo: deliver signal `ksi' to the current lwp with the
 * blocked-signal set `mask' saved in the context.
 *
 * Builds a struct sigframe (siginfo + ucontext) on the user stack —
 * or on the alternate signal stack when requested and available —
 * links the new C stack frame back to the interrupted one so that
 * debuggers and _longjmp can unwind through it, and redirects the
 * trapframe to the handler with arguments (signo, siginfo *,
 * ucontext *).  If the user stack cannot be written, the process is
 * killed with SIGILL.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf;
	ucontext_t uc;
	struct sigframe *fp;
	u_int onstack, oldsp, newsp;
	u_int catcher;
	int sig, error;
	size_t ucsz;

	sig = ksi->ksi_signo;

	tf = l->l_md.md_tf;
	oldsp = tf->tf_out[6];

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (onstack)
		fp = (struct sigframe *)
			((char *)l->l_sigstk.ss_sp +
				  l->l_sigstk.ss_size);
	else
		fp = (struct sigframe *)oldsp;

	/* Allocate one frame and keep the pointer 8-byte aligned. */
	fp = (struct sigframe *)((int)(fp - 1) & ~7);

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: %s[%d] sig %d newusp %p si %p uc %p\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_si, &fp->sf_uc);
#endif

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	uc.uc_flags = _UC_SIGMASK |
		((l->l_sigstk.ss_flags & SS_ONSTACK)
			? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = l->l_ctxlink;
	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	sendsig_reset(l, sig);
	mutex_exit(p->p_lock);
	newsp = (int)fp - sizeof(struct frame);
	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
	/* Only copy out the context up to the pad area; the rest is unused. */
	ucsz = (int)&uc.__uc_pad - (int)&uc;
	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
	    copyout(&uc, &fp->sf_uc, ucsz) ||
	    suword(&((struct rwindow *)newsp)->rw_in[6], oldsp));
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	switch (ps->sa_sigdesc[sig].sd_vers) {
	default:
		/* Unsupported trampoline version; kill the process. */
		sigexit(l, SIGILL);
	case 2:
		/*
		 * Arrange to continue execution at the user's handler.
		 * It needs a new stack pointer, a return address and
		 * three arguments: (signo, siginfo *, ucontext *).
		 */
		catcher = (u_int)SIGACTION(p, sig).sa_handler;
		tf->tf_pc = catcher;
		tf->tf_npc = catcher + 4;
		tf->tf_out[0] = sig;
		tf->tf_out[1] = (int)&fp->sf_si;
		tf->tf_out[2] = (int)&fp->sf_uc;
		tf->tf_out[6] = newsp;
		tf->tf_out[7] = (int)ps->sa_sigdesc[sig].sd_tramp - 8;
		break;
	}

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: about to return to catcher\n");
#endif
}
600 
/*
 * cpu_upcall:
 *
 *	Send an upcall to userland (scheduler activations).
 *	Loads the trapframe out-registers with the upcall arguments,
 *	double-word aligns the supplied stack and reserves a C call
 *	frame on it, then points %pc/%npc at the upcall handler.
 *	%o7 is set to -1 so returning from the handler faults.
 */
void
cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted,
	   void *sas, void *ap, void *sp, sa_upcall_t upcall)
{
	struct trapframe *tf;
	vaddr_t addr;

	tf = l->l_md.md_tf;
	addr = (vaddr_t) upcall;

	/* Arguments to the upcall... */
	tf->tf_out[0] = type;
	tf->tf_out[1] = (vaddr_t) sas;
	tf->tf_out[2] = nevents;
	tf->tf_out[3] = ninterrupted;
	tf->tf_out[4] = (vaddr_t) ap;

	/*
	 * Ensure the stack is double-word aligned, and provide a
	 * C call frame.
	 */
	sp = (void *)(((vaddr_t)sp & ~0x7) - CCFSZ);

	/* Arrange to begin execution at the upcall handler. */
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = (vaddr_t) sp;
	tf->tf_out[7] = -1;		/* "you lose" if upcall returns */
}
635 
636 void
637 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
638 {
639 	struct trapframe *tf = (struct trapframe *)l->l_md.md_tf;
640 	__greg_t *r = mcp->__gregs;
641 #ifdef FPU_CONTEXT
642 	__fpregset_t *f = &mcp->__fpregs;
643 	struct fpstate *fps = l->l_md.md_fpstate;
644 #endif
645 
646 	/*
647 	 * Put the stack in a consistent state before we whack away
648 	 * at it.  Note that write_user_windows may just dump the
649 	 * registers into the pcb; we need them in the process's memory.
650 	 */
651 	write_user_windows();
652 	if ((l->l_flag & LW_SA_SWITCHING) == 0 && rwindow_save(l)) {
653 		mutex_enter(l->l_proc->p_lock);
654 		sigexit(l, SIGILL);
655 	}
656 
657 	/*
658 	 * Get the general purpose registers
659 	 */
660 	r[_REG_PSR] = tf->tf_psr;
661 	r[_REG_PC] = tf->tf_pc;
662 	r[_REG_nPC] = tf->tf_npc;
663 	r[_REG_Y] = tf->tf_y;
664 	r[_REG_G1] = tf->tf_global[1];
665 	r[_REG_G2] = tf->tf_global[2];
666 	r[_REG_G3] = tf->tf_global[3];
667 	r[_REG_G4] = tf->tf_global[4];
668 	r[_REG_G5] = tf->tf_global[5];
669 	r[_REG_G6] = tf->tf_global[6];
670 	r[_REG_G7] = tf->tf_global[7];
671 	r[_REG_O0] = tf->tf_out[0];
672 	r[_REG_O1] = tf->tf_out[1];
673 	r[_REG_O2] = tf->tf_out[2];
674 	r[_REG_O3] = tf->tf_out[3];
675 	r[_REG_O4] = tf->tf_out[4];
676 	r[_REG_O5] = tf->tf_out[5];
677 	r[_REG_O6] = tf->tf_out[6];
678 	r[_REG_O7] = tf->tf_out[7];
679 
680 	*flags |= _UC_CPU;
681 
682 #ifdef FPU_CONTEXT
683 	/*
684 	 * Get the floating point registers
685 	 */
686 	memcpy(f->__fpu_regs, fps->fs_regs, sizeof(fps->fs_regs));
687 	f->__fp_nqsize = sizeof(struct fp_qentry);
688 	f->__fp_nqel = fps->fs_qsize;
689 	f->__fp_fsr = fps->fs_fsr;
690 	if (f->__fp_q != NULL) {
691 		size_t sz = f->__fp_nqel * f->__fp_nqsize;
692 		if (sz > sizeof(fps->fs_queue)) {
693 #ifdef DIAGNOSTIC
694 			printf("getcontext: fp_queue too large\n");
695 #endif
696 			return;
697 		}
698 		if (copyout(fps->fs_queue, f->__fp_q, sz) != 0) {
699 #ifdef DIAGNOSTIC
700 			printf("getcontext: copy of fp_queue failed %d\n",
701 			    error);
702 #endif
703 			return;
704 		}
705 	}
706 	f->fp_busy = 0;	/* XXX: How do we determine that? */
707 	*flags |= _UC_FPU;
708 #endif
709 
710 	return;
711 }
712 
713 /*
714  * Set to mcontext specified.
715  * Return to previous pc and psl as specified by
716  * context left by sendsig. Check carefully to
717  * make sure that the user has not modified the
718  * psl to gain improper privileges or to cause
719  * a machine fault.
720  * This is almost like sigreturn() and it shows.
721  */
722 int
723 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
724 {
725 	struct trapframe *tf;
726 	const __greg_t *r = mcp->__gregs;
727 	struct proc *p = l->l_proc;
728 #ifdef FPU_CONTEXT
729 	__fpregset_t *f = &mcp->__fpregs;
730 	struct fpstate *fps = l->l_md.md_fpstate;
731 #endif
732 
733 	write_user_windows();
734 	if (rwindow_save(l)) {
735 		mutex_enter(p->p_lock);
736 		sigexit(l, SIGILL);
737 	}
738 
739 #ifdef DEBUG
740 	if (sigdebug & SDB_FOLLOW)
741 		printf("__setmcontext: %s[%d], __mcontext %p\n",
742 		    l->l_proc->p_comm, l->l_proc->p_pid, mcp);
743 #endif
744 
745 	if (flags & _UC_CPU) {
746 		/* Restore register context. */
747 		tf = (struct trapframe *)l->l_md.md_tf;
748 
749 		/*
750 		 * Only the icc bits in the psr are used, so it need not be
751 		 * verified.  pc and npc must be multiples of 4.  This is all
752 		 * that is required; if it holds, just do it.
753 		 */
754 		if (((r[_REG_PC] | r[_REG_nPC]) & 3) != 0) {
755 			printf("pc or npc are not multiples of 4!\n");
756 			return (EINVAL);
757 		}
758 
759 		/* take only psr ICC field */
760 		tf->tf_psr = (tf->tf_psr & ~PSR_ICC) |
761 		    (r[_REG_PSR] & PSR_ICC);
762 		tf->tf_pc = r[_REG_PC];
763 		tf->tf_npc = r[_REG_nPC];
764 		tf->tf_y = r[_REG_Y];
765 
766 		/* Restore everything */
767 		tf->tf_global[1] = r[_REG_G1];
768 		tf->tf_global[2] = r[_REG_G2];
769 		tf->tf_global[3] = r[_REG_G3];
770 		tf->tf_global[4] = r[_REG_G4];
771 		tf->tf_global[5] = r[_REG_G5];
772 		tf->tf_global[6] = r[_REG_G6];
773 		tf->tf_global[7] = r[_REG_G7];
774 
775 		tf->tf_out[0] = r[_REG_O0];
776 		tf->tf_out[1] = r[_REG_O1];
777 		tf->tf_out[2] = r[_REG_O2];
778 		tf->tf_out[3] = r[_REG_O3];
779 		tf->tf_out[4] = r[_REG_O4];
780 		tf->tf_out[5] = r[_REG_O5];
781 		tf->tf_out[6] = r[_REG_O6];
782 		tf->tf_out[7] = r[_REG_O7];
783 
784 		lwp_setprivate(l, (void *)(uintptr_t)r[_REG_G7]);
785 	}
786 
787 #ifdef FPU_CONTEXT
788 	if (flags & _UC_FPU) {
789 		/*
790 		 * Set the floating point registers
791 		 */
792 		int error;
793 		size_t sz = f->__fp_nqel * f->__fp_nqsize;
794 		if (sz > sizeof(fps->fs_queue)) {
795 #ifdef DIAGNOSTIC
796 			printf("setmcontext: fp_queue too large\n");
797 #endif
798 			return (EINVAL);
799 		}
800 		memcpy(fps->fs_regs, f->__fpu_regs, sizeof(fps->fs_regs));
801 		fps->fs_qsize = f->__fp_nqel;
802 		fps->fs_fsr = f->__fp_fsr;
803 		if (f->__fp_q != NULL) {
804 			if ((error = copyin(f->__fp_q, fps->fs_queue, sz)) != 0) {
805 #ifdef DIAGNOSTIC
806 				printf("setmcontext: fp_queue copy failed\n");
807 #endif
808 				return (error);
809 			}
810 		}
811 	}
812 #endif
813 
814 	mutex_enter(p->p_lock);
815 	if (flags & _UC_SETSTACK)
816 		l->l_sigstk.ss_flags |= SS_ONSTACK;
817 	if (flags & _UC_CLRSTACK)
818 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
819 	mutex_exit(p->p_lock);
820 
821 	return (0);
822 }
823 
824 int	waittime = -1;
825 
826 void
827 cpu_reboot(int howto, char *user_boot_string)
828 {
829 	int i;
830 	char opts[4];
831 	static char str[128];
832 
833 	/* If system is cold, just halt. */
834 	if (cold) {
835 		howto |= RB_HALT;
836 		goto haltsys;
837 	}
838 
839 #if NFB > 0
840 	fb_unblank();
841 #endif
842 	boothowto = howto;
843 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
844 		extern struct lwp lwp0;
845 
846 		/* XXX protect against curlwp->p_stats.foo refs in sync() */
847 		if (curlwp == NULL)
848 			curlwp = &lwp0;
849 		waittime = 0;
850 		vfs_shutdown();
851 
852 		/*
853 		 * If we've been adjusting the clock, the todr
854 		 * will be out of synch; adjust it now.
855 		 * resettodr will only do this only if inittodr()
856 		 * has already been called.
857 		 */
858 		resettodr();
859 	}
860 
861 	/* Disable interrupts. But still allow IPI on MP systems */
862 	if (sparc_ncpus > 1)
863 		(void)splsched();
864 	else
865 		(void)splhigh();
866 
867 #if defined(MULTIPROCESSOR)
868 	/* Direct system interrupts to this CPU, since dump uses polled I/O */
869 	if (CPU_ISSUN4M)
870 		*((u_int *)ICR_ITR) = cpuinfo.mid - 8;
871 #endif
872 
873 	/* If rebooting and a dump is requested, do it. */
874 #if 0
875 	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
876 #else
877 	if (howto & RB_DUMP)
878 #endif
879 		dumpsys();
880 
881  haltsys:
882 
883 	/* Run any shutdown hooks. */
884 	doshutdownhooks();
885 
886 	pmf_system_shutdown(boothowto);
887 
888 	/* If powerdown was requested, do it. */
889 	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
890 		prom_interpret("power-off");
891 #if NPOWER > 0
892 		/* Fall back on `power' device if the PROM can't do it */
893 		powerdown();
894 #endif
895 		printf("WARNING: powerdown not supported\n");
896 		/*
897 		 * RB_POWERDOWN implies RB_HALT... fall into it...
898 		 */
899 	}
900 
901 	if (howto & RB_HALT) {
902 #if defined(MULTIPROCESSOR)
903 		mp_halt_cpus();
904 		printf("cpu%d halted\n\n", cpu_number());
905 #else
906 		printf("halted\n\n");
907 #endif
908 		prom_halt();
909 	}
910 
911 	printf("rebooting\n\n");
912 
913 	i = 1;
914 	if (howto & RB_SINGLE)
915 		opts[i++] = 's';
916 	if (howto & RB_KDB)
917 		opts[i++] = 'd';
918 	opts[i] = '\0';
919 	opts[0] = (i > 1) ? '-' : '\0';
920 
921 	if (user_boot_string && *user_boot_string) {
922 		i = strlen(user_boot_string);
923 		if (i > sizeof(str) - sizeof(opts) - 1)
924 			prom_boot(user_boot_string);	/* XXX */
925 		memcpy(str, user_boot_string, i);
926 		if (opts[0] != '\0')
927 			str[i] = ' ';
928 	}
929 	strcat(str, opts);
930 	prom_boot(str);
931 	/*NOTREACHED*/
932 }
933 
uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* pages of core dumped; also for savecore */
long	dumplo = 0;		/* dump offset on dumpdev, in disk blocks */
937 
/*
 * cpu_dumpconf: compute crash-dump placement (dumplo) and size
 * (dumpsize) for the configured dump device.  The dump is placed at
 * the end of the partition and covers all physical memory plus the
 * MMU state written by pmap_dumpmmu().
 */
void
cpu_dumpconf(void)
{
	int nblks, dumpblks;

	if (dumpdev == NODEV)
		return;
	/* Size of the dump device, in disk blocks. */
	nblks = bdev_size(dumpdev);

	dumpblks = ctod(physmem) + pmap_dumpsize();
	if (dumpblks > (nblks - ctod(1)))
		/*
		 * dump size is too big for the partition.
		 * Note, we safeguard a click at the front for a
		 * possible disk label.
		 */
		return;

	/* Put the dump at the end of the partition */
	dumplo = nblks - dumpblks;

	/*
	 * savecore(8) expects dumpsize to be the number of pages
	 * of actual core dumped (i.e. excluding the MMU stuff).
	 */
	dumpsize = physmem;
}
965 
966 #define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
967 static vaddr_t dumpspace;
968 
969 void *
970 reserve_dumppages(void *p)
971 {
972 
973 	dumpspace = (vaddr_t)p;
974 	return ((char *)p + BYTES_PER_DUMP);
975 }
976 
/*
 * Write a crash dump.
 *
 * Snapshots the current pcb and prints a stack trace, then writes
 * the MMU state (pmap_dumpmmu()) followed by each physical memory
 * range in pmemarr[] to the dump device through its d_dump entry
 * point (polled I/O), mapping at most BYTES_PER_DUMP bytes at a
 * time through the dumpspace window.
 */
void
dumpsys(void)
{
	const struct bdevsw *bdev;
	int psize;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, void *, size_t);
	int error = 0;
	struct memarr *mp;
	int nmem;
	extern struct memarr pmemarr[];
	extern int npmemarr;

	/* copy registers to memory */
	snapshot(cpuinfo.curpcb);
	stackdump();

	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	psize = bdev_size(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	blkno = dumplo;
	dump = bdev->d_dump;

	/* Write the MMU state first, then memory proper. */
	error = pmap_dumpmmu(dump, blkno);
	blkno += pmap_dumpsize();

	for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
		unsigned i = 0, n;
		int maddr = mp->addr;

		if (maddr == 0) {
			/* Skip first page at physical address 0 */
			maddr += PAGE_SIZE;
			i += PAGE_SIZE;
			blkno += btodb(PAGE_SIZE);
		}

		for (; i < mp->len; i += n) {
			n = mp->len - i;
			if (n > BYTES_PER_DUMP)
				 n = BYTES_PER_DUMP;

			/* print out how many MBs we have dumped */
			if (i && (i % (1024*1024)) == 0)
				printf_nolog("%d ", i / (1024*1024));

			/* Map the chunk, write it out, then unmap it. */
			(void) pmap_map(dumpspace, maddr, maddr + n,
					VM_PROT_READ);
			error = (*dump)(dumpdev, blkno,
					(void *)dumpspace, (int)n);
			pmap_kremove(dumpspace, n);
			pmap_update(pmap_kernel());
			if (error)
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	/* Report the outcome of the dump. */
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}
1089 
1090 /*
1091  * get the fp and dump the stack as best we can.  don't leave the
1092  * current stack page
1093  */
1094 void
1095 stackdump(void)
1096 {
1097 	struct frame *fp = getfp(), *sfp;
1098 
1099 	sfp = fp;
1100 	printf("Frame pointer is at %p\n", fp);
1101 	printf("Call traceback:\n");
1102 	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
1103 		printf("  pc = 0x%x  args = (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp = %p\n",
1104 		    fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
1105 		    fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
1106 		fp = fp->fr_fp;
1107 	}
1108 }
1109 
1110 int
1111 cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
1112 {
1113 
1114 	return (ENOEXEC);
1115 }
1116 
1117 #if defined(SUN4)
/*
 * Print UVM event counters and walk a stack trace starting at the
 * frame pointer `va', for the old PROM monitor's `w' command.
 * The walk stops at the end of the page containing `va' or at a
 * NULL frame link, whichever comes first.
 */
void
oldmon_w_trace(u_long va)
{
	struct cpu_info * const ci = curcpu();
	u_long stop;
	struct frame *fp;

	printf("curlwp = %p, pid %d\n", curlwp, curproc->p_pid);

	printf("uvm: cpu%u: swtch %"PRIu64", trap %"PRIu64", sys %"PRIu64", "
	    "intr %"PRIu64", soft %"PRIu64", faults %"PRIu64"\n",
	    cpu_index(ci), ci->ci_data.cpu_nswtch, ci->ci_data.cpu_ntrap,
	    ci->ci_data.cpu_nsyscall, ci->ci_data.cpu_nintr,
	    ci->ci_data.cpu_nsoft, ci->ci_data.cpu_nfault);
	/* Flush register windows so saved frames are visible in memory. */
	write_user_windows();

#define round_up(x) (( (x) + (PAGE_SIZE-1) ) & (~(PAGE_SIZE-1)) )

	printf("\nstack trace with sp = 0x%lx\n", va);
	/* `stop' is the first address past the page holding `va'. */
	stop = round_up(va);
	printf("stop at 0x%lx\n", stop);
	fp = (struct frame *) va;
	while (round_up((u_long) fp) == stop) {
		printf("  0x%x(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp %p\n", fp->fr_pc,
		    fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
		    fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
		fp = fp->fr_fp;
		if (fp == NULL)
			break;
	}
	printf("end of stack trace\n");
}
1150 
1151 void
1152 oldmon_w_cmd(u_long va, char *ar)
1153 {
1154 	switch (*ar) {
1155 	case '\0':
1156 		switch (va) {
1157 		case 0:
1158 			panic("g0 panic");
1159 		case 4:
1160 			printf("w: case 4\n");
1161 			break;
1162 		default:
1163 			printf("w: unknown case %ld\n", va);
1164 			break;
1165 		}
1166 		break;
1167 	case 't':
1168 		oldmon_w_trace(va);
1169 		break;
1170 	default:
1171 		printf("w: arg not allowed\n");
1172 	}
1173 }
1174 
1175 int
1176 ldcontrolb(void *addr)
1177 {
1178 	struct pcb *xpcb;
1179 	u_long saveonfault;
1180 	int res;
1181 	int s;
1182 
1183 	if (CPU_ISSUN4M || CPU_ISSUN4D) {
1184 		printf("warning: ldcontrolb called on sun4m/sun4d\n");
1185 		return 0;
1186 	}
1187 
1188 	s = splhigh();
1189 	xpcb = lwp_getpcb(curlwp);
1190 
1191 	saveonfault = (u_long)xpcb->pcb_onfault;
1192         res = xldcontrolb(addr, xpcb);
1193 	xpcb->pcb_onfault = (void *)saveonfault;
1194 
1195 	splx(s);
1196 	return (res);
1197 }
1198 #endif /* SUN4 */
1199 
1200 void
1201 wzero(void *vb, u_int l)
1202 {
1203 	u_char *b = vb;
1204 	u_char *be = b + l;
1205 	u_short *sp;
1206 
1207 	if (l == 0)
1208 		return;
1209 
1210 	/* front, */
1211 	if ((u_long)b & 1)
1212 		*b++ = 0;
1213 
1214 	/* back, */
1215 	if (b != be && ((u_long)be & 1) != 0) {
1216 		be--;
1217 		*be = 0;
1218 	}
1219 
1220 	/* and middle. */
1221 	sp = (u_short *)b;
1222 	while (sp != (u_short *)be)
1223 		*sp++ = 0;
1224 }
1225 
1226 void
1227 wcopy(const void *vb1, void *vb2, u_int l)
1228 {
1229 	const u_char *b1e, *b1 = vb1;
1230 	u_char *b2 = vb2;
1231 	const u_short *sp;
1232 	int bstore = 0;
1233 
1234 	if (l == 0)
1235 		return;
1236 
1237 	/* front, */
1238 	if ((u_long)b1 & 1) {
1239 		*b2++ = *b1++;
1240 		l--;
1241 	}
1242 
1243 	/* middle, */
1244 	sp = (const u_short *)b1;
1245 	b1e = b1 + l;
1246 	if (l & 1)
1247 		b1e--;
1248 	bstore = (u_long)b2 & 1;
1249 
1250 	while (sp < (const u_short *)b1e) {
1251 		if (bstore) {
1252 			b2[1] = *sp & 0xff;
1253 			b2[0] = *sp >> 8;
1254 		} else
1255 			*((short *)b2) = *sp;
1256 		sp++;
1257 		b2 += 2;
1258 	}
1259 
1260 	/* and back. */
1261 	if (l & 1)
1262 		*b2 = *b1e;
1263 }
1264 
1265 #ifdef MODULAR
/*
 * Machine-dependent module initialization: nothing to do on this port.
 */
void
module_init_md(void)
{
}
1270 #endif
1271 
1272 /*
1273  * Common function for DMA map creation.  May be called by bus-specific
1274  * DMA map creation functions.
1275  */
1276 int
1277 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
1278 		   bus_size_t maxsegsz, bus_size_t boundary, int flags,
1279 		   bus_dmamap_t *dmamp)
1280 {
1281 	struct sparc_bus_dmamap *map;
1282 	void *mapstore;
1283 	size_t mapsize;
1284 
1285 	/*
1286 	 * Allocate and initialize the DMA map.  The end of the map
1287 	 * is a variable-sized array of segments, so we allocate enough
1288 	 * room for them in one shot.
1289 	 *
1290 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
1291 	 * of ALLOCNOW notifies others that we've reserved these resources,
1292 	 * and they are not to be freed.
1293 	 *
1294 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
1295 	 * the (nsegments - 1).
1296 	 */
1297 	mapsize = sizeof(struct sparc_bus_dmamap) +
1298 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
1299 	if ((mapstore = malloc(mapsize, M_DMAMAP,
1300 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1301 		return (ENOMEM);
1302 
1303 	memset(mapstore, 0, mapsize);
1304 	map = (struct sparc_bus_dmamap *)mapstore;
1305 	map->_dm_size = size;
1306 	map->_dm_segcnt = nsegments;
1307 	map->_dm_maxmaxsegsz = maxsegsz;
1308 	map->_dm_boundary = boundary;
1309 	map->_dm_align = PAGE_SIZE;
1310 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
1311 	map->dm_maxsegsz = maxsegsz;
1312 	map->dm_mapsize = 0;		/* no valid mappings */
1313 	map->dm_nsegs = 0;
1314 
1315 	*dmamp = map;
1316 	return (0);
1317 }
1318 
1319 /*
1320  * Common function for DMA map destruction.  May be called by bus-specific
1321  * DMA map destruction functions.
1322  */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * The map (including its trailing segment array) was allocated
	 * as a single chunk in _bus_dmamap_create().
	 */
	free(map, M_DMAMAP);
}
1329 
1330 /*
1331  * Like _bus_dmamap_load(), but for mbufs.
1332  */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
		      struct mbuf *m, int flags)
{

	/* Unimplemented on this port; any call here is a driver bug. */
	panic("_bus_dmamap_load_mbuf: not implemented");
}
1340 
1341 /*
1342  * Like _bus_dmamap_load(), but for uios.
1343  */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
		     struct uio *uio, int flags)
{

	/* Unimplemented on this port; any call here is a driver bug. */
	panic("_bus_dmamap_load_uio: not implemented");
}
1351 
1352 /*
1353  * Like _bus_dmamap_load(), but for raw memory allocated with
1354  * bus_dmamem_alloc().
1355  */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
		     bus_dma_segment_t *segs, int nsegs, bus_size_t size,
		     int flags)
{

	/* Unimplemented here; sun4_dmamap_load_raw() is used instead. */
	panic("_bus_dmamap_load_raw: not implemented");
}
1364 
1365 /*
1366  * Common function for DMA map synchronization.  May be called
1367  * by bus-specific DMA map synchronization functions.
1368  */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
		 bus_addr_t offset, bus_size_t len, int ops)
{

	/*
	 * Intentionally empty: no cache synchronization is done here.
	 * DVMA pages are entered non-cached (PMAP_NC) by the load
	 * routines below.
	 */
}
1374 
1375 /*
1376  * Common function for DMA-safe memory allocation.  May be called
1377  * by bus-specific DMA memory allocation functions.
1378  */
1379 int
1380 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
1381 		  bus_size_t alignment, bus_size_t boundary,
1382 		  bus_dma_segment_t *segs, int nsegs, int *rsegs,
1383 		  int flags)
1384 {
1385 	vaddr_t low, high;
1386 	struct pglist *mlist;
1387 	int error;
1388 
1389 	/* Always round the size. */
1390 	size = round_page(size);
1391 	low = vm_first_phys;
1392 	high = vm_first_phys + vm_num_phys - PAGE_SIZE;
1393 
1394 	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
1395 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1396 		return (ENOMEM);
1397 
1398 	/*
1399 	 * Allocate pages from the VM system.
1400 	 */
1401 	error = uvm_pglistalloc(size, low, high, 0, 0,
1402 				mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1403 	if (error)
1404 		return (error);
1405 
1406 	/*
1407 	 * Simply keep a pointer around to the linked list, so
1408 	 * bus_dmamap_free() can return it.
1409 	 *
1410 	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
1411 	 * ARE IN OUR CUSTODY.
1412 	 */
1413 	segs[0]._ds_mlist = mlist;
1414 
1415 	/*
1416 	 * We now have physical pages, but no DVMA addresses yet. These
1417 	 * will be allocated in bus_dmamap_load*() routines. Hence we
1418 	 * save any alignment and boundary requirements in this DMA
1419 	 * segment.
1420 	 */
1421 	segs[0].ds_addr = 0;
1422 	segs[0].ds_len = 0;
1423 	segs[0]._ds_va = 0;
1424 	*rsegs = 1;
1425 	return (0);
1426 }
1427 
1428 /*
1429  * Common function for freeing DMA-safe memory.  May be called by
1430  * bus-specific DMA memory free functions.
1431  */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	if (nsegs != 1)
		panic("bus_dmamem_free: nsegs = %d", nsegs);

	/*
	 * Return the list of pages back to the VM system.
	 */
	uvm_pglistfree(segs[0]._ds_mlist);
	/* Release the list header allocated in _bus_dmamem_alloc(). */
	free(segs[0]._ds_mlist, M_DEVBUF);
}
1445 
1446 /*
1447  * Common function for unmapping DMA-safe memory.  May be called by
1448  * bus-specific DMA memory unmapping functions.
1449  */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	/* The VA must be page-aligned (as handed out by the map routine). */
	if ((u_long)kva & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	/* Tear down the mappings, then release the VA range itself. */
	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
1464 
1465 /*
1466  * Common functin for mmap(2)'ing DMA-safe memory.  May be called by
1467  * bus-specific DMA mmap(2)'ing functions.
1468  */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
		 off_t off, int prot, int flags)
{

	/* Unimplemented on this port; any call here is a driver bug. */
	panic("_bus_dmamem_mmap: not implemented");
}
1476 
1477 /*
1478  * Utility to allocate an aligned kernel virtual address range
1479  */
vaddr_t
_bus_dma_valloc_skewed(size_t size, u_long boundary, u_long align, u_long skew)
{
	size_t oversize;
	vaddr_t va, sva;

	/*
	 * Find a region of kernel virtual addresses that is aligned
	 * to the given address modulo the requested alignment, i.e.
	 *
	 *	(va - skew) == 0 mod align
	 *
	 * The following conditions apply to the arguments:
	 *
	 *	- `size' must be a multiple of the VM page size
	 *	- `align' must be a power of two
	 *	   and greater than or equal to the VM page size
	 *	- `skew' must be smaller than `align'
	 *	- `size' must be smaller than `boundary'
	 */

#ifdef DIAGNOSTIC
	if ((size & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid size %lx", size);
	if ((align & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid alignment %lx", align);
	if (align < skew)
		panic("_bus_dma_valloc_skewed: align %lx < skew %lx",
			align, skew);
#endif

	/* XXX - Implement this! */
	if (boundary) {
		printf("_bus_dma_valloc_skewed: "
			"boundary check not implemented");
		return (0);
	}

	/*
	 * First, find a region large enough to contain any aligned chunk
	 */
	oversize = size + align - PAGE_SIZE;
	sva = vm_map_min(kernel_map);
	if (uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOWAIT)))
		return (0);

	/*
	 * Compute start of aligned region
	 */
	va = sva;
	/* Advance to the first address >= sva with (va - skew) % align == 0. */
	va += (skew + align - va) & (align - 1);

	/*
	 * Return excess virtual addresses
	 */
	if (va != sva)
		(void)uvm_unmap(kernel_map, sva, va);
	if (va + size != sva + oversize)
		(void)uvm_unmap(kernel_map, va + size, sva + oversize);

	return (va);
}
1544 
1545 /* sun4/sun4c DMA map functions */
1546 int	sun4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
1547 				bus_size_t, struct proc *, int);
1548 int	sun4_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
1549 				bus_dma_segment_t *, int, bus_size_t, int);
1550 void	sun4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
1551 int	sun4_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
1552 				int, size_t, void **, int);
1553 
1554 /*
1555  * sun4/sun4c: load DMA map with a linear buffer.
1556  */
int
sun4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
		 void *buf, bus_size_t buflen,
		 struct proc *p, int flags)
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	vaddr_t dva;
	pmap_t pmap;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	cache_flush(buf, buflen);

	if ((map->_dm_flags & BUS_DMA_24BIT) == 0) {
		/*
		 * Non-24-bit maps can use the buffer's virtual address
		 * directly ("direct map"), unless a boundary constraint
		 * would be violated.
		 *
		 * XXX Need to implement "don't DMA across this boundary".
		 */
		if (map->_dm_boundary != 0) {
			bus_addr_t baddr;

			/* Calculate first boundary line after `buf' */
			baddr = ((bus_addr_t)va + map->_dm_boundary) &
					-map->_dm_boundary;

			/*
			 * If the requested segment crosses the boundary,
			 * we can't grant a direct map. For now, steal some
			 * space from the `24BIT' map instead.
			 *
			 * (XXX - no overflow detection here)
			 */
			if (buflen > (baddr - (bus_addr_t)va))
				goto no_fit;
		}
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

no_fit:
	/* Size of the DVMA window: buffer rounded out to whole pages. */
	sgsize = round_page(buflen + (va & (pagesz - 1)));

	if (extent_alloc(dvmamap24, sgsize, pagesz, map->_dm_boundary,
			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
			 &dva) != 0) {
		return (ENOMEM);
	}

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	/* Enter each page of the buffer, non-cached, at the DVMA range. */
	for (; buflen > 0; ) {
		paddr_t pa;

		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

#ifdef notyet
#if defined(SUN4)
		if (have_iocache)
			pa |= PG_IOC;
#endif
#endif
		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	map->dm_nsegs = 1;
	return (0);
}
1662 
1663 /*
1664  * Like _bus_dmamap_load(), but for raw memory allocated with
1665  * bus_dmamem_alloc().
1666  */
int
sun4_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
		     bus_dma_segment_t *segs, int nsegs, bus_size_t size,
		     int flags)
{
	struct vm_page *m;
	paddr_t pa;
	vaddr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	/* Invalidate the map until the load completes. */
	map->dm_nsegs = 0;
	/* Round the transfer size up to whole pages. */
	sgsize = (size + pagesz - 1) & -pagesz;

	/* Allocate DVMA addresses */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		error = extent_alloc(dvmamap24, sgsize, pagesz,
					map->_dm_boundary,
					(flags & BUS_DMA_NOWAIT) == 0
						? EX_WAITOK : EX_NOWAIT,
					&dva);
		if (error)
			return (error);
	} else {
		/* Any properly aligned virtual address will do */
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
					     pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into IOMMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		if (sgsize == 0)
			panic("sun4_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
#ifdef notyet
#if defined(SUN4)
		if (have_iocache)
			pa |= PG_IOC;
#endif
#endif
		/* Enter each page, non-cached, at consecutive DVMA pages. */
		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}
1729 
1730 /*
1731  * sun4/sun4c function for unloading a DMA map.
1732  */
void
sun4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	int flags = map->_dm_flags;
	vaddr_t dva;
	bus_size_t len;
	int i, s, error;

	/* Restore the per-transfer segment size limit. */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;

	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
		/* Nothing to release */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
		return;
	}

	for (i = 0; i < nsegs; i++) {
		/* Page-aligned start and rounded size entered at load time. */
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		pmap_kremove(dva, len);

		if ((flags & BUS_DMA_24BIT) != 0) {
			/* Return the range to the 24-bit DVMA arena. */
			s = splhigh();
			error = extent_free(dvmamap24, dva, len, EX_NOWAIT);
			splx(s);
			if (error != 0)
				printf("warning: %ld of DVMA space lost\n", len);
		} else {
			/* Otherwise the range came from kernel_map. */
			uvm_unmap(kernel_map, dva, dva + len);
		}
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
1775 
1776 /*
1777  * Common function for mapping DMA-safe memory.  May be called by
1778  * bus-specific DMA memory map functions.
1779  */
int
sun4_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
		size_t size, void **kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	struct pglist *mlist;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	if (nsegs != 1)
		panic("sun4_dmamem_map: nsegs = %d", nsegs);

	size = round_page(size);

	/* Grab a VA-only range; pages are entered by hand below. */
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	/* Enter each allocated page, non-cached, at consecutive VAs. */
	mlist = segs[0]._ds_mlist;
	TAILQ_FOREACH(m, mlist, pageq.queue) {
		paddr_t pa;

		if (size == 0)
			panic("sun4_dmamem_map: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
1820 
1821 
/*
 * Mainbus DMA tag: the generic map bookkeeping helpers above combined
 * with the sun4/sun4c-specific load/unload/map routines.
 */
struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,	/* NOTE(review): presumably the tag cookie -- confirm */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sun4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	sun4_dmamap_load_raw,
	sun4_dmamap_unload,
	_bus_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sun4_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
1839 
1840 
1841 /*
1842  * Base bus space handlers.
1843  */
1844 static int	sparc_bus_map(bus_space_tag_t, bus_addr_t,
1845 				    bus_size_t, int, vaddr_t,
1846 				    bus_space_handle_t *);
1847 static int	sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t,
1848 				     bus_size_t);
1849 static int	sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t,
1850 					 bus_size_t, bus_size_t,
1851 					 bus_space_handle_t *);
1852 static paddr_t	sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t,
1853 				    int, int);
1854 static void	*sparc_mainbus_intr_establish(bus_space_tag_t, int, int,
1855 						   int (*)(void *),
1856 						   void *,
1857 						   void (*)(void));
1858 static void     sparc_bus_barrier(bus_space_tag_t, bus_space_handle_t,
1859 					bus_size_t, bus_size_t, int);
1860 
1861 int
1862 bus_space_map(
1863 	bus_space_tag_t	t,
1864 	bus_addr_t	a,
1865 	bus_size_t	s,
1866 	int		f,
1867 	bus_space_handle_t *hp)
1868 {
1869 	return (*t->sparc_bus_map)(t, a, s, f, (vaddr_t)0, hp);
1870 }
1871 
1872 int
1873 bus_space_map2(
1874 	bus_space_tag_t	t,
1875 	bus_addr_t	a,
1876 	bus_size_t	s,
1877 	int		f,
1878 	vaddr_t		v,
1879 	bus_space_handle_t *hp)
1880 {
1881 	return (*t->sparc_bus_map)(t, a, s, f, v, hp);
1882 }
1883 
1884 void
1885 bus_space_unmap(
1886 	bus_space_tag_t t,
1887 	bus_space_handle_t h,
1888 	bus_size_t	s)
1889 {
1890 	(*t->sparc_bus_unmap)(t, h, s);
1891 }
1892 
1893 int
1894 bus_space_subregion(
1895 	bus_space_tag_t	t,
1896 	bus_space_handle_t h,
1897 	bus_size_t	o,
1898 	bus_size_t	s,
1899 	bus_space_handle_t *hp)
1900 {
1901 	return (*t->sparc_bus_subregion)(t, h, o, s, hp);
1902 }
1903 
1904 paddr_t
1905 bus_space_mmap(
1906 	bus_space_tag_t	t,
1907 	bus_addr_t	a,
1908 	off_t		o,
1909 	int		p,
1910 	int		f)
1911 {
1912 	return (*t->sparc_bus_mmap)(t, a, o, p, f);
1913 }
1914 
1915 void *
1916 bus_intr_establish(
1917 	bus_space_tag_t t,
1918 	int	p,
1919 	int	l,
1920 	int	(*h)(void *),
1921 	void	*a)
1922 {
1923 	return (*t->sparc_intr_establish)(t, p, l, h, a, NULL);
1924 }
1925 
1926 void *
1927 bus_intr_establish2(
1928 	bus_space_tag_t t,
1929 	int	p,
1930 	int	l,
1931 	int	(*h)(void *),
1932 	void	*a,
1933 	void	(*v)(void))
1934 {
1935 	return (*t->sparc_intr_establish)(t, p, l, h, a, v);
1936 }
1937 
1938 void
1939 bus_space_barrier(
1940 	bus_space_tag_t t,
1941 	bus_space_handle_t h,
1942 	bus_size_t o,
1943 	bus_size_t s,
1944 	int f)
1945 {
1946 	(*t->sparc_bus_barrier)(t, h, o, s, f);
1947 }
1948 
1949 void
1950 bus_space_write_multi_stream_2(
1951 	bus_space_tag_t		t,
1952 	bus_space_handle_t	h,
1953 	bus_size_t		o,
1954 	const uint16_t		*a,
1955 	bus_size_t		c)
1956 {
1957 	while (c-- > 0)
1958 		bus_space_write_2_real(t, h, o, *a++);
1959 }
1960 
1961 void
1962 bus_space_write_multi_stream_4(
1963 	bus_space_tag_t		t,
1964 	bus_space_handle_t	h,
1965 	bus_size_t		o,
1966 	const uint32_t		*a,
1967 	bus_size_t		c)
1968 {
1969 	while (c-- > 0)
1970 		bus_space_write_4_real(t, h, o, *a++);
1971 }
1972 
1973 void
1974 bus_space_write_multi_stream_8(
1975 	bus_space_tag_t		t,
1976 	bus_space_handle_t	h,
1977 	bus_size_t		o,
1978 	const uint64_t		*a,
1979 	bus_size_t		c)
1980 {
1981 	while (c-- > 0)
1982 		bus_space_write_8_real(t, h, o, *a++);
1983 }
1984 
1985 
1986 /*
1987  *	void bus_space_set_multi_N(bus_space_tag_t tag,
1988  *	    bus_space_handle_t bsh, bus_size_t offset, u_intN_t val,
1989  *	    bus_size_t count);
1990  *
1991  * Write the 1, 2, 4, or 8 byte value `val' to bus space described
1992  * by tag/handle/offset `count' times.
1993  */
1994 void
1995 bus_space_set_multi_1(
1996 	bus_space_tag_t		t,
1997 	bus_space_handle_t	h,
1998 	bus_size_t		o,
1999 	const uint8_t		v,
2000 	bus_size_t		c)
2001 {
2002 	while (c-- > 0)
2003 		bus_space_write_1(t, h, o, v);
2004 }
2005 
2006 void
2007 bus_space_set_multi_2(
2008 	bus_space_tag_t		t,
2009 	bus_space_handle_t	h,
2010 	bus_size_t		o,
2011 	const uint16_t		v,
2012 	bus_size_t		c)
2013 {
2014 	while (c-- > 0)
2015 		bus_space_write_2(t, h, o, v);
2016 }
2017 
2018 void
2019 bus_space_set_multi_4(
2020 	bus_space_tag_t		t,
2021 	bus_space_handle_t	h,
2022 	bus_size_t		o,
2023 	const uint32_t		v,
2024 	bus_size_t		c)
2025 {
2026 	while (c-- > 0)
2027 		bus_space_write_4(t, h, o, v);
2028 }
2029 
2030 void
2031 bus_space_set_multi_8(
2032 	bus_space_tag_t		t,
2033 	bus_space_handle_t	h,
2034 	bus_size_t		o,
2035 	const uint64_t		v,
2036 	bus_size_t		c)
2037 {
2038 	while (c-- > 0)
2039 		bus_space_write_8(t, h, o, v);
2040 }
2041 
2042 
2043 /*
2044  *	void bus_space_read_region_N(bus_space_tag_t tag,
2045  *	    bus_space_handle_t bsh, bus_size_t off,
2046  *	    u_intN_t *addr, bus_size_t count);
2047  *
2048  */
2049 void
2050 bus_space_read_region_1(
2051 	bus_space_tag_t		t,
2052 	bus_space_handle_t	h,
2053 	bus_size_t		o,
2054 	uint8_t			*a,
2055 	bus_size_t		c)
2056 {
2057 	for (; c; a++, c--, o++)
2058 		*a = bus_space_read_1(t, h, o);
2059 }
2060 
2061 void
2062 bus_space_read_region_2(
2063 	bus_space_tag_t		t,
2064 	bus_space_handle_t	h,
2065 	bus_size_t		o,
2066 	uint16_t		*a,
2067 	bus_size_t		c)
2068 {
2069 	for (; c; a++, c--, o+=2)
2070 		*a = bus_space_read_2(t, h, o);
2071 }
2072 
2073 void
2074 bus_space_read_region_4(
2075 	bus_space_tag_t		t,
2076 	bus_space_handle_t	h,
2077 	bus_size_t		o,
2078 	uint32_t		*a,
2079 	bus_size_t		c)
2080 {
2081 	for (; c; a++, c--, o+=4)
2082 		*a = bus_space_read_4(t, h, o);
2083 }
2084 
2085 void
2086 bus_space_read_region_8(
2087 	bus_space_tag_t		t,
2088 	bus_space_handle_t	h,
2089 	bus_size_t		o,
2090 	uint64_t		*a,
2091 	bus_size_t		c)
2092 {
2093 	for (; c; a++, c--, o+=8)
2094 		*a = bus_space_read_8(t, h, o);
2095 }
2096 
2097 /*
2098  *	void bus_space_write_region_N(bus_space_tag_t tag,
2099  *	    bus_space_handle_t bsh, bus_size_t off,
2100  *	    u_intN_t *addr, bus_size_t count);
2101  *
2102  */
2103 void
2104 bus_space_write_region_1(
2105 	bus_space_tag_t		t,
2106 	bus_space_handle_t	h,
2107 	bus_size_t		o,
2108 	const uint8_t		*a,
2109 	bus_size_t		c)
2110 {
2111 	for (; c; a++, c--, o++)
2112 		bus_space_write_1(t, h, o, *a);
2113 }
2114 
2115 void
2116 bus_space_write_region_2(
2117 	bus_space_tag_t		t,
2118 	bus_space_handle_t	h,
2119 	bus_size_t		o,
2120 	const uint16_t		*a,
2121 	bus_size_t		c)
2122 {
2123 	for (; c; a++, c--, o+=2)
2124 		bus_space_write_2(t, h, o, *a);
2125 }
2126 
2127 void
2128 bus_space_write_region_4(
2129 	bus_space_tag_t		t,
2130 	bus_space_handle_t	h,
2131 	bus_size_t		o,
2132 	const uint32_t		*a,
2133 	bus_size_t		c)
2134 {
2135 	for (; c; a++, c--, o+=4)
2136 		bus_space_write_4(t, h, o, *a);
2137 }
2138 
2139 void
2140 bus_space_write_region_8(
2141 	bus_space_tag_t		t,
2142 	bus_space_handle_t	h,
2143 	bus_size_t		o,
2144 	const uint64_t		*a,
2145 	bus_size_t		c)
2146 {
2147 	for (; c; a++, c--, o+=8)
2148 		bus_space_write_8(t, h, o, *a);
2149 }
2150 
2151 
2152 /*
2153  *	void bus_space_set_region_N(bus_space_tag_t tag,
2154  *	    bus_space_handle_t bsh, bus_size_t off,
2155  *	    u_intN_t *addr, bus_size_t count);
2156  *
2157  */
2158 void
2159 bus_space_set_region_1(
2160 	bus_space_tag_t		t,
2161 	bus_space_handle_t	h,
2162 	bus_size_t		o,
2163 	const uint8_t		v,
2164 	bus_size_t		c)
2165 {
2166 	for (; c; c--, o++)
2167 		bus_space_write_1(t, h, o, v);
2168 }
2169 
2170 void
2171 bus_space_set_region_2(
2172 	bus_space_tag_t		t,
2173 	bus_space_handle_t	h,
2174 	bus_size_t		o,
2175 	const uint16_t		v,
2176 	bus_size_t		c)
2177 {
2178 	for (; c; c--, o+=2)
2179 		bus_space_write_2(t, h, o, v);
2180 }
2181 
2182 void
2183 bus_space_set_region_4(
2184 	bus_space_tag_t		t,
2185 	bus_space_handle_t	h,
2186 	bus_size_t		o,
2187 	const uint32_t		v,
2188 	bus_size_t		c)
2189 {
2190 	for (; c; c--, o+=4)
2191 		bus_space_write_4(t, h, o, v);
2192 }
2193 
2194 void
2195 bus_space_set_region_8(
2196 	bus_space_tag_t		t,
2197 	bus_space_handle_t	h,
2198 	bus_size_t		o,
2199 	const uint64_t		v,
2200 	bus_size_t		c)
2201 {
2202 	for (; c; c--, o+=8)
2203 		bus_space_write_8(t, h, o, v);
2204 }
2205 
2206 
2207 /*
2208  *	void bus_space_copy_region_N(bus_space_tag_t tag,
2209  *	    bus_space_handle_t bsh1, bus_size_t off1,
2210  *	    bus_space_handle_t bsh2, bus_size_t off2,
2211  *	    bus_size_t count);
2212  *
2213  * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
2214  * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
2215  */
2216 void
2217 bus_space_copy_region_1(
2218 	bus_space_tag_t		t,
2219 	bus_space_handle_t	h1,
2220 	bus_size_t		o1,
2221 	bus_space_handle_t	h2,
2222 	bus_size_t		o2,
2223 	bus_size_t		c)
2224 {
2225 	for (; c; c--, o1++, o2++)
2226 	    bus_space_write_1(t, h1, o1, bus_space_read_1(t, h2, o2));
2227 }
2228 
2229 void
2230 bus_space_copy_region_2(
2231 	bus_space_tag_t		t,
2232 	bus_space_handle_t	h1,
2233 	bus_size_t		o1,
2234 	bus_space_handle_t	h2,
2235 	bus_size_t		o2,
2236 	bus_size_t		c)
2237 {
2238 	for (; c; c--, o1+=2, o2+=2)
2239 	    bus_space_write_2(t, h1, o1, bus_space_read_2(t, h2, o2));
2240 }
2241 
2242 void
2243 bus_space_copy_region_4(
2244 	bus_space_tag_t		t,
2245 	bus_space_handle_t	h1,
2246 	bus_size_t		o1,
2247 	bus_space_handle_t	h2,
2248 	bus_size_t		o2,
2249 	bus_size_t		c)
2250 {
2251 	for (; c; c--, o1+=4, o2+=4)
2252 	    bus_space_write_4(t, h1, o1, bus_space_read_4(t, h2, o2));
2253 }
2254 
2255 void
2256 bus_space_copy_region_8(
2257 	bus_space_tag_t		t,
2258 	bus_space_handle_t	h1,
2259 	bus_size_t		o1,
2260 	bus_space_handle_t	h2,
2261 	bus_size_t		o2,
2262 	bus_size_t		c)
2263 {
2264 	for (; c; c--, o1+=8, o2+=8)
2265 	    bus_space_write_8(t, h1, o1, bus_space_read_8(t, h2, o2));
2266 }
2267 
2268 /*
2269  *	void bus_space_read_region_stream_N(bus_space_tag_t tag,
2270  *	    bus_space_handle_t bsh, bus_size_t off,
2271  *	    u_intN_t *addr, bus_size_t count);
2272  *
2273  */
2274 void
2275 bus_space_read_region_stream_1(
2276 	bus_space_tag_t		t,
2277 	bus_space_handle_t	h,
2278 	bus_size_t		o,
2279 	uint8_t			*a,
2280 	bus_size_t		c)
2281 {
2282 	for (; c; a++, c--, o++)
2283 		*a = bus_space_read_stream_1(t, h, o);
2284 }
2285 void
2286 bus_space_read_region_stream_2(
2287 	bus_space_tag_t		t,
2288 	bus_space_handle_t	h,
2289 	bus_size_t		o,
2290 	uint16_t		*a,
2291 	bus_size_t		c)
2292 {
2293 	for (; c; a++, c--, o+=2)
2294 		*a = bus_space_read_stream_2(t, h, o);
2295  }
2296 void
2297 bus_space_read_region_stream_4(
2298 	bus_space_tag_t		t,
2299 	bus_space_handle_t	h,
2300 	bus_size_t		o,
2301 	uint32_t		*a,
2302 	bus_size_t		c)
2303 {
2304 	for (; c; a++, c--, o+=4)
2305 		*a = bus_space_read_stream_4(t, h, o);
2306 }
2307 void
2308 bus_space_read_region_stream_8(
2309 	bus_space_tag_t		t,
2310 	bus_space_handle_t	h,
2311 	bus_size_t		o,
2312 	uint64_t		*a,
2313 	bus_size_t		c)
2314 {
2315 	for (; c; a++, c--, o+=8)
2316 		*a = bus_space_read_stream_8(t, h, o);
2317 }
2318 
2319 /*
2320  *	void bus_space_write_region_stream_N(bus_space_tag_t tag,
2321  *	    bus_space_handle_t bsh, bus_size_t off,
2322  *	    u_intN_t *addr, bus_size_t count);
2323  *
2324  */
2325 void
2326 bus_space_write_region_stream_1(
2327 	bus_space_tag_t		t,
2328 	bus_space_handle_t	h,
2329 	bus_size_t		o,
2330 	const uint8_t		*a,
2331 	bus_size_t		c)
2332 {
2333 	for (; c; a++, c--, o++)
2334 		bus_space_write_stream_1(t, h, o, *a);
2335 }
2336 
2337 void
2338 bus_space_write_region_stream_2(
2339 	bus_space_tag_t		t,
2340 	bus_space_handle_t	h,
2341 	bus_size_t		o,
2342 	const uint16_t		*a,
2343 	bus_size_t		c)
2344 {
2345 	for (; c; a++, c--, o+=2)
2346 		bus_space_write_stream_2(t, h, o, *a);
2347 }
2348 
2349 void
2350 bus_space_write_region_stream_4(
2351 	bus_space_tag_t		t,
2352 	bus_space_handle_t	h,
2353 	bus_size_t		o,
2354 	const uint32_t		*a,
2355 	bus_size_t		c)
2356 {
2357 	for (; c; a++, c--, o+=4)
2358 		bus_space_write_stream_4(t, h, o, *a);
2359 }
2360 
2361 void
2362 bus_space_write_region_stream_8(
2363 	bus_space_tag_t		t,
2364 	bus_space_handle_t	h,
2365 	bus_size_t		o,
2366 	const uint64_t		*a,
2367 	bus_size_t		c)
2368 {
2369 	for (; c; a++, c--, o+=8)
2370 		bus_space_write_stream_8(t, h, o, *a);
2371 }
2372 
2373 
/*
 *	void bus_space_set_region_stream_N(bus_space_tag_t tag,
 *	    bus_space_handle_t bsh, bus_size_t off,
 *	    u_intN_t val, bus_size_t count);
 *
 * Write the single value `val' to `count' consecutive bus space
 * locations starting at tag/bsh/off.
 */
2380 void
2381 bus_space_set_region_stream_1(
2382 	bus_space_tag_t		t,
2383 	bus_space_handle_t	h,
2384 	bus_size_t		o,
2385 	const uint8_t		v,
2386 	bus_size_t		c)
2387 {
2388 	for (; c; c--, o++)
2389 		bus_space_write_stream_1(t, h, o, v);
2390 }
2391 
2392 void
2393 bus_space_set_region_stream_2(
2394 	bus_space_tag_t		t,
2395 	bus_space_handle_t	h,
2396 	bus_size_t		o,
2397 	const uint16_t		v,
2398 	bus_size_t		c)
2399 {
2400 	for (; c; c--, o+=2)
2401 		bus_space_write_stream_2(t, h, o, v);
2402 }
2403 
2404 void
2405 bus_space_set_region_stream_4(
2406 	bus_space_tag_t		t,
2407 	bus_space_handle_t	h,
2408 	bus_size_t		o,
2409 	const uint32_t		v,
2410 	bus_size_t		c)
2411 {
2412 	for (; c; c--, o+=4)
2413 		bus_space_write_stream_4(t, h, o, v);
2414 }
2415 
2416 void
2417 bus_space_set_region_stream_8(
2418 	bus_space_tag_t		t,
2419 	bus_space_handle_t	h,
2420 	bus_size_t		o,
2421 	const uint64_t		v,
2422 	bus_size_t		c)
2423 {
2424 	for (; c; c--, o+=8)
2425 		bus_space_write_stream_8(t, h, o, v);
2426 }
2427 
/*
 *	void bus_space_copy_region_stream_N(bus_space_tag_t tag,
 *	    bus_space_handle_t bsh1, bus_size_t off1,
 *	    bus_space_handle_t bsh2, bus_size_t off2,
 *	    bus_size_t count);
 *
 * Copy `count' 1, 2, 4, or 8 byte values between two bus space regions.
 * NOTE(review): the implementations below read from tag/bsh2/off2 and
 * write to tag/bsh1/off1, i.e. the reverse of the direction bus_space(9)
 * documents for bus_space_copy_region_N (where bsh1 is the source);
 * verify the intended direction before relying on it.
 */
2437 
2438 void
2439 bus_space_copy_region_stream_1(
2440 	bus_space_tag_t		t,
2441 	bus_space_handle_t	h1,
2442 	bus_size_t		o1,
2443 	bus_space_handle_t	h2,
2444 	bus_size_t		o2,
2445 	bus_size_t		c)
2446 {
2447 	for (; c; c--, o1++, o2++)
2448 	    bus_space_write_stream_1(t, h1, o1, bus_space_read_stream_1(t, h2, o2));
2449 }
2450 
2451 void
2452 bus_space_copy_region_stream_2(
2453 	bus_space_tag_t		t,
2454 	bus_space_handle_t	h1,
2455 	bus_size_t		o1,
2456 	bus_space_handle_t	h2,
2457 	bus_size_t		o2,
2458 	bus_size_t		c)
2459 {
2460 	for (; c; c--, o1+=2, o2+=2)
2461 	    bus_space_write_stream_2(t, h1, o1, bus_space_read_stream_2(t, h2, o2));
2462 }
2463 
2464 void
2465 bus_space_copy_region_stream_4(
2466 	bus_space_tag_t		t,
2467 	bus_space_handle_t	h1,
2468 	bus_size_t		o1,
2469 	bus_space_handle_t	h2,
2470 	bus_size_t		o2,
2471 	bus_size_t		c)
2472 {
2473 	for (; c; c--, o1+=4, o2+=4)
2474 	    bus_space_write_stream_4(t, h1, o1, bus_space_read_stream_4(t, h2, o2));
2475 }
2476 
2477 void
2478 bus_space_copy_region_stream_8(
2479 	bus_space_tag_t		t,
2480 	bus_space_handle_t	h1,
2481 	bus_size_t		o1,
2482 	bus_space_handle_t	h2,
2483 	bus_size_t		o2,
2484 	bus_size_t		c)
2485 {
2486 	for (; c; c--, o1+=8, o2+=8)
2487 	    bus_space_write_stream_8(t, h1, o1, bus_space_read_8(t, h2, o2));
2488 }
2489 
2490 void
2491 bus_space_write_1(
2492 	bus_space_tag_t		t,
2493 	bus_space_handle_t	h,
2494 	bus_size_t		o,
2495 	uint8_t			v)
2496 {
2497 	(*t->sparc_write_1)(t, h, o, v);
2498 }
2499 
2500 void
2501 bus_space_write_2(
2502 	bus_space_tag_t		t,
2503 	bus_space_handle_t	h,
2504 	bus_size_t		o,
2505 	uint16_t		v)
2506 {
2507 	(*t->sparc_write_2)(t, h, o, v);
2508 }
2509 
2510 void
2511 bus_space_write_4(
2512 	bus_space_tag_t		t,
2513 	bus_space_handle_t	h,
2514 	bus_size_t		o,
2515 	uint32_t		v)
2516 {
2517 	(*t->sparc_write_4)(t, h, o, v);
2518 }
2519 
2520 void
2521 bus_space_write_8(
2522 	bus_space_tag_t		t,
2523 	bus_space_handle_t	h,
2524 	bus_size_t		o,
2525 	uint64_t		v)
2526 {
2527 	(*t->sparc_write_8)(t, h, o, v);
2528 }
2529 
2530 #if __SLIM_SPARC_BUS_SPACE
2531 
/*
 * __SLIM_SPARC_BUS_SPACE variants: skip the per-tag indirection and
 * call the `real' accessors directly, with only a compiler-level
 * barrier (__insn_barrier) to keep the access ordered.
 * NOTE(review): these duplicate the definitions above; presumably
 * only one set is compiled depending on __SLIM_SPARC_BUS_SPACE.
 */
void
bus_space_write_1(
	bus_space_tag_t		t,
	bus_space_handle_t	h,
	bus_size_t		o,
	uint8_t			v)
{
	__insn_barrier();
	bus_space_write_1_real(t, h, o, v);
}

void
bus_space_write_2(
	bus_space_tag_t		t,
	bus_space_handle_t	h,
	bus_size_t		o,
	uint16_t		v)
{
	__insn_barrier();
	bus_space_write_2_real(t, h, o, v);
}
2553 
2554 void
2555 bus_space_write_4(
2556 	bus_space_tag_t		t,
2557 	bus_space_handle_t	h,
2558 	bus_size_t		o,
2559 	uint32_t		v)
2560 {
2561 	__insn_barrier();
2562 	bus_space_write_4_real(
2563 }
2564 
/*
 * __SLIM_SPARC_BUS_SPACE variant: direct call to the `real' accessor
 * with only a compiler barrier.
 */
void
bus_space_write_8(
	bus_space_tag_t		t,
	bus_space_handle_t	h,
	bus_size_t		o,
	uint64_t		v)
{
	__insn_barrier();
	bus_space_write_8_real(t, h, o, v);
}
2575 
2576 #endif /* __SLIM_SPARC_BUS_SPACE */
2577 
2578 uint8_t
2579 bus_space_read_1(
2580 	bus_space_tag_t		t,
2581 	bus_space_handle_t	h,
2582 	bus_size_t		o)
2583 {
2584 	return (*t->sparc_read_1)(t, h, o);
2585 }
2586 
2587 uint16_t
2588 bus_space_read_2(
2589 	bus_space_tag_t		t,
2590 	bus_space_handle_t	h,
2591 	bus_size_t		o)
2592 {
2593 	return (*t->sparc_read_2)(t, h, o);
2594 }
2595 
2596 uint32_t
2597 bus_space_read_4(
2598 	bus_space_tag_t		t,
2599 	bus_space_handle_t	h,
2600 	bus_size_t		o)
2601 {
2602 	return (*t->sparc_read_4)(t, h, o);
2603 }
2604 
2605 uint64_t
2606 bus_space_read_8(
2607 	bus_space_tag_t		t,
2608 	bus_space_handle_t	h,
2609 	bus_size_t		o)
2610 {
2611 	return (*t->sparc_read_8)(t, h, o);
2612 }
2613 
2614 #if __SLIM_SPARC_BUS_SPACE
/*
 * __SLIM_SPARC_BUS_SPACE read variants: direct calls to the `real'
 * accessors with only a compiler barrier; see the write variants above.
 */
uint8_t
bus_space_read_1(
	bus_space_tag_t		t,
	bus_space_handle_t	h,
	bus_size_t		o)
{
	__insn_barrier();
	return bus_space_read_1_real(t, h, o);
}

uint16_t
bus_space_read_2(
	bus_space_tag_t		t,
	bus_space_handle_t	h,
	bus_size_t		o)
{
	__insn_barrier();
	return bus_space_read_2_real(t, h, o);
}

uint32_t
bus_space_read_4(
	bus_space_tag_t		t,
	bus_space_handle_t	h,
	bus_size_t		o)
{
	__insn_barrier();
	return bus_space_read_4_real(t, h, o);
}

uint64_t
bus_space_read_8(
	bus_space_tag_t		t,
	bus_space_handle_t	h,
	bus_size_t		o)
{
	__insn_barrier();
	return bus_space_read_8_real(t, h, o);
}
2654 
2655 #endif /* __SLIM_SPARC_BUS_SPACE */
2656 
2657 void
2658 bus_space_read_multi_1(
2659 	bus_space_tag_t		t,
2660 	bus_space_handle_t	h,
2661 	bus_size_t		o,
2662 	uint8_t			*a,
2663 	bus_size_t		c)
2664 {
2665 	while (c-- > 0)
2666 		*a++ = bus_space_read_1(t, h, o);
2667 }
2668 
2669 void
2670 bus_space_read_multi_2(
2671 	bus_space_tag_t		t,
2672 	bus_space_handle_t	h,
2673 	bus_size_t		o,
2674 	uint16_t		*a,
2675 	bus_size_t		c)
2676 {
2677 	while (c-- > 0)
2678 		*a++ = bus_space_read_2(t, h, o);
2679 }
2680 
2681 void
2682 bus_space_read_multi_4(
2683 	bus_space_tag_t		t,
2684 	bus_space_handle_t	h,
2685 	bus_size_t		o,
2686 	uint32_t		*a,
2687 	bus_size_t		c)
2688 {
2689 	while (c-- > 0)
2690 		*a++ = bus_space_read_4(t, h, o);
2691 }
2692 
2693 void
2694 bus_space_read_multi_8(
2695 	bus_space_tag_t		t,
2696 	bus_space_handle_t	h,
2697 	bus_size_t		o,
2698 	uint64_t		*a,
2699 	bus_size_t		c)
2700 {
2701 	while (c-- > 0)
2702 		*a++ = bus_space_read_8(t, h, o);
2703 }
2704 
/*
 *	void bus_space_read_multi_stream_N(bus_space_tag_t tag,
 *	    bus_space_handle_t bsh, bus_size_t offset,
 *	    u_intN_t *addr, bus_size_t count);
 *
 * Read `count' 2, 4, or 8 byte stream quantities from the single bus
 * space location described by tag/handle/offset and copy into the
 * buffer provided.
 */
2713 void
2714 bus_space_read_multi_stream_2(
2715 	bus_space_tag_t		t,
2716 	bus_space_handle_t	h,
2717 	bus_size_t		o,
2718 	uint16_t		*a,
2719 	bus_size_t		c)
2720 {
2721 	while (c-- > 0)
2722 		*a++ = bus_space_read_2_real(t, h, o);
2723 }
2724 
2725 void
2726 bus_space_read_multi_stream_4(
2727 	bus_space_tag_t		t,
2728 	bus_space_handle_t	h,
2729 	bus_size_t		o,
2730 	uint32_t		*a,
2731 	bus_size_t		c)
2732 {
2733 	while (c-- > 0)
2734 		*a++ = bus_space_read_4_real(t, h, o);
2735 }
2736 
2737 void
2738 bus_space_read_multi_stream_8(
2739 	bus_space_tag_t		t,
2740 	bus_space_handle_t	h,
2741 	bus_size_t		o,
2742 	uint64_t		*a,
2743 	bus_size_t		c)
2744 {
2745 	while (c-- > 0)
2746 		*a++ = bus_space_read_8_real(t, h, o);
2747 }
2748 
2749 /*
2750  *	void bus_space_write_multi_N(bus_space_tag_t tag,
2751  *	    bus_space_handle_t bsh, bus_size_t offset,
2752  *	    const u_intN_t *addr, bus_size_t count);
2753  *
2754  * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
2755  * provided to bus space described by tag/handle/offset.
2756  */
2757 void
2758 bus_space_write_multi_1(
2759 	bus_space_tag_t		t,
2760 	bus_space_handle_t	h,
2761 	bus_size_t		o,
2762 	const uint8_t		*a,
2763 	bus_size_t		c)
2764 {
2765 	while (c-- > 0)
2766 		bus_space_write_1(t, h, o, *a++);
2767 }
2768 
2769 void
2770 bus_space_write_multi_2(
2771 	bus_space_tag_t		t,
2772 	bus_space_handle_t	h,
2773 	bus_size_t		o,
2774 	const uint16_t		*a,
2775 	bus_size_t		c)
2776 {
2777 	while (c-- > 0)
2778 		bus_space_write_2(t, h, o, *a++);
2779 }
2780 
2781 void
2782 bus_space_write_multi_4(
2783 	bus_space_tag_t		t,
2784 	bus_space_handle_t	h,
2785 	bus_size_t		o,
2786 	const uint32_t		*a,
2787 	bus_size_t		c)
2788 {
2789 	while (c-- > 0)
2790 		bus_space_write_4(t, h, o, *a++);
2791 }
2792 
2793 void
2794 bus_space_write_multi_8(
2795 	bus_space_tag_t		t,
2796 	bus_space_handle_t	h,
2797 	bus_size_t		o,
2798 	const uint64_t		*a,
2799 	bus_size_t		c)
2800 {
2801 	while (c-- > 0)
2802 		bus_space_write_8(t, h, o, *a++);
2803 }
2804 
2805 /*
2806  * Allocate a new bus tag and have it inherit the methods of the
2807  * given parent.
2808  */
/*
 * Allocate a bus tag; with a parent, all methods are inherited by
 * copying the parent tag wholesale.  Returns NULL on allocation
 * failure (M_NOWAIT).
 */
bus_space_tag_t
bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
{
	struct sparc_bus_space_tag *sbt;

	/* M_ZERO: with no parent, every method pointer starts out NULL. */
	sbt = malloc(sizeof(struct sparc_bus_space_tag),
		     M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sbt == NULL)
		return (NULL);

	if (parent) {
		/* Inherit all methods, then reset the per-instance fields. */
		memcpy(sbt, parent, sizeof(*sbt));
		sbt->parent = parent;
		sbt->ranges = NULL;
		sbt->nranges = 0;
	}

	sbt->cookie = cookie;
	return (sbt);
}
2829 
2830 /*
2831  * Generic routine to translate an address using OpenPROM `ranges'.
2832  */
2833 int
2834 bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
2835     bus_addr_t *bap)
2836 {
2837 	int i, space = BUS_ADDR_IOSPACE(*bap);
2838 
2839 	for (i = 0; i < nranges; i++) {
2840 		struct openprom_range *rp = &ranges[i];
2841 
2842 		if (rp->or_child_space != space)
2843 			continue;
2844 
2845 		/* We've found the connection to the parent bus. */
2846 		*bap = BUS_ADDR(rp->or_parent_space,
2847 		    rp->or_parent_base + BUS_ADDR_PADDR(*bap));
2848 		return (0);
2849 	}
2850 
2851 	return (EINVAL);
2852 }
2853 
/*
 * Map a device into the fixed IODEV virtual address range (or at the
 * caller-supplied VA).  Back-end for sparc_bus_map(); returns 0 on
 * success or an errno.
 */
static int
sparc_bus_map_iodev(bus_space_tag_t t, bus_addr_t ba, bus_size_t size, int flags,
	      vaddr_t va, bus_space_handle_t *hp)
{
	vaddr_t v;
	paddr_t pa;
	unsigned int pmtype;
	bus_space_tag_t pt;
static	vaddr_t iobase;	/* next free VA in the IODEV range; never reclaimed */

	/*
	 * This base class bus map function knows about address range
	 * translation so bus drivers that need no other special
	 * handling can just keep this method in their tags.
	 *
	 * We expect to resolve range translations iteratively, but allow
	 * for recursion just in case.
	 */
	while ((pt = t->parent) != NULL) {
		if (t->ranges != NULL) {
			int error;

			/* Rewrite `ba' into the parent bus address space. */
			if ((error = bus_space_translate_address_generic(
					t->ranges, t->nranges, &ba)) != 0)
				return (error);
		}
		/* A parent with its own map method takes over from here. */
		if (pt->sparc_bus_map != sparc_bus_map)
			return (bus_space_map2(pt, ba, size, flags, va, hp));
		t = pt;
	}

	if (iobase == 0)
		iobase = IODEV_BASE;

	size = round_page(size);
	if (size == 0) {
		printf("sparc_bus_map: zero size\n");
		return (EINVAL);
	}

	/* Use the caller-supplied VA if any, else carve from iobase. */
	if (va)
		v = trunc_page(va);
	else {
		v = iobase;
		iobase += size;
		if (iobase > IODEV_END)	/* unlikely */
			panic("sparc_bus_map: iobase=0x%lx", iobase);
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = BUS_ADDR_PADDR(ba);

	/* note: preserve page offset */
	*hp = (bus_space_handle_t)(v | ((u_long)pa & PGOFSET));

	pa = trunc_page(pa);
	do {
		/* Enter each page uncached (PMAP_NC) with R/W protection. */
		pmap_kenter_pa(v, pa | pmtype | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		v += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);

	pmap_update(pmap_kernel());
	return (0);
}
2920 
/*
 * BUS_SPACE_MAP_LARGE back-end: obtain VA space from kernel_map
 * instead of the fixed IODEV range, then let sparc_bus_map_iodev()
 * enter the pages at that VA.
 */
static int
sparc_bus_map_large(bus_space_tag_t t, bus_addr_t ba,
		    bus_size_t size, int flags, bus_space_handle_t *hp)
{
	vaddr_t v = 0;

	/* VA allocation only; no backing object (NULL uobj). */
	if (uvm_map(kernel_map, &v, size, NULL, 0, PAGE_SIZE,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_NORMAL,
			0)) == 0) {
		return sparc_bus_map_iodev(t, ba, size, flags, v, hp);
	}
	return -1;	/* NOTE(review): -1 is not an errno; callers
			 * presumably only test for != 0 — verify */
}
2934 
2935 int
2936 sparc_bus_map(bus_space_tag_t t, bus_addr_t ba,
2937 		    bus_size_t size, int flags, vaddr_t va,
2938 		    bus_space_handle_t *hp)
2939 {
2940 
2941 	if (flags & BUS_SPACE_MAP_LARGE) {
2942 		return sparc_bus_map_large(t, ba, size, flags, hp);
2943 	} else
2944 		return sparc_bus_map_iodev(t, ba, size, flags, va, hp);
2945 
2946 }
2947 
2948 int
2949 sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
2950 {
2951 	vaddr_t va = trunc_page((vaddr_t)bh);
2952 
2953 	/*
2954 	 * XXX
2955 	 * mappings with BUS_SPACE_MAP_LARGE need additional care here
2956 	 * we can just check if the VA is in the IODEV range
2957 	 */
2958 
2959 	pmap_kremove(va, round_page(size));
2960 	pmap_update(pmap_kernel());
2961 	return (0);
2962 }
2963 
2964 int
2965 sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
2966 		    bus_size_t offset, bus_size_t size,
2967 		    bus_space_handle_t *nhandlep)
2968 {
2969 
2970 	*nhandlep = handle + offset;
2971 	return (0);
2972 }
2973 
/*
 * Base class mmap method: translate `ba' up through the bus hierarchy
 * like sparc_bus_map(), then return an mmap cookie (page frame with
 * I/O-space encoding, uncached), or -1 on translation failure.
 */
paddr_t
sparc_bus_mmap(bus_space_tag_t t, bus_addr_t ba, off_t off,
	       int prot, int flags)
{
	u_int pmtype;
	paddr_t pa;
	bus_space_tag_t pt;

	/*
	 * Base class bus mmap function; see also sparc_bus_map
	 */
	while ((pt = t->parent) != NULL) {
		if (t->ranges != NULL) {
			int error;

			/* Rewrite `ba' into the parent bus address space. */
			if ((error = bus_space_translate_address_generic(
					t->ranges, t->nranges, &ba)) != 0)
				return (-1);
		}
		/* A parent with its own mmap method takes over from here. */
		if (pt->sparc_bus_mmap != sparc_bus_mmap)
			return (bus_space_mmap(pt, ba, off, prot, flags));
		t = pt;
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = trunc_page(BUS_ADDR_PADDR(ba) + off);

	return (paddr_t)(pa | pmtype | PMAP_NC);
}
3003 
3004 /*
3005  * Establish a temporary bus mapping for device probing.
3006  */
/*
 * Map the target at the TMPMAP_VA scratch address, poke it with
 * probeget(), optionally run the caller's callback, then unmap.
 * Returns nonzero if the device responded (and the callback, if any,
 * succeeded), 0 otherwise.
 */
int
bus_space_probe(bus_space_tag_t tag, bus_addr_t paddr, bus_size_t size,
		size_t offset, int flags,
		int (*callback)(void *, void *), void *arg)
{
	bus_space_handle_t bh;
	void *tmp;
	int result;

	if (bus_space_map2(tag, paddr, size, flags, TMPMAP_VA, &bh) != 0)
		return (0);

	/* probeget() returns -1 when the access faults (no device). */
	tmp = (void *)bh;
	result = (probeget((char *)tmp + offset, size) != -1);
	if (result && callback != NULL)
		result = (*callback)(tmp, arg);
	bus_space_unmap(tag, bh, size);	/* always drop the temporary map */
	return (result);
}
3026 
3027 
/*
 * Mainbus interrupt-establish method: allocate an intrhand, record
 * the handler and its argument, and register it with intr_establish().
 * Returns the handle for the caller, or NULL if allocation fails.
 */
void *
sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
			     int (*handler)(void *), void *arg,
			     void (*fastvec)(void))
{
	struct intrhand *ih;

	ih = (struct intrhand *)
		malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_fun = handler;
	ih->ih_arg = arg;
	/* `false': NOTE(review) presumably the MP-safe/sched flag — verify */
	intr_establish(pil, level, ih, fastvec, false);
	return (ih);
}
3045 
3046 void sparc_bus_barrier (bus_space_tag_t t, bus_space_handle_t h,
3047 			bus_size_t offset, bus_size_t size, int flags)
3048 {
3049 
3050 	/* No default barrier action defined */
3051 	return;
3052 }
3053 
3054 static uint8_t
3055 sparc_bus_space_read_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3056 {
3057 
3058 	return bus_space_read_1_real(t, h, o);
3059 }
3060 
3061 static uint16_t
3062 sparc_bus_space_read_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3063 {
3064 
3065 	return bus_space_read_2_real(t, h, o);
3066 }
3067 
3068 static uint32_t
3069 sparc_bus_space_read_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3070 {
3071 
3072 	return bus_space_read_4_real(t, h, o);
3073 }
3074 
3075 static uint64_t
3076 sparc_bus_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3077 {
3078 
3079 	return bus_space_read_8_real(t, h, o);
3080 }
3081 
3082 static void
3083 sparc_bus_space_write_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3084 			uint8_t v)
3085 {
3086 
3087 	bus_space_write_1_real(t, h, o, v);
3088 }
3089 
3090 static void
3091 sparc_bus_space_write_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3092 			uint16_t v)
3093 {
3094 
3095 	bus_space_write_2_real(t, h, o, v);
3096 }
3097 
3098 static void
3099 sparc_bus_space_write_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3100 			uint32_t v)
3101 {
3102 
3103 	bus_space_write_4_real(t, h, o, v);
3104 }
3105 
3106 static void
3107 sparc_bus_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3108 			uint64_t v)
3109 {
3110 
3111 	bus_space_write_8_real(t, h, o, v);
3112 }
3113 
/*
 * Root bus space tag for mainbus.  No parent and no ranges, so
 * sparc_bus_map()/sparc_bus_mmap() terminate their translation loops
 * here; the leaf read/write methods above do the actual accesses.
 * NOTE: positional initializer — field order must match the
 * sparc_bus_space_tag declaration.
 */
struct sparc_bus_space_tag mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	NULL,				/* ranges */
	0,				/* nranges */
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_barrier,		/* bus_space_barrier */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish,	/* bus_intr_establish */

	sparc_bus_space_read_1,		/* bus_space_read_1 */
	sparc_bus_space_read_2,		/* bus_space_read_2 */
	sparc_bus_space_read_4,		/* bus_space_read_4 */
	sparc_bus_space_read_8,		/* bus_space_read_8 */
	sparc_bus_space_write_1,	/* bus_space_write_1 */
	sparc_bus_space_write_2,	/* bus_space_write_2 */
	sparc_bus_space_write_4,	/* bus_space_write_4 */
	sparc_bus_space_write_8		/* bus_space_write_8 */
};
3135 
3136 int
3137 mm_md_physacc(paddr_t pa, vm_prot_t prot)
3138 {
3139 
3140 	return pmap_pa_exists(pa) ? 0 : EFAULT;
3141 }
3142 
/*
 * /dev/kmem access check for machine-specific regions: the message
 * buffer page is accessible, and the PROM mapping is accessible
 * read-only.  NOTE(review): presumably *handled = true tells the
 * caller to skip its generic validation — verify against mm(9) users.
 */
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{
	extern vaddr_t prom_vstart;
	extern vaddr_t prom_vend;
	const vaddr_t v = (vaddr_t)ptr;

	*handled = (v >= MSGBUF_VA && v < MSGBUF_VA + PAGE_SIZE) ||
	    (v >= prom_vstart && v < prom_vend && (prot & VM_PROT_WRITE) == 0);
	return 0;
}
3154 
/*
 * Machine-dependent minor devices of /dev/mem: only the sun4 EEPROM
 * minor is supported (and only when actually running on a sun4);
 * everything else returns ENXIO.
 */
int
mm_md_readwrite(dev_t dev, struct uio *uio)
{

	switch (minor(dev)) {
#if defined(SUN4)
	case DEV_EEPROM:
		if (cputyp == CPU_SUN4)
			return eeprom_uio(uio);
		else
#endif
		/* also the `else' branch when SUN4 is defined */
		return ENXIO;
	default:
		return ENXIO;
	}
}
3171