1 /*	$NetBSD: machdep.c,v 1.327 2014/09/21 16:36:32 christos Exp $ */
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1992, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This software was developed by the Computer Systems Engineering group
38  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
39  * contributed to Berkeley.
40  *
41  * All advertising materials mentioning features or use of this software
42  * must display the following acknowledgement:
43  *	This product includes software developed by the University of
44  *	California, Lawrence Berkeley Laboratory.
45  *
46  * Redistribution and use in source and binary forms, with or without
47  * modification, are permitted provided that the following conditions
48  * are met:
49  * 1. Redistributions of source code must retain the above copyright
50  *    notice, this list of conditions and the following disclaimer.
51  * 2. Redistributions in binary form must reproduce the above copyright
52  *    notice, this list of conditions and the following disclaimer in the
53  *    documentation and/or other materials provided with the distribution.
54  * 3. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
71  */
72 
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.327 2014/09/21 16:36:32 christos Exp $");
75 
76 #include "opt_compat_netbsd.h"
77 #include "opt_compat_sunos.h"
78 #include "opt_sparc_arch.h"
79 #include "opt_modular.h"
80 #include "opt_multiprocessor.h"
81 
82 #include <sys/param.h>
83 #include <sys/signal.h>
84 #include <sys/signalvar.h>
85 #include <sys/proc.h>
86 #include <sys/extent.h>
87 #include <sys/cpu.h>
88 #include <sys/buf.h>
89 #include <sys/device.h>
90 #include <sys/reboot.h>
91 #include <sys/systm.h>
92 #include <sys/kernel.h>
93 #include <sys/conf.h>
94 #include <sys/file.h>
95 #include <sys/malloc.h>
96 #include <sys/mbuf.h>
97 #include <sys/mount.h>
98 #include <sys/msgbuf.h>
99 #include <sys/syscallargs.h>
100 #include <sys/exec.h>
101 #include <sys/exec_aout.h>
102 #include <sys/ucontext.h>
103 #include <sys/module.h>
104 #include <sys/mutex.h>
105 #include <sys/ras.h>
106 
107 #include <dev/mm.h>
108 
109 #include <uvm/uvm.h>		/* we use uvm.kernel_object */
110 
111 #include <sys/sysctl.h>
112 
113 #ifdef COMPAT_13
114 #include <compat/sys/signal.h>
115 #include <compat/sys/signalvar.h>
116 #endif
117 
118 #define _SPARC_BUS_DMA_PRIVATE
119 #include <machine/autoconf.h>
120 #include <sys/bus.h>
121 #include <machine/frame.h>
122 #include <machine/cpu.h>
123 #include <machine/pcb.h>
124 #include <machine/pmap.h>
125 #include <machine/oldmon.h>
126 #include <machine/bsd_openprom.h>
127 #include <machine/bootinfo.h>
128 #include <machine/eeprom.h>
129 
130 #include <sparc/sparc/asm.h>
131 #include <sparc/sparc/cache.h>
132 #include <sparc/sparc/vaddrs.h>
133 #include <sparc/sparc/cpuvar.h>
134 
135 #include "fb.h"
136 #include "power.h"
137 
138 #if NPOWER > 0
139 #include <sparc/dev/power.h>
140 #endif
141 
142 extern paddr_t avail_end;
143 
144 kmutex_t fpu_mtx;
145 
146 /*
147  * dvmamap24 is used to manage DVMA memory for devices that have the upper
148  * eight address bits wired to all-ones (e.g. `le' and `ie')
149  */
150 struct extent *dvmamap24;
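
/*
 * Illustrative sketch only (not called from anywhere): a driver whose
 * DMA map carries BUS_DMA_24BIT gets its DVMA window carved out of this
 * extent, much as sun4_dmamap_load() does further down.  The variable
 * names here are made up for the example.
 *
 *	vaddr_t dva;
 *	if (extent_alloc(dvmamap24, round_page(len), PAGE_SIZE, 0,
 *	    EX_NOWAIT, &dva) != 0)
 *		return (ENOMEM);
 */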
151 
152 void	dumpsys(void);
153 void	stackdump(void);
154 
155 /*
156  * Machine-dependent startup code
157  */
158 void
159 cpu_startup(void)
160 {
161 #ifdef DEBUG
162 	extern int pmapdebug;
163 	int opmapdebug = pmapdebug;
164 #endif
165 	struct pcb *pcb;
166 	vsize_t size;
167 	paddr_t pa;
168 	char pbuf[9];
169 
170 #ifdef DEBUG
171 	pmapdebug = 0;
172 #endif
173 
174 	/* XXX */
175 	pcb = lwp_getpcb(&lwp0);
176 	if (pcb && pcb->pcb_psr == 0)
177 		pcb->pcb_psr = getpsr();
178 
179 	/*
180 	 * Re-map the message buffer from its temporary address
181 	 * at KERNBASE to MSGBUF_VA.
182 	 */
183 #if !defined(MSGBUFSIZE) || MSGBUFSIZE <= 8192
184 	/*
185 	 * We use the free page(s) in front of the kernel load address.
186 	 */
187 	size = 8192;
188 
189 	/* Get physical address of the message buffer */
190 	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);
191 
192 	/* Invalidate the current mapping at KERNBASE. */
193 	pmap_kremove((vaddr_t)KERNBASE, size);
194 	pmap_update(pmap_kernel());
195 
196 	/* Enter the new mapping */
197 	pmap_map(MSGBUF_VA, pa, pa + size, VM_PROT_READ|VM_PROT_WRITE);
198 
199 	/*
200 	 * Re-initialize the message buffer.
201 	 */
202 	initmsgbuf((void *)MSGBUF_VA, size);
203 #else /* MSGBUFSIZE */
204 	{
205 	struct pglist mlist;
206 	struct vm_page *m;
207 	vaddr_t va0, va;
208 
209 	/*
210 	 * We use the free page(s) in front of the kernel load address,
211 	 * and then allocate some more.
212 	 */
213 	size = round_page(MSGBUFSIZE);
214 
215 	/* Get physical address of first 8192 chunk of the message buffer */
216 	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);
217 
218 	/* Allocate additional physical pages */
219 	if (uvm_pglistalloc(size - 8192,
220 			    vm_first_phys, vm_first_phys+vm_num_phys,
221 			    0, 0, &mlist, 1, 0) != 0)
222 		panic("cpu_start: no memory for message buffer");
223 
224 	/* Invalidate the current mapping at KERNBASE. */
225 	pmap_kremove((vaddr_t)KERNBASE, 8192);
226 	pmap_update(pmap_kernel());
227 
228 	/* Allocate virtual memory space */
229 	va0 = va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
230 	if (va == 0)
231 		panic("cpu_start: no virtual memory for message buffer");
232 
233 	/* Map first 8192 */
234 	while (va < va0 + 8192) {
235 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
236 		pa += PAGE_SIZE;
237 		va += PAGE_SIZE;
238 	}
239 	pmap_update(pmap_kernel());
240 
241 	/* Map the rest of the pages */
242 	TAILQ_FOREACH(m, &mlist, pageq.queue) {
243 		if (va >= va0 + size)
244 			panic("cpu_start: memory buffer size botch");
245 		pa = VM_PAGE_TO_PHYS(m);
246 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
247 		va += PAGE_SIZE;
248 	}
249 	pmap_update(pmap_kernel());
250 
251 	/*
252 	 * Re-initialize the message buffer.
253 	 */
254 	initmsgbuf((void *)va0, size);
255 	}
256 #endif /* MSGBUFSIZE */
257 
258 	/*
259 	 * Good {morning,afternoon,evening,night}.
260 	 */
261 	printf("%s%s", copyright, version);
262 	/*identifycpu();*/
263 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
264 	printf("total memory = %s\n", pbuf);
265 
266 	/*
267 	 * Tune buffer cache variables based on the capabilities of the MMU
268 	 * to cut down on VM space allocated for the buffer caches that
269 	 * would lead to MMU resource shortage.
270 	 */
271 	if (CPU_ISSUN4 || CPU_ISSUN4C) {
272 		/* Clip UBC windows */
273 		if (cpuinfo.mmu_nsegment <= 128) {
274 			/*
275 			 * ubc_nwins and ubc_winshift control the amount
276 			 * of VM used by the UBC. Normally, this VM is
277 			 * not wired in the kernel map, hence non-locked
278 			 * `PMEGs' (see pmap.c) are used for this space.
279 			 * We still limit possible fragmentation to prevent
280 			 * the occasional wired UBC mappings from tying up
281 			 * too many PMEGs.
282 			 *
283 			 * Set the upper limit to 9 segments (default
284 			 * winshift = 13).
285 			 */
286 			ubc_nwins = 512;
287 
288 			/*
289 			 * buf_setvalimit() allocates a submap for buffer
290 			 * allocation. We use it to limit the number of locked
291 			 * `PMEGs' (see pmap.c) dedicated to the buffer cache.
292 			 *
293 			 * Set the upper limit to 12 segments (3MB), which
294 			 * corresponds approximately to the size of the
295 			 * traditional 5% rule (assuming a maximum 64MB of
296 			 * memory in small sun4c machines).
297 			 */
298 			buf_setvalimit(12 * 256*1024);
299 		}
300 
301 		/* Clip max data & stack to avoid running into the MMU hole */
302 #if MAXDSIZ > 256*1024*1024
303 		maxdmap = 256*1024*1024;
304 #endif
305 #if MAXSSIZ > 256*1024*1024
306 		maxsmap = 256*1024*1024;
307 #endif
308 	}
309 
310 	if (CPU_ISSUN4 || CPU_ISSUN4C) {
311 		/*
312 		 * Allocate DMA map for 24-bit devices (le, ie)
313 		 * [dvma_base - dvma_end] is for VME devices.
314 		 */
315 		dvmamap24 = extent_create("dvmamap24",
316 					  D24_DVMA_BASE, D24_DVMA_END,
317 					  0, 0, EX_NOWAIT);
318 		if (dvmamap24 == NULL)
319 			panic("unable to allocate DVMA map");
320 	}
321 
322 #ifdef DEBUG
323 	pmapdebug = opmapdebug;
324 #endif
325 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
326 	printf("avail memory = %s\n", pbuf);
327 
328 	pmap_redzone();
329 
330 	mutex_init(&fpu_mtx, MUTEX_DEFAULT, IPL_SCHED);
331 }
332 
333 /*
334  * Set up registers on exec.
335  *
336  * XXX this entire mess must be fixed
337  */
338 /* ARGSUSED */
339 void
340 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
341 {
342 	struct trapframe *tf = l->l_md.md_tf;
343 	struct fpstate *fs;
344 	int psr;
345 
346 	/* Don't allow unaligned data references by default */
347 	l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN;
348 
349 	/*
350 	 * Set the registers to 0 except for:
351 	 *	%o6: stack pointer, built in exec()
352 	 *	%psr: (retain CWP and PSR_S bits)
353 	 *	%g1: p->p_psstrp (used by crt0)
354 	 *	%pc,%npc: entry point of program
355 	 */
356 	psr = tf->tf_psr & (PSR_S | PSR_CWP);
357 	if ((fs = l->l_md.md_fpstate) != NULL) {
358 		struct cpu_info *cpi;
359 		int s;
360 		/*
361 		 * We hold an FPU state.  If we own *some* FPU chip state
362 		 * we must get rid of it, and the only way to do that is
363 		 * to save it.  In any case, get rid of our FPU state.
364 		 */
365 		FPU_LOCK(s);
366 		if ((cpi = l->l_md.md_fpu) != NULL) {
367 			if (cpi->fplwp != l)
368 				panic("FPU(%d): fplwp %p",
369 					cpi->ci_cpuid, cpi->fplwp);
370 			if (l == cpuinfo.fplwp)
371 				savefpstate(fs);
372 #if defined(MULTIPROCESSOR)
373 			else
374 				XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid);
375 #endif
376 			cpi->fplwp = NULL;
377 		}
378 		l->l_md.md_fpu = NULL;
379 		FPU_UNLOCK(s);
380 		kmem_free(fs, sizeof(struct fpstate));
381 		l->l_md.md_fpstate = NULL;
382 	}
383 	memset((void *)tf, 0, sizeof *tf);
384 	tf->tf_psr = psr;
385 	tf->tf_global[1] = l->l_proc->p_psstrp;
386 	tf->tf_pc = pack->ep_entry & ~3;
387 	tf->tf_npc = tf->tf_pc + 4;
388 	stack -= sizeof(struct rwindow);
389 	tf->tf_out[6] = stack;
390 }
391 
392 #ifdef DEBUG
393 int sigdebug = 0;
394 int sigpid = 0;
395 #define SDB_FOLLOW	0x01
396 #define SDB_KSTACK	0x02
397 #define SDB_FPSTATE	0x04
398 #endif
399 
400 /*
401  * machine dependent system variables.
402  */
403 static int
404 sysctl_machdep_boot(SYSCTLFN_ARGS)
405 {
406 	struct sysctlnode node = *rnode;
407 	struct btinfo_kernelfile *bi_file;
408 	const char *cp;
409 
410 
411 	switch (node.sysctl_num) {
412 	case CPU_BOOTED_KERNEL:
413 		if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL)
414 			cp = bi_file->name;
415 		else
416 			cp = prom_getbootfile();
417 		if (cp != NULL && cp[0] == '\0')
418 			cp = "netbsd";
419 		break;
420 	case CPU_BOOTED_DEVICE:
421 		cp = prom_getbootpath();
422 		break;
423 	case CPU_BOOT_ARGS:
424 		cp = prom_getbootargs();
425 		break;
426 	default:
427 		return (EINVAL);
428 	}
429 
430 	if (cp == NULL || cp[0] == '\0')
431 		return (ENOENT);
432 
433 	node.sysctl_data = __UNCONST(cp);
434 	node.sysctl_size = strlen(cp) + 1;
435 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
436 }
437 
438 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
439 {
440 
441 	sysctl_createv(clog, 0, NULL, NULL,
442 		       CTLFLAG_PERMANENT,
443 		       CTLTYPE_NODE, "machdep", NULL,
444 		       NULL, 0, NULL, 0,
445 		       CTL_MACHDEP, CTL_EOL);
446 
447 	sysctl_createv(clog, 0, NULL, NULL,
448 		       CTLFLAG_PERMANENT,
449 		       CTLTYPE_STRING, "booted_kernel", NULL,
450 		       sysctl_machdep_boot, 0, NULL, 0,
451 		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
452 	sysctl_createv(clog, 0, NULL, NULL,
453 		       CTLFLAG_PERMANENT,
454 		       CTLTYPE_STRING, "booted_device", NULL,
455 		       sysctl_machdep_boot, 0, NULL, 0,
456 		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
457 	sysctl_createv(clog, 0, NULL, NULL,
458 		       CTLFLAG_PERMANENT,
459 		       CTLTYPE_STRING, "boot_args", NULL,
460 		       sysctl_machdep_boot, 0, NULL, 0,
461 		       CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL);
462 	sysctl_createv(clog, 0, NULL, NULL,
463 		       CTLFLAG_PERMANENT,
464 		       CTLTYPE_INT, "cpu_arch", NULL,
465 		       NULL, 0, &cpu_arch, 0,
466 		       CTL_MACHDEP, CPU_ARCH, CTL_EOL);
467 }
468 
469 /*
470  * Send an interrupt to process.
471  */
472 struct sigframe {
473 	siginfo_t sf_si;
474 	ucontext_t sf_uc;
475 };
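
/*
 * Rough picture of the user stack that sendsig_siginfo() below builds
 * (derived from the code, higher addresses first; not normative):
 *
 *	old %sp  ->	frame interrupted by the signal
 *	fp       ->	struct sigframe { sf_si; sf_uc; }, 8-byte aligned
 *	newsp    ->	a full struct frame whose rw_in[6] is set to the
 *			old %sp, so backtraces and _longjmp still work
 */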
476 
477 void
478 sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
479 {
480 	struct lwp *l = curlwp;
481 	struct proc *p = l->l_proc;
482 	struct sigacts *ps = p->p_sigacts;
483 	struct trapframe *tf;
484 	ucontext_t uc;
485 	struct sigframe *fp;
486 	u_int onstack, oldsp, newsp;
487 	u_int catcher;
488 	int sig, error;
489 	size_t ucsz;
490 
491 	sig = ksi->ksi_signo;
492 
493 	tf = l->l_md.md_tf;
494 	oldsp = tf->tf_out[6];
495 
496 	/*
497 	 * Compute new user stack addresses, subtract off
498 	 * one signal frame, and align.
499 	 */
500 	onstack =
501 	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
502 	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
503 
504 	if (onstack)
505 		fp = (struct sigframe *)
506 			((char *)l->l_sigstk.ss_sp +
507 				  l->l_sigstk.ss_size);
508 	else
509 		fp = (struct sigframe *)oldsp;
510 
511 	fp = (struct sigframe *)((int)(fp - 1) & ~7);
512 
513 #ifdef DEBUG
514 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
515 		printf("sendsig: %s[%d] sig %d newusp %p si %p uc %p\n",
516 		    p->p_comm, p->p_pid, sig, fp, &fp->sf_si, &fp->sf_uc);
517 #endif
518 
519 	/*
520 	 * Build the signal context to be used by sigreturn.
521 	 */
522 	uc.uc_flags = _UC_SIGMASK |
523 		((l->l_sigstk.ss_flags & SS_ONSTACK)
524 			? _UC_SETSTACK : _UC_CLRSTACK);
525 	uc.uc_sigmask = *mask;
526 	uc.uc_link = l->l_ctxlink;
527 	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));
528 
529 	/*
530 	 * Now copy the stack contents out to user space.
531 	 * We need to make sure that when we start the signal handler,
532 	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
533 	 * joins seamlessly with the frame it was in when the signal occurred,
534 	 * so that the debugger and _longjmp code can back up through it.
535 	 * Since we're calling the handler directly, allocate a full size
536 	 * C stack frame.
537 	 */
538 	sendsig_reset(l, sig);
539 	mutex_exit(p->p_lock);
540 	newsp = (int)fp - sizeof(struct frame);
541 	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
542 	ucsz = (int)&uc.__uc_pad - (int)&uc;
543 	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
544 	    copyout(&uc, &fp->sf_uc, ucsz) ||
545 	    suword(&((struct rwindow *)newsp)->rw_in[6], oldsp));
546 	mutex_enter(p->p_lock);
547 
548 	if (error) {
549 		/*
550 		 * Process has trashed its stack; give it an illegal
551 		 * instruction to halt it in its tracks.
552 		 */
553 #ifdef DEBUG
554 		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
555 			printf("sendsig: window save or copyout error\n");
556 #endif
557 		sigexit(l, SIGILL);
558 		/* NOTREACHED */
559 	}
560 
561 	switch (ps->sa_sigdesc[sig].sd_vers) {
562 	default:
563 		/* Unsupported trampoline version; kill the process. */
564 		sigexit(l, SIGILL);
565 	case 2:
566 		/*
567 		 * Arrange to continue execution at the user's handler.
568 		 * It needs a new stack pointer, a return address and
569 		 * three arguments: (signo, siginfo *, ucontext *).
570 		 */
571 		catcher = (u_int)SIGACTION(p, sig).sa_handler;
572 		tf->tf_pc = catcher;
573 		tf->tf_npc = catcher + 4;
574 		tf->tf_out[0] = sig;
575 		tf->tf_out[1] = (int)&fp->sf_si;
576 		tf->tf_out[2] = (int)&fp->sf_uc;
577 		tf->tf_out[6] = newsp;
578 		tf->tf_out[7] = (int)ps->sa_sigdesc[sig].sd_tramp - 8;
579 		break;
580 	}
581 
582 	/* Remember that we're now on the signal stack. */
583 	if (onstack)
584 		l->l_sigstk.ss_flags |= SS_ONSTACK;
585 
586 #ifdef DEBUG
587 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
588 		printf("sendsig: about to return to catcher\n");
589 #endif
590 }
591 
592 void
593 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
594 {
595 	struct trapframe *tf = (struct trapframe *)l->l_md.md_tf;
596 	__greg_t *r = mcp->__gregs;
597 	__greg_t ras_pc;
598 #ifdef FPU_CONTEXT
599 	__fpregset_t *f = &mcp->__fpregs;
600 	struct fpstate *fps = l->l_md.md_fpstate;
601 #endif
602 
603 	/*
604 	 * Put the stack in a consistent state before we whack away
605 	 * at it.  Note that write_user_windows may just dump the
606 	 * registers into the pcb; we need them in the process's memory.
607 	 */
608 	write_user_windows();
609 	if (rwindow_save(l)) {
610 		mutex_enter(l->l_proc->p_lock);
611 		sigexit(l, SIGILL);
612 	}
613 
614 	/*
615 	 * Get the general purpose registers
616 	 */
617 	r[_REG_PSR] = tf->tf_psr;
618 	r[_REG_PC] = tf->tf_pc;
619 	r[_REG_nPC] = tf->tf_npc;
620 	r[_REG_Y] = tf->tf_y;
621 	r[_REG_G1] = tf->tf_global[1];
622 	r[_REG_G2] = tf->tf_global[2];
623 	r[_REG_G3] = tf->tf_global[3];
624 	r[_REG_G4] = tf->tf_global[4];
625 	r[_REG_G5] = tf->tf_global[5];
626 	r[_REG_G6] = tf->tf_global[6];
627 	r[_REG_G7] = tf->tf_global[7];
628 	r[_REG_O0] = tf->tf_out[0];
629 	r[_REG_O1] = tf->tf_out[1];
630 	r[_REG_O2] = tf->tf_out[2];
631 	r[_REG_O3] = tf->tf_out[3];
632 	r[_REG_O4] = tf->tf_out[4];
633 	r[_REG_O5] = tf->tf_out[5];
634 	r[_REG_O6] = tf->tf_out[6];
635 	r[_REG_O7] = tf->tf_out[7];
636 
637 	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
638 	    (void *) r[_REG_PC])) != -1) {
639 		r[_REG_PC] = ras_pc;
640 		r[_REG_nPC] = ras_pc + 4;
641 	}
642 
643 	*flags |= (_UC_CPU|_UC_TLSBASE);
644 
645 #ifdef FPU_CONTEXT
646 	/*
647 	 * Get the floating point registers
648 	 */
649 	memcpy(f->__fpu_regs, fps->fs_regs, sizeof(fps->fs_regs));
650 	f->__fp_nqsize = sizeof(struct fp_qentry);
651 	f->__fp_nqel = fps->fs_qsize;
652 	f->__fp_fsr = fps->fs_fsr;
653 	if (f->__fp_q != NULL) {
654 		size_t sz = f->__fp_nqel * f->__fp_nqsize;
655 		if (sz > sizeof(fps->fs_queue)) {
656 #ifdef DIAGNOSTIC
657 			printf("getcontext: fp_queue too large\n");
658 #endif
659 			return;
660 		}
661 		if (copyout(fps->fs_queue, f->__fp_q, sz) != 0) {
662 #ifdef DIAGNOSTIC
663 			printf("getcontext: copy of fp_queue failed\n");
665 #endif
666 			return;
667 		}
668 	}
669 	f->fp_busy = 0;	/* XXX: How do we determine that? */
670 	*flags |= _UC_FPU;
671 #endif
672 
673 	return;
674 }
675 
676 int
677 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mc)
678 {
679 	const __greg_t *gr = mc->__gregs;
680 
681 	/*
682  	 * Only the icc bits in the psr are used, so it need not be
683  	 * verified.  pc and npc must be multiples of 4.  This is all
684  	 * that is required; if it holds, just do it.
685 	 */
686 	if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
687 	    gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
688 		return EINVAL;
689 
690 	return 0;
691 }
692 
693 /*
694  * Set to mcontext specified.
695  * Return to previous pc and psl as specified by
696  * context left by sendsig. Check carefully to
697  * make sure that the user has not modified the
698  * psl to gain improper privileges or to cause
699  * a machine fault.
700  * This is almost like sigreturn() and it shows.
701  */
702 int
703 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
704 {
705 	struct trapframe *tf;
706 	const __greg_t *r = mcp->__gregs;
707 	struct proc *p = l->l_proc;
708 	int error;
709 #ifdef FPU_CONTEXT
710 	__fpregset_t *f = &mcp->__fpregs;
711 	struct fpstate *fps = l->l_md.md_fpstate;
712 #endif
713 
714 	write_user_windows();
715 	if (rwindow_save(l)) {
716 		mutex_enter(p->p_lock);
717 		sigexit(l, SIGILL);
718 	}
719 
720 #ifdef DEBUG
721 	if (sigdebug & SDB_FOLLOW)
722 		printf("__setmcontext: %s[%d], __mcontext %p\n",
723 		    l->l_proc->p_comm, l->l_proc->p_pid, mcp);
724 #endif
725 
726 	if (flags & _UC_CPU) {
727 		/* Validate */
728 		error = cpu_mcontext_validate(l, mcp);
729 		if (error)
730 			return error;
731 
732 		/* Restore register context. */
733 		tf = (struct trapframe *)l->l_md.md_tf;
734 
735 		/* take only psr ICC field */
736 		tf->tf_psr = (tf->tf_psr & ~PSR_ICC) |
737 		    (r[_REG_PSR] & PSR_ICC);
738 		tf->tf_pc = r[_REG_PC];
739 		tf->tf_npc = r[_REG_nPC];
740 		tf->tf_y = r[_REG_Y];
741 
742 		/* Restore everything */
743 		tf->tf_global[1] = r[_REG_G1];
744 		tf->tf_global[2] = r[_REG_G2];
745 		tf->tf_global[3] = r[_REG_G3];
746 		tf->tf_global[4] = r[_REG_G4];
747 		tf->tf_global[5] = r[_REG_G5];
748 		tf->tf_global[6] = r[_REG_G6];
749 		/* done in lwp_setprivate */
750 		/* tf->tf_global[7] = r[_REG_G7]; */
751 
752 		tf->tf_out[0] = r[_REG_O0];
753 		tf->tf_out[1] = r[_REG_O1];
754 		tf->tf_out[2] = r[_REG_O2];
755 		tf->tf_out[3] = r[_REG_O3];
756 		tf->tf_out[4] = r[_REG_O4];
757 		tf->tf_out[5] = r[_REG_O5];
758 		tf->tf_out[6] = r[_REG_O6];
759 		tf->tf_out[7] = r[_REG_O7];
760 
761 		if (flags & _UC_TLSBASE)
762 			lwp_setprivate(l, (void *)(uintptr_t)r[_REG_G7]);
763 	}
764 
765 #ifdef FPU_CONTEXT
766 	if (flags & _UC_FPU) {
767 		/*
768 		 * Set the floating point registers
769 		 */
770 		int error;
771 		size_t sz = f->__fp_nqel * f->__fp_nqsize;
772 		if (sz > sizeof(fps->fs_queue)) {
773 #ifdef DIAGNOSTIC
774 			printf("setmcontext: fp_queue too large\n");
775 #endif
776 			return (EINVAL);
777 		}
778 		memcpy(fps->fs_regs, f->__fpu_regs, sizeof(fps->fs_regs));
779 		fps->fs_qsize = f->__fp_nqel;
780 		fps->fs_fsr = f->__fp_fsr;
781 		if (f->__fp_q != NULL) {
782 			if ((error = copyin(f->__fp_q, fps->fs_queue, sz)) != 0) {
783 #ifdef DIAGNOSTIC
784 				printf("setmcontext: fp_queue copy failed\n");
785 #endif
786 				return (error);
787 			}
788 		}
789 	}
790 #endif
791 
792 	mutex_enter(p->p_lock);
793 	if (flags & _UC_SETSTACK)
794 		l->l_sigstk.ss_flags |= SS_ONSTACK;
795 	if (flags & _UC_CLRSTACK)
796 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
797 	mutex_exit(p->p_lock);
798 
799 	return (0);
800 }
801 
802 int	waittime = -1;
803 
804 void
805 cpu_reboot(int howto, char *user_boot_string)
806 {
807 	int i;
808 	char opts[4];
809 	static char str[128];
810 
811 	/* If system is cold, just halt. */
812 	if (cold) {
813 		howto |= RB_HALT;
814 		goto haltsys;
815 	}
816 
817 #if NFB > 0
818 	fb_unblank();
819 #endif
820 	boothowto = howto;
821 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
822 		extern struct lwp lwp0;
823 
824 		/* XXX protect against curlwp->p_stats.foo refs in sync() */
825 		if (curlwp == NULL)
826 			curlwp = &lwp0;
827 		waittime = 0;
828 		vfs_shutdown();
829 
830 		/*
831 		 * If we've been adjusting the clock, the todr
832 		 * will be out of synch; adjust it now.
833 		 * resettodr() will do this only if inittodr()
834 		 * has already been called.
835 		 */
836 		resettodr();
837 	}
838 
839 	/* Disable interrupts. But still allow IPI on MP systems */
840 	if (sparc_ncpus > 1)
841 		(void)splsched();
842 	else
843 		(void)splhigh();
844 
845 #if defined(MULTIPROCESSOR)
846 	/* Direct system interrupts to this CPU, since dump uses polled I/O */
847 	if (CPU_ISSUN4M)
848 		*((u_int *)ICR_ITR) = cpuinfo.mid - 8;
849 #endif
850 
851 	/* If rebooting and a dump is requested, do it. */
852 #if 0
853 	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
854 #else
855 	if (howto & RB_DUMP)
856 #endif
857 		dumpsys();
858 
859  haltsys:
860 
861 	/* Run any shutdown hooks. */
862 	doshutdownhooks();
863 
864 	pmf_system_shutdown(boothowto);
865 
866 	/* If powerdown was requested, do it. */
867 	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
868 		prom_interpret("power-off");
869 #if NPOWER > 0
870 		/* Fall back on `power' device if the PROM can't do it */
871 		powerdown();
872 #endif
873 		printf("WARNING: powerdown not supported\n");
874 		/*
875 		 * RB_POWERDOWN implies RB_HALT... fall into it...
876 		 */
877 	}
878 
879 	if (howto & RB_HALT) {
880 #if defined(MULTIPROCESSOR)
881 		mp_halt_cpus();
882 		printf("cpu%d halted\n\n", cpu_number());
883 #else
884 		printf("halted\n\n");
885 #endif
886 		prom_halt();
887 	}
888 
889 	printf("rebooting\n\n");
890 
891 	i = 1;
892 	if (howto & RB_SINGLE)
893 		opts[i++] = 's';
894 	if (howto & RB_KDB)
895 		opts[i++] = 'd';
896 	opts[i] = '\0';
897 	opts[0] = (i > 1) ? '-' : '\0';
898 
899 	if (user_boot_string && *user_boot_string) {
900 		i = strlen(user_boot_string);
901 		if (i > sizeof(str) - sizeof(opts) - 1)
902 			prom_boot(user_boot_string);	/* XXX */
903 		memcpy(str, user_boot_string, i);
904 		if (opts[0] != '\0')
905 			str[i] = ' ';
906 	}
907 	strcat(str, opts);
908 	prom_boot(str);
909 	/*NOTREACHED*/
910 }
911 
912 uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
913 int	dumpsize = 0;		/* also for savecore */
914 long	dumplo = 0;
915 
916 void
917 cpu_dumpconf(void)
918 {
919 	int nblks, dumpblks;
920 
921 	if (dumpdev == NODEV)
922 		return;
923 	nblks = bdev_size(dumpdev);
924 
925 	dumpblks = ctod(physmem) + pmap_dumpsize();
926 	if (dumpblks > (nblks - ctod(1)))
927 		/*
928 		 * dump size is too big for the partition.
929 		 * Note, we safeguard a click at the front for a
930 		 * possible disk label.
931 		 */
932 		return;
933 
934 	/* Put the dump at the end of the partition */
935 	dumplo = nblks - dumpblks;
936 
937 	/*
938 	 * savecore(8) expects dumpsize to be the number of pages
939 	 * of actual core dumped (i.e. excluding the MMU stuff).
940 	 */
941 	dumpsize = physmem;
942 }
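
/*
 * Worked example of the sizing above (hypothetical machine, assuming
 * 8KB pages and 512-byte disk blocks): 64MB of RAM gives physmem =
 * 8192 clicks, so ctod(physmem) = 8192 * 16 = 131072 blocks; add
 * pmap_dumpsize() blocks of MMU state, and if that total (plus one
 * click reserved up front for the disklabel) fits in the partition,
 * the dump goes at the very end: dumplo = nblks - dumpblks.
 */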
943 
944 #define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
945 static vaddr_t dumpspace;
946 
947 void *
948 reserve_dumppages(void *p)
949 {
950 
951 	dumpspace = (vaddr_t)p;
952 	return ((char *)p + BYTES_PER_DUMP);
953 }
954 
955 /*
956  * Write a crash dump.
957  */
958 void
959 dumpsys(void)
960 {
961 	const struct bdevsw *bdev;
962 	int psize;
963 	daddr_t blkno;
964 	int (*dump)(dev_t, daddr_t, void *, size_t);
965 	int error = 0;
966 	struct memarr *mp;
967 	int nmem;
968 	extern struct memarr pmemarr[];
969 	extern int npmemarr;
970 
971 	/* copy registers to memory */
972 	snapshot(cpuinfo.curpcb);
973 	stackdump();
974 
975 	if (dumpdev == NODEV)
976 		return;
977 	bdev = bdevsw_lookup(dumpdev);
978 	if (bdev == NULL || bdev->d_psize == NULL)
979 		return;
980 
981 	/*
982 	 * For dumps during autoconfiguration: if the dump device has
983 	 * not been configured yet, do it now.
984 	 */
985 	if (dumpsize == 0)
986 		cpu_dumpconf();
987 	if (dumplo <= 0) {
988 		printf("\ndump to dev %u,%u not possible\n",
989 		    major(dumpdev), minor(dumpdev));
990 		return;
991 	}
992 	printf("\ndumping to dev %u,%u offset %ld\n",
993 	    major(dumpdev), minor(dumpdev), dumplo);
994 
995 	psize = bdev_size(dumpdev);
996 	printf("dump ");
997 	if (psize == -1) {
998 		printf("area unavailable\n");
999 		return;
1000 	}
1001 	blkno = dumplo;
1002 	dump = bdev->d_dump;
1003 
1004 	error = pmap_dumpmmu(dump, blkno);
1005 	blkno += pmap_dumpsize();
1006 
1007 	for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
1008 		unsigned i = 0, n;
1009 		int maddr = mp->addr;
1010 
1011 		if (maddr == 0) {
1012 			/* Skip first page at physical address 0 */
1013 			maddr += PAGE_SIZE;
1014 			i += PAGE_SIZE;
1015 			blkno += btodb(PAGE_SIZE);
1016 		}
1017 
1018 		for (; i < mp->len; i += n) {
1019 			n = mp->len - i;
1020 			if (n > BYTES_PER_DUMP)
1021 				 n = BYTES_PER_DUMP;
1022 
1023 			/* print out how many MBs we have dumped */
1024 			if (i && (i % (1024*1024)) == 0)
1025 				printf_nolog("%d ", i / (1024*1024));
1026 
1027 			(void) pmap_map(dumpspace, maddr, maddr + n,
1028 					VM_PROT_READ);
1029 			error = (*dump)(dumpdev, blkno,
1030 					(void *)dumpspace, (int)n);
1031 			pmap_kremove(dumpspace, n);
1032 			pmap_update(pmap_kernel());
1033 			if (error)
1034 				break;
1035 			maddr += n;
1036 			blkno += btodb(n);
1037 		}
1038 	}
1039 
1040 	switch (error) {
1041 
1042 	case ENXIO:
1043 		printf("device bad\n");
1044 		break;
1045 
1046 	case EFAULT:
1047 		printf("device not ready\n");
1048 		break;
1049 
1050 	case EINVAL:
1051 		printf("area improper\n");
1052 		break;
1053 
1054 	case EIO:
1055 		printf("i/o error\n");
1056 		break;
1057 
1058 	case 0:
1059 		printf("succeeded\n");
1060 		break;
1061 
1062 	default:
1063 		printf("error %d\n", error);
1064 		break;
1065 	}
1066 }
1067 
1068 /*
1069  * get the fp and dump the stack as best we can.  don't leave the
1070  * current stack page
1071  */
1072 void
1073 stackdump(void)
1074 {
1075 	struct frame *fp = getfp(), *sfp;
1076 
1077 	sfp = fp;
1078 	printf("Frame pointer is at %p\n", fp);
1079 	printf("Call traceback:\n");
1080 	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
1081 		printf("  pc = 0x%x  args = (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp = %p\n",
1082 		    fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
1083 		    fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
1084 		fp = fp->fr_fp;
1085 	}
1086 }
1087 
1088 int
1089 cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
1090 {
1091 
1092 	return (ENOEXEC);
1093 }
1094 
1095 #if defined(SUN4)
1096 void
1097 oldmon_w_trace(u_long va)
1098 {
1099 	struct cpu_info * const ci = curcpu();
1100 	u_long stop;
1101 	struct frame *fp;
1102 
1103 	printf("curlwp = %p, pid %d\n", curlwp, curproc->p_pid);
1104 
1105 	printf("uvm: cpu%u: swtch %"PRIu64", trap %"PRIu64", sys %"PRIu64", "
1106 	    "intr %"PRIu64", soft %"PRIu64", faults %"PRIu64"\n",
1107 	    cpu_index(ci), ci->ci_data.cpu_nswtch, ci->ci_data.cpu_ntrap,
1108 	    ci->ci_data.cpu_nsyscall, ci->ci_data.cpu_nintr,
1109 	    ci->ci_data.cpu_nsoft, ci->ci_data.cpu_nfault);
1110 	write_user_windows();
1111 
1112 #define round_up(x) (( (x) + (PAGE_SIZE-1) ) & (~(PAGE_SIZE-1)) )
1113 
1114 	printf("\nstack trace with sp = 0x%lx\n", va);
1115 	stop = round_up(va);
1116 	printf("stop at 0x%lx\n", stop);
1117 	fp = (struct frame *) va;
1118 	while (round_up((u_long) fp) == stop) {
1119 		printf("  0x%x(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp %p\n", fp->fr_pc,
1120 		    fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
1121 		    fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
1122 		fp = fp->fr_fp;
1123 		if (fp == NULL)
1124 			break;
1125 	}
1126 	printf("end of stack trace\n");
1127 }
1128 
1129 void
1130 oldmon_w_cmd(u_long va, char *ar)
1131 {
1132 	switch (*ar) {
1133 	case '\0':
1134 		switch (va) {
1135 		case 0:
1136 			panic("g0 panic");
1137 		case 4:
1138 			printf("w: case 4\n");
1139 			break;
1140 		default:
1141 			printf("w: unknown case %ld\n", va);
1142 			break;
1143 		}
1144 		break;
1145 	case 't':
1146 		oldmon_w_trace(va);
1147 		break;
1148 	default:
1149 		printf("w: arg not allowed\n");
1150 	}
1151 }
1152 
1153 int
1154 ldcontrolb(void *addr)
1155 {
1156 	struct pcb *xpcb;
1157 	u_long saveonfault;
1158 	int res;
1159 	int s;
1160 
1161 	if (CPU_ISSUN4M || CPU_ISSUN4D) {
1162 		printf("warning: ldcontrolb called on sun4m/sun4d\n");
1163 		return 0;
1164 	}
1165 
1166 	s = splhigh();
1167 	xpcb = lwp_getpcb(curlwp);
1168 
1169 	saveonfault = (u_long)xpcb->pcb_onfault;
1170 	res = xldcontrolb(addr, xpcb);
1171 	xpcb->pcb_onfault = (void *)saveonfault;
1172 
1173 	splx(s);
1174 	return (res);
1175 }
1176 #endif /* SUN4 */
1177 
1178 void
1179 wzero(void *vb, u_int l)
1180 {
1181 	u_char *b = vb;
1182 	u_char *be = b + l;
1183 	u_short *sp;
1184 
1185 	if (l == 0)
1186 		return;
1187 
1188 	/* front, */
1189 	if ((u_long)b & 1)
1190 		*b++ = 0;
1191 
1192 	/* back, */
1193 	if (b != be && ((u_long)be & 1) != 0) {
1194 		be--;
1195 		*be = 0;
1196 	}
1197 
1198 	/* and middle. */
1199 	sp = (u_short *)b;
1200 	while (sp != (u_short *)be)
1201 		*sp++ = 0;
1202 }
1203 
1204 void
1205 wcopy(const void *vb1, void *vb2, u_int l)
1206 {
1207 	const u_char *b1e, *b1 = vb1;
1208 	u_char *b2 = vb2;
1209 	const u_short *sp;
1210 	int bstore = 0;
1211 
1212 	if (l == 0)
1213 		return;
1214 
1215 	/* front, */
1216 	if ((u_long)b1 & 1) {
1217 		*b2++ = *b1++;
1218 		l--;
1219 	}
1220 
1221 	/* middle, */
1222 	sp = (const u_short *)b1;
1223 	b1e = b1 + l;
1224 	if (l & 1)
1225 		b1e--;
1226 	bstore = (u_long)b2 & 1;
1227 
1228 	while (sp < (const u_short *)b1e) {
1229 		if (bstore) {
1230 			b2[1] = *sp & 0xff;
1231 			b2[0] = *sp >> 8;
1232 		} else
1233 			*((short *)b2) = *sp;
1234 		sp++;
1235 		b2 += 2;
1236 	}
1237 
1238 	/* and back. */
1239 	if (l & 1)
1240 		*b2 = *b1e;
1241 }
1242 
1243 #ifdef MODULAR
1244 void
1245 module_init_md(void)
1246 {
1247 }
1248 #endif
1249 
1250 /*
1251  * Common function for DMA map creation.  May be called by bus-specific
1252  * DMA map creation functions.
1253  */
1254 int
1255 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
1256 		   bus_size_t maxsegsz, bus_size_t boundary, int flags,
1257 		   bus_dmamap_t *dmamp)
1258 {
1259 	struct sparc_bus_dmamap *map;
1260 	void *mapstore;
1261 	size_t mapsize;
1262 
1263 	/*
1264 	 * Allocate and initialize the DMA map.  The end of the map
1265 	 * is a variable-sized array of segments, so we allocate enough
1266 	 * room for them in one shot.
1267 	 *
1268 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
1269 	 * of ALLOCNOW notifies others that we've reserved these resources,
1270 	 * and they are not to be freed.
1271 	 *
1272 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
1273 	 * the (nsegments - 1).
1274 	 */
1275 	mapsize = sizeof(struct sparc_bus_dmamap) +
1276 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
1277 	if ((mapstore = malloc(mapsize, M_DMAMAP,
1278 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1279 		return (ENOMEM);
1280 
1281 	memset(mapstore, 0, mapsize);
1282 	map = (struct sparc_bus_dmamap *)mapstore;
1283 	map->_dm_size = size;
1284 	map->_dm_segcnt = nsegments;
1285 	map->_dm_maxmaxsegsz = maxsegsz;
1286 	map->_dm_boundary = boundary;
1287 	map->_dm_align = PAGE_SIZE;
1288 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
1289 	map->dm_maxsegsz = maxsegsz;
1290 	map->dm_mapsize = 0;		/* no valid mappings */
1291 	map->dm_nsegs = 0;
1292 
1293 	*dmamp = map;
1294 	return (0);
1295 }
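
/*
 * Hypothetical driver-side use of the map functions in this file (an
 * illustrative sketch only; `sc' and its members are invented):
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT) != 0)
 *		goto fail;
 *	... program the device with sc->sc_dmamap->dm_segs[0].ds_addr ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */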
1296 
1297 /*
1298  * Common function for DMA map destruction.  May be called by bus-specific
1299  * DMA map destruction functions.
1300  */
1301 void
1302 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
1303 {
1304 
1305 	free(map, M_DMAMAP);
1306 }
1307 
1308 /*
1309  * Like _bus_dmamap_load(), but for mbufs.
1310  */
1311 int
1312 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
1313 		      struct mbuf *m, int flags)
1314 {
1315 
1316 	panic("_bus_dmamap_load_mbuf: not implemented");
1317 }
1318 
1319 /*
1320  * Like _bus_dmamap_load(), but for uios.
1321  */
1322 int
1323 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
1324 		     struct uio *uio, int flags)
1325 {
1326 
1327 	panic("_bus_dmamap_load_uio: not implemented");
1328 }
1329 
1330 /*
1331  * Like _bus_dmamap_load(), but for raw memory allocated with
1332  * bus_dmamem_alloc().
1333  */
1334 int
1335 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
1336 		     bus_dma_segment_t *segs, int nsegs, bus_size_t size,
1337 		     int flags)
1338 {
1339 
1340 	panic("_bus_dmamap_load_raw: not implemented");
1341 }
1342 
1343 /*
1344  * Common function for DMA map synchronization.  May be called
1345  * by bus-specific DMA map synchronization functions.
1346  */
1347 void
1348 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
1349 		 bus_addr_t offset, bus_size_t len, int ops)
1350 {
1351 }
1352 
1353 /*
1354  * Common function for DMA-safe memory allocation.  May be called
1355  * by bus-specific DMA memory allocation functions.
1356  */
1357 int
1358 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
1359 		  bus_size_t alignment, bus_size_t boundary,
1360 		  bus_dma_segment_t *segs, int nsegs, int *rsegs,
1361 		  int flags)
1362 {
1363 	vaddr_t low, high;
1364 	struct pglist *mlist;
1365 	int error;
1366 
1367 	/* Always round the size. */
1368 	size = round_page(size);
1369 	low = vm_first_phys;
1370 	high = vm_first_phys + vm_num_phys - PAGE_SIZE;
1371 
1372 	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
1373 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1374 		return (ENOMEM);
1375 
1376 	/*
1377 	 * Allocate pages from the VM system.
1378 	 */
1379 	error = uvm_pglistalloc(size, low, high, 0, 0,
1380 				mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1381 	if (error) {
1382 		free(mlist, M_DEVBUF);
1383 		return (error);
1384 	}
1385 
1386 	/*
1387 	 * Simply keep a pointer around to the linked list, so
1388 	 * bus_dmamap_free() can return it.
1389 	 *
1390 	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
1391 	 * ARE IN OUR CUSTODY.
1392 	 */
1393 	segs[0]._ds_mlist = mlist;
1394 
1395 	/*
1396 	 * We now have physical pages, but no DVMA addresses yet. These
1397 	 * will be allocated in bus_dmamap_load*() routines. Hence we
1398 	 * save any alignment and boundary requirements in this DMA
1399 	 * segment.
1400 	 */
1401 	segs[0].ds_addr = 0;
1402 	segs[0].ds_len = 0;
1403 	segs[0]._ds_va = 0;
1404 	*rsegs = 1;
1405 	return (0);
1406 }
1407 
1408 /*
1409  * Common function for freeing DMA-safe memory.  May be called by
1410  * bus-specific DMA memory free functions.
1411  */
1412 void
1413 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1414 {
1415 
1416 	if (nsegs != 1)
1417 		panic("bus_dmamem_free: nsegs = %d", nsegs);
1418 
1419 	/*
1420 	 * Return the list of pages back to the VM system.
1421 	 */
1422 	uvm_pglistfree(segs[0]._ds_mlist);
1423 	free(segs[0]._ds_mlist, M_DEVBUF);
1424 }
1425 
1426 /*
1427  * Common function for unmapping DMA-safe memory.  May be called by
1428  * bus-specific DMA memory unmapping functions.
1429  */
1430 void
1431 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1432 {
1433 
1434 #ifdef DIAGNOSTIC
1435 	if ((u_long)kva & PAGE_MASK)
1436 		panic("_bus_dmamem_unmap");
1437 #endif
1438 
1439 	size = round_page(size);
1440 	pmap_kremove((vaddr_t)kva, size);
1441 	pmap_update(pmap_kernel());
1442 	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1443 }
1444 
1445 /*
1446  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
1447  * bus-specific DMA mmap(2)'ing functions.
1448  */
1449 paddr_t
1450 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1451 		 off_t off, int prot, int flags)
1452 {
1453 
1454 	panic("_bus_dmamem_mmap: not implemented");
1455 }
1456 
1457 /*
1458  * Utility to allocate an aligned kernel virtual address range
1459  */
1460 vaddr_t
1461 _bus_dma_valloc_skewed(size_t size, u_long boundary, u_long align, u_long skew)
1462 {
1463 	size_t oversize;
1464 	vaddr_t va, sva;
1465 
1466 	/*
1467 	 * Find a region of kernel virtual addresses that is aligned
1468 	 * to the given address modulo the requested alignment, i.e.
1469 	 *
1470 	 *	(va - skew) == 0 mod align
1471 	 *
1472 	 * The following conditions apply to the arguments:
1473 	 *
1474 	 *	- `size' must be a multiple of the VM page size
1475 	 *	- `align' must be a power of two
1476 	 *	   and greater than or equal to the VM page size
1477 	 *	- `skew' must be smaller than `align'
1478 	 *	- `size' must be smaller than `boundary'
1479 	 */
1480 
1481 #ifdef DIAGNOSTIC
1482 	if ((size & PAGE_MASK) != 0)
1483 		panic("_bus_dma_valloc_skewed: invalid size %lx", size);
1484 	if ((align & PAGE_MASK) != 0)
1485 		panic("_bus_dma_valloc_skewed: invalid alignment %lx", align);
1486 	if (align < skew)
1487 		panic("_bus_dma_valloc_skewed: align %lx < skew %lx",
1488 			align, skew);
1489 #endif
1490 
1491 	/* XXX - Implement this! */
1492 	if (boundary) {
1493 		printf("_bus_dma_valloc_skewed: "
1494 			"boundary check not implemented\n");
1495 		return (0);
1496 	}
1497 
1498 	/*
1499 	 * First, find a region large enough to contain any aligned chunk
1500 	 */
1501 	oversize = size + align - PAGE_SIZE;
1502 	sva = vm_map_min(kernel_map);
1503 	if (uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET,
1504 	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
1505 	    UVM_ADV_RANDOM, UVM_FLAG_NOWAIT)))
1506 		return (0);
1507 
1508 	/*
1509 	 * Compute start of aligned region
1510 	 */
1511 	va = sva;
1512 	va += (skew + align - va) & (align - 1);
1513 
1514 	/*
1515 	 * Return excess virtual addresses
1516 	 */
1517 	if (va != sva)
1518 		(void)uvm_unmap(kernel_map, sva, va);
1519 	if (va + size != sva + oversize)
1520 		(void)uvm_unmap(kernel_map, va + size, sva + oversize);
1521 
1522 	return (va);
1523 }
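
/*
 * Example of the alignment step above, with made-up numbers: for
 * align = 0x10000, skew = 0x2000 and sva = 0xf0123000, the expression
 * (skew + align - va) & (align - 1) yields 0xf000, so va becomes
 * 0xf0132000 -- the lowest address >= sva that is congruent to skew
 * modulo align.
 */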
1524 
1525 /* sun4/sun4c DMA map functions */
1526 int	sun4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
1527 				bus_size_t, struct proc *, int);
1528 int	sun4_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
1529 				bus_dma_segment_t *, int, bus_size_t, int);
1530 void	sun4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
1531 int	sun4_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
1532 				int, size_t, void **, int);
1533 
1534 /*
1535  * sun4/sun4c: load DMA map with a linear buffer.
1536  */
1537 int
1538 sun4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
1539 		 void *buf, bus_size_t buflen,
1540 		 struct proc *p, int flags)
1541 {
1542 	bus_size_t sgsize;
1543 	vaddr_t va = (vaddr_t)buf;
1544 	int pagesz = PAGE_SIZE;
1545 	vaddr_t dva;
1546 	pmap_t pmap;
1547 
1548 	/*
1549 	 * Make sure that on error condition we return "no valid mappings".
1550 	 */
1551 	map->dm_nsegs = 0;
1552 
1553 	if (buflen > map->_dm_size)
1554 		return (EINVAL);
1555 
1556 	cache_flush(buf, buflen);
1557 
1558 	if ((map->_dm_flags & BUS_DMA_24BIT) == 0) {
1559 		/*
1560 		 * XXX Need to implement "don't DMA across this boundary".
1561 		 */
1562 		if (map->_dm_boundary != 0) {
1563 			bus_addr_t baddr;
1564 
1565 			/* Calculate first boundary line after `buf' */
1566 			baddr = ((bus_addr_t)va + map->_dm_boundary) &
1567 					-map->_dm_boundary;
1568 
1569 			/*
1570 			 * If the requested segment crosses the boundary,
1571 			 * we can't grant a direct map. For now, steal some
1572 			 * space from the `24BIT' map instead.
1573 			 *
1574 			 * (XXX - no overflow detection here)
1575 			 */
1576 			if (buflen > (baddr - (bus_addr_t)va))
1577 				goto no_fit;
1578 		}
1579 		map->dm_mapsize = buflen;
1580 		map->dm_nsegs = 1;
1581 		map->dm_segs[0].ds_addr = (bus_addr_t)va;
1582 		map->dm_segs[0].ds_len = buflen;
1583 		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
1584 		return (0);
1585 	}
1586 
1587 no_fit:
1588 	sgsize = round_page(buflen + (va & (pagesz - 1)));
1589 
1590 	if (extent_alloc(dvmamap24, sgsize, pagesz, map->_dm_boundary,
1591 			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
1592 			 &dva) != 0) {
1593 		return (ENOMEM);
1594 	}
1595 
1596 	/*
1597 	 * We always use just one segment.
1598 	 */
1599 	map->dm_mapsize = buflen;
1600 	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
1601 	map->dm_segs[0].ds_len = buflen;
1602 	map->dm_segs[0]._ds_sgsize = sgsize;
1603 
1604 	if (p != NULL)
1605 		pmap = p->p_vmspace->vm_map.pmap;
1606 	else
1607 		pmap = pmap_kernel();
1608 
1609 	for (; buflen > 0; ) {
1610 		paddr_t pa;
1611 
1612 		/*
1613 		 * Get the physical address for this page.
1614 		 */
1615 		(void) pmap_extract(pmap, va, &pa);
1616 
1617 		/*
1618 		 * Compute the segment size, and adjust counts.
1619 		 */
1620 		sgsize = pagesz - (va & (pagesz - 1));
1621 		if (buflen < sgsize)
1622 			sgsize = buflen;
1623 
1624 #ifdef notyet
1625 #if defined(SUN4)
1626 		if (have_iocache)
1627 			pa |= PG_IOC;
1628 #endif
1629 #endif
1630 		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
1631 		    VM_PROT_READ | VM_PROT_WRITE, 0);
1632 
1633 		dva += pagesz;
1634 		va += sgsize;
1635 		buflen -= sgsize;
1636 	}
1637 	pmap_update(pmap_kernel());
1638 
1639 	map->dm_nsegs = 1;
1640 	return (0);
1641 }
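
/*
 * Note: the dvmamap24 extent above serves maps created with
 * BUS_DMA_24BIT (and, as the comment says, boundary-crossing requests
 * that cannot be direct-mapped).  A driver for one of the 24-bit DVMA
 * devices mentioned at the top of this file (`le', `ie') would create
 * its map with something like the following purely illustrative call:
 *
 *	bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_24BIT, &sc->sc_dmamap);
 */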
1642 
1643 /*
1644  * Like _bus_dmamap_load(), but for raw memory allocated with
1645  * bus_dmamem_alloc().
1646  */
1647 int
1648 sun4_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
1649 		     bus_dma_segment_t *segs, int nsegs, bus_size_t size,
1650 		     int flags)
1651 {
1652 	struct vm_page *m;
1653 	paddr_t pa;
1654 	vaddr_t dva;
1655 	bus_size_t sgsize;
1656 	struct pglist *mlist;
1657 	int pagesz = PAGE_SIZE;
1658 	int error;
1659 
1660 	map->dm_nsegs = 0;
1661 	sgsize = (size + pagesz - 1) & -pagesz;
1662 
1663 	/* Allocate DVMA addresses */
1664 	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
1665 		error = extent_alloc(dvmamap24, sgsize, pagesz,
1666 					map->_dm_boundary,
1667 					(flags & BUS_DMA_NOWAIT) == 0
1668 						? EX_WAITOK : EX_NOWAIT,
1669 					&dva);
1670 		if (error)
1671 			return (error);
1672 	} else {
1673 		/* Any properly aligned virtual address will do */
1674 		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
1675 					     pagesz, 0);
1676 		if (dva == 0)
1677 			return (ENOMEM);
1678 	}
1679 
1680 	map->dm_segs[0].ds_addr = dva;
1681 	map->dm_segs[0].ds_len = size;
1682 	map->dm_segs[0]._ds_sgsize = sgsize;
1683 
1684 	/* Map physical pages into IOMMU */
1685 	mlist = segs[0]._ds_mlist;
1686 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
1687 		if (sgsize == 0)
1688 			panic("sun4_dmamap_load_raw: size botch");
1689 		pa = VM_PAGE_TO_PHYS(m);
1690 #ifdef notyet
1691 #if defined(SUN4)
1692 		if (have_iocache)
1693 			pa |= PG_IOC;
1694 #endif
1695 #endif
1696 		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
1697 		    VM_PROT_READ | VM_PROT_WRITE, 0);
1698 
1699 		dva += pagesz;
1700 		sgsize -= pagesz;
1701 	}
1702 	pmap_update(pmap_kernel());
1703 
1704 	map->dm_nsegs = 1;
1705 	map->dm_mapsize = size;
1706 
1707 	return (0);
1708 }
1709 
1710 /*
1711  * sun4/sun4c function for unloading a DMA map.
1712  */
1713 void
1714 sun4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
1715 {
1716 	bus_dma_segment_t *segs = map->dm_segs;
1717 	int nsegs = map->dm_nsegs;
1718 	int flags = map->_dm_flags;
1719 	vaddr_t dva;
1720 	bus_size_t len;
1721 	int i, s, error;
1722 
1723 	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
1724 
1725 	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
1726 		/* Nothing to release */
1727 		map->dm_mapsize = 0;
1728 		map->dm_nsegs = 0;
1729 		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
1730 		return;
1731 	}
1732 
1733 	for (i = 0; i < nsegs; i++) {
1734 		dva = segs[i].ds_addr & -PAGE_SIZE;
1735 		len = segs[i]._ds_sgsize;
1736 
1737 		pmap_kremove(dva, len);
1738 
1739 		if ((flags & BUS_DMA_24BIT) != 0) {
1740 			s = splhigh();
1741 			error = extent_free(dvmamap24, dva, len, EX_NOWAIT);
1742 			splx(s);
1743 			if (error != 0)
1744 				printf("warning: %ld of DVMA space lost\n", len);
1745 		} else {
1746 			uvm_unmap(kernel_map, dva, dva + len);
1747 		}
1748 	}
1749 	pmap_update(pmap_kernel());
1750 
1751 	/* Mark the mappings as invalid. */
1752 	map->dm_mapsize = 0;
1753 	map->dm_nsegs = 0;
1754 }
1755 
1756 /*
1757  * Common function for mapping DMA-safe memory.  May be called by
1758  * bus-specific DMA memory map functions.
1759  */
1760 int
1761 sun4_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1762 		size_t size, void **kvap, int flags)
1763 {
1764 	struct vm_page *m;
1765 	vaddr_t va;
1766 	struct pglist *mlist;
1767 	const uvm_flag_t kmflags =
1768 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
1769 
1770 	if (nsegs != 1)
1771 		panic("sun4_dmamem_map: nsegs = %d", nsegs);
1772 
1773 	size = round_page(size);
1774 
1775 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
1776 	if (va == 0)
1777 		return (ENOMEM);
1778 
1779 	segs[0]._ds_va = va;
1780 	*kvap = (void *)va;
1781 
1782 	mlist = segs[0]._ds_mlist;
1783 	TAILQ_FOREACH(m, mlist, pageq.queue) {
1784 		paddr_t pa;
1785 
1786 		if (size == 0)
1787 			panic("sun4_dmamem_map: size botch");
1788 
1789 		pa = VM_PAGE_TO_PHYS(m);
1790 		pmap_kenter_pa(va, pa | PMAP_NC,
1791 		    VM_PROT_READ | VM_PROT_WRITE, 0);
1792 
1793 		va += PAGE_SIZE;
1794 		size -= PAGE_SIZE;
1795 	}
1796 	pmap_update(pmap_kernel());
1797 
1798 	return (0);
1799 }
1800 
1801 
1802 struct sparc_bus_dma_tag mainbus_dma_tag = {
1803 	NULL,
1804 	_bus_dmamap_create,
1805 	_bus_dmamap_destroy,
1806 	sun4_dmamap_load,
1807 	_bus_dmamap_load_mbuf,
1808 	_bus_dmamap_load_uio,
1809 	sun4_dmamap_load_raw,
1810 	sun4_dmamap_unload,
1811 	_bus_dmamap_sync,
1812 
1813 	_bus_dmamem_alloc,
1814 	_bus_dmamem_free,
1815 	sun4_dmamem_map,
1816 	_bus_dmamem_unmap,
1817 	_bus_dmamem_mmap
1818 };
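
/*
 * The initializer above is positional: the cookie, the dmamap
 * create/destroy pair, the load family (linear, mbuf, uio, raw),
 * unload and sync, then the dmamem alloc/free/map/unmap/mmap group.
 * Only the load, raw load, unload and dmamem_map entries are
 * sun4/sun4c-specific; the rest are the common _bus_* handlers above.
 */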
1819 
1820 
1821 /*
1822  * Base bus space handlers.
1823  */
1824 static int	sparc_bus_map(bus_space_tag_t, bus_addr_t,
1825 				    bus_size_t, int, vaddr_t,
1826 				    bus_space_handle_t *);
1827 static int	sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t,
1828 				     bus_size_t);
1829 static int	sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t,
1830 					 bus_size_t, bus_size_t,
1831 					 bus_space_handle_t *);
1832 static paddr_t	sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t,
1833 				    int, int);
1834 static void	*sparc_mainbus_intr_establish(bus_space_tag_t, int, int,
1835 						   int (*)(void *),
1836 						   void *,
1837 						   void (*)(void));
1838 static void     sparc_bus_barrier(bus_space_tag_t, bus_space_handle_t,
1839 					bus_size_t, bus_size_t, int);
1840 
1841 int
1842 bus_space_map(
1843 	bus_space_tag_t	t,
1844 	bus_addr_t	a,
1845 	bus_size_t	s,
1846 	int		f,
1847 	bus_space_handle_t *hp)
1848 {
1849 	return (*t->sparc_bus_map)(t, a, s, f, (vaddr_t)0, hp);
1850 }
1851 
1852 int
1853 bus_space_map2(
1854 	bus_space_tag_t	t,
1855 	bus_addr_t	a,
1856 	bus_size_t	s,
1857 	int		f,
1858 	vaddr_t		v,
1859 	bus_space_handle_t *hp)
1860 {
1861 	return (*t->sparc_bus_map)(t, a, s, f, v, hp);
1862 }
1863 
1864 void
1865 bus_space_unmap(
1866 	bus_space_tag_t t,
1867 	bus_space_handle_t h,
1868 	bus_size_t	s)
1869 {
1870 	(*t->sparc_bus_unmap)(t, h, s);
1871 }
1872 
1873 int
1874 bus_space_subregion(
1875 	bus_space_tag_t	t,
1876 	bus_space_handle_t h,
1877 	bus_size_t	o,
1878 	bus_size_t	s,
1879 	bus_space_handle_t *hp)
1880 {
1881 	return (*t->sparc_bus_subregion)(t, h, o, s, hp);
1882 }
1883 
1884 paddr_t
1885 bus_space_mmap(
1886 	bus_space_tag_t	t,
1887 	bus_addr_t	a,
1888 	off_t		o,
1889 	int		p,
1890 	int		f)
1891 {
1892 	return (*t->sparc_bus_mmap)(t, a, o, p, f);
1893 }
1894 
1895 void *
1896 bus_intr_establish(
1897 	bus_space_tag_t t,
1898 	int	p,
1899 	int	l,
1900 	int	(*h)(void *),
1901 	void	*a)
1902 {
1903 	return (*t->sparc_intr_establish)(t, p, l, h, a, NULL);
1904 }
1905 
1906 void *
1907 bus_intr_establish2(
1908 	bus_space_tag_t t,
1909 	int	p,
1910 	int	l,
1911 	int	(*h)(void *),
1912 	void	*a,
1913 	void	(*v)(void))
1914 {
1915 	return (*t->sparc_intr_establish)(t, p, l, h, a, v);
1916 }
1917 
1918 void
1919 bus_space_barrier(
1920 	bus_space_tag_t t,
1921 	bus_space_handle_t h,
1922 	bus_size_t o,
1923 	bus_size_t s,
1924 	int f)
1925 {
1926 	(*t->sparc_bus_barrier)(t, h, o, s, f);
1927 }
1928 
1929 void
1930 bus_space_write_multi_stream_2(
1931 	bus_space_tag_t		t,
1932 	bus_space_handle_t	h,
1933 	bus_size_t		o,
1934 	const uint16_t		*a,
1935 	bus_size_t		c)
1936 {
1937 	while (c-- > 0)
1938 		bus_space_write_2_real(t, h, o, *a++);
1939 }
1940 
1941 void
1942 bus_space_write_multi_stream_4(
1943 	bus_space_tag_t		t,
1944 	bus_space_handle_t	h,
1945 	bus_size_t		o,
1946 	const uint32_t		*a,
1947 	bus_size_t		c)
1948 {
1949 	while (c-- > 0)
1950 		bus_space_write_4_real(t, h, o, *a++);
1951 }
1952 
1953 void
1954 bus_space_write_multi_stream_8(
1955 	bus_space_tag_t		t,
1956 	bus_space_handle_t	h,
1957 	bus_size_t		o,
1958 	const uint64_t		*a,
1959 	bus_size_t		c)
1960 {
1961 	while (c-- > 0)
1962 		bus_space_write_8_real(t, h, o, *a++);
1963 }
1964 
1965 
1966 /*
1967  *	void bus_space_set_multi_N(bus_space_tag_t tag,
1968  *	    bus_space_handle_t bsh, bus_size_t offset, u_intN_t val,
1969  *	    bus_size_t count);
1970  *
1971  * Write the 1, 2, 4, or 8 byte value `val' to bus space described
1972  * by tag/handle/offset `count' times.
1973  */
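/*
 * Illustrative sketch only: `sc', `sc_bt', `sc_bh' and FIFO_DATA are
 * hypothetical driver fields/constants, not part of this file.  Since
 * the set_multi loops below write to the same offset `count' times, a
 * driver could pad a device FIFO with 16 zero bytes like so:
 *
 *	bus_space_set_multi_1(sc->sc_bt, sc->sc_bh, FIFO_DATA, 0x00, 16);
 */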
1974 void
1975 bus_space_set_multi_1(
1976 	bus_space_tag_t		t,
1977 	bus_space_handle_t	h,
1978 	bus_size_t		o,
1979 	const uint8_t		v,
1980 	bus_size_t		c)
1981 {
1982 	while (c-- > 0)
1983 		bus_space_write_1(t, h, o, v);
1984 }
1985 
1986 void
1987 bus_space_set_multi_2(
1988 	bus_space_tag_t		t,
1989 	bus_space_handle_t	h,
1990 	bus_size_t		o,
1991 	const uint16_t		v,
1992 	bus_size_t		c)
1993 {
1994 	while (c-- > 0)
1995 		bus_space_write_2(t, h, o, v);
1996 }
1997 
1998 void
1999 bus_space_set_multi_4(
2000 	bus_space_tag_t		t,
2001 	bus_space_handle_t	h,
2002 	bus_size_t		o,
2003 	const uint32_t		v,
2004 	bus_size_t		c)
2005 {
2006 	while (c-- > 0)
2007 		bus_space_write_4(t, h, o, v);
2008 }
2009 
2010 void
2011 bus_space_set_multi_8(
2012 	bus_space_tag_t		t,
2013 	bus_space_handle_t	h,
2014 	bus_size_t		o,
2015 	const uint64_t		v,
2016 	bus_size_t		c)
2017 {
2018 	while (c-- > 0)
2019 		bus_space_write_8(t, h, o, v);
2020 }
2021 
2022 
2023 /*
2024  *	void bus_space_read_region_N(bus_space_tag_t tag,
2025  *	    bus_space_handle_t bsh, bus_size_t off,
2026  *	    u_intN_t *addr, bus_size_t count);
2027  *
2028  */
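/*
 * Illustrative sketch only (names are hypothetical): the read_region
 * loops below advance the offset on every iteration, so a driver can
 * copy a block of consecutive 32-bit registers into a local buffer:
 *
 *	uint32_t regs[8];
 *
 *	bus_space_read_region_4(sc->sc_bt, sc->sc_bh, STATS_BASE, regs, 8);
 */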
2029 void
2030 bus_space_read_region_1(
2031 	bus_space_tag_t		t,
2032 	bus_space_handle_t	h,
2033 	bus_size_t		o,
2034 	uint8_t			*a,
2035 	bus_size_t		c)
2036 {
2037 	for (; c; a++, c--, o++)
2038 		*a = bus_space_read_1(t, h, o);
2039 }
2040 
2041 void
2042 bus_space_read_region_2(
2043 	bus_space_tag_t		t,
2044 	bus_space_handle_t	h,
2045 	bus_size_t		o,
2046 	uint16_t		*a,
2047 	bus_size_t		c)
2048 {
2049 	for (; c; a++, c--, o+=2)
2050 		*a = bus_space_read_2(t, h, o);
2051 }
2052 
2053 void
2054 bus_space_read_region_4(
2055 	bus_space_tag_t		t,
2056 	bus_space_handle_t	h,
2057 	bus_size_t		o,
2058 	uint32_t		*a,
2059 	bus_size_t		c)
2060 {
2061 	for (; c; a++, c--, o+=4)
2062 		*a = bus_space_read_4(t, h, o);
2063 }
2064 
2065 void
2066 bus_space_read_region_8(
2067 	bus_space_tag_t		t,
2068 	bus_space_handle_t	h,
2069 	bus_size_t		o,
2070 	uint64_t		*a,
2071 	bus_size_t		c)
2072 {
2073 	for (; c; a++, c--, o+=8)
2074 		*a = bus_space_read_8(t, h, o);
2075 }
2076 
2077 /*
2078  *	void bus_space_write_region_N(bus_space_tag_t tag,
2079  *	    bus_space_handle_t bsh, bus_size_t off,
2080  *	    u_intN_t *addr, bus_size_t count);
2081  *
2082  */
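/*
 * Illustrative sketch only (names are hypothetical): write_region is
 * the mirror image of read_region, e.g. loading a 64-byte buffer into
 * consecutive byte locations of a device SRAM window:
 *
 *	uint8_t buf[64];
 *
 *	bus_space_write_region_1(sc->sc_bt, sc->sc_bh, SRAM_BASE, buf, sizeof(buf));
 */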
2083 void
2084 bus_space_write_region_1(
2085 	bus_space_tag_t		t,
2086 	bus_space_handle_t	h,
2087 	bus_size_t		o,
2088 	const uint8_t		*a,
2089 	bus_size_t		c)
2090 {
2091 	for (; c; a++, c--, o++)
2092 		bus_space_write_1(t, h, o, *a);
2093 }
2094 
2095 void
2096 bus_space_write_region_2(
2097 	bus_space_tag_t		t,
2098 	bus_space_handle_t	h,
2099 	bus_size_t		o,
2100 	const uint16_t		*a,
2101 	bus_size_t		c)
2102 {
2103 	for (; c; a++, c--, o+=2)
2104 		bus_space_write_2(t, h, o, *a);
2105 }
2106 
2107 void
2108 bus_space_write_region_4(
2109 	bus_space_tag_t		t,
2110 	bus_space_handle_t	h,
2111 	bus_size_t		o,
2112 	const uint32_t		*a,
2113 	bus_size_t		c)
2114 {
2115 	for (; c; a++, c--, o+=4)
2116 		bus_space_write_4(t, h, o, *a);
2117 }
2118 
2119 void
2120 bus_space_write_region_8(
2121 	bus_space_tag_t		t,
2122 	bus_space_handle_t	h,
2123 	bus_size_t		o,
2124 	const uint64_t		*a,
2125 	bus_size_t		c)
2126 {
2127 	for (; c; a++, c--, o+=8)
2128 		bus_space_write_8(t, h, o, *a);
2129 }
2130 
2131 
2132 /*
2133  *	void bus_space_set_region_N(bus_space_tag_t tag,
2134  *	    bus_space_handle_t bsh, bus_size_t off,
2135  *	    u_intN_t val, bus_size_t count);
2136  *
2137  */
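/*
 * Illustrative sketch only (names are hypothetical): set_region writes
 * the same value to `count' consecutive locations, advancing the
 * offset; e.g. clearing 1024 32-bit words of a mapped frame buffer:
 *
 *	bus_space_set_region_4(sc->sc_bt, sc->sc_fbh, FB_BASE, 0, 1024);
 */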
2138 void
2139 bus_space_set_region_1(
2140 	bus_space_tag_t		t,
2141 	bus_space_handle_t	h,
2142 	bus_size_t		o,
2143 	const uint8_t		v,
2144 	bus_size_t		c)
2145 {
2146 	for (; c; c--, o++)
2147 		bus_space_write_1(t, h, o, v);
2148 }
2149 
2150 void
2151 bus_space_set_region_2(
2152 	bus_space_tag_t		t,
2153 	bus_space_handle_t	h,
2154 	bus_size_t		o,
2155 	const uint16_t		v,
2156 	bus_size_t		c)
2157 {
2158 	for (; c; c--, o+=2)
2159 		bus_space_write_2(t, h, o, v);
2160 }
2161 
2162 void
2163 bus_space_set_region_4(
2164 	bus_space_tag_t		t,
2165 	bus_space_handle_t	h,
2166 	bus_size_t		o,
2167 	const uint32_t		v,
2168 	bus_size_t		c)
2169 {
2170 	for (; c; c--, o+=4)
2171 		bus_space_write_4(t, h, o, v);
2172 }
2173 
2174 void
2175 bus_space_set_region_8(
2176 	bus_space_tag_t		t,
2177 	bus_space_handle_t	h,
2178 	bus_size_t		o,
2179 	const uint64_t		v,
2180 	bus_size_t		c)
2181 {
2182 	for (; c; c--, o+=8)
2183 		bus_space_write_8(t, h, o, v);
2184 }
2185 
2186 
2187 /*
2188  *	void bus_space_copy_region_N(bus_space_tag_t tag,
2189  *	    bus_space_handle_t bsh1, bus_size_t off1,
2190  *	    bus_space_handle_t bsh2, bus_size_t off2,
2191  *	    bus_size_t count);
2192  *
2193  * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
2194  * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
2195  */
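/*
 * Illustrative sketch only (names are hypothetical): note that the
 * loops below read from the second handle/offset pair (h2/o2) and
 * write to the first one (h1/o1), e.g. to move 16 words between two
 * mapped windows:
 *
 *	bus_space_copy_region_4(sc->sc_bt, dst_bh, dst_off,
 *	    src_bh, src_off, 16);
 */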
2196 void
2197 bus_space_copy_region_1(
2198 	bus_space_tag_t		t,
2199 	bus_space_handle_t	h1,
2200 	bus_size_t		o1,
2201 	bus_space_handle_t	h2,
2202 	bus_size_t		o2,
2203 	bus_size_t		c)
2204 {
2205 	for (; c; c--, o1++, o2++)
2206 	    bus_space_write_1(t, h1, o1, bus_space_read_1(t, h2, o2));
2207 }
2208 
2209 void
2210 bus_space_copy_region_2(
2211 	bus_space_tag_t		t,
2212 	bus_space_handle_t	h1,
2213 	bus_size_t		o1,
2214 	bus_space_handle_t	h2,
2215 	bus_size_t		o2,
2216 	bus_size_t		c)
2217 {
2218 	for (; c; c--, o1+=2, o2+=2)
2219 	    bus_space_write_2(t, h1, o1, bus_space_read_2(t, h2, o2));
2220 }
2221 
2222 void
2223 bus_space_copy_region_4(
2224 	bus_space_tag_t		t,
2225 	bus_space_handle_t	h1,
2226 	bus_size_t		o1,
2227 	bus_space_handle_t	h2,
2228 	bus_size_t		o2,
2229 	bus_size_t		c)
2230 {
2231 	for (; c; c--, o1+=4, o2+=4)
2232 	    bus_space_write_4(t, h1, o1, bus_space_read_4(t, h2, o2));
2233 }
2234 
2235 void
2236 bus_space_copy_region_8(
2237 	bus_space_tag_t		t,
2238 	bus_space_handle_t	h1,
2239 	bus_size_t		o1,
2240 	bus_space_handle_t	h2,
2241 	bus_size_t		o2,
2242 	bus_size_t		c)
2243 {
2244 	for (; c; c--, o1+=8, o2+=8)
2245 	    bus_space_write_8(t, h1, o1, bus_space_read_8(t, h2, o2));
2246 }
2247 
2248 /*
2249  *	void bus_space_read_region_stream_N(bus_space_tag_t tag,
2250  *	    bus_space_handle_t bsh, bus_size_t off,
2251  *	    u_intN_t *addr, bus_size_t count);
2252  *
2253  */
2254 void
2255 bus_space_read_region_stream_1(
2256 	bus_space_tag_t		t,
2257 	bus_space_handle_t	h,
2258 	bus_size_t		o,
2259 	uint8_t			*a,
2260 	bus_size_t		c)
2261 {
2262 	for (; c; a++, c--, o++)
2263 		*a = bus_space_read_stream_1(t, h, o);
2264 }
2265 void
2266 bus_space_read_region_stream_2(
2267 	bus_space_tag_t		t,
2268 	bus_space_handle_t	h,
2269 	bus_size_t		o,
2270 	uint16_t		*a,
2271 	bus_size_t		c)
2272 {
2273 	for (; c; a++, c--, o+=2)
2274 		*a = bus_space_read_stream_2(t, h, o);
2275 }
2276 void
2277 bus_space_read_region_stream_4(
2278 	bus_space_tag_t		t,
2279 	bus_space_handle_t	h,
2280 	bus_size_t		o,
2281 	uint32_t		*a,
2282 	bus_size_t		c)
2283 {
2284 	for (; c; a++, c--, o+=4)
2285 		*a = bus_space_read_stream_4(t, h, o);
2286 }
2287 void
2288 bus_space_read_region_stream_8(
2289 	bus_space_tag_t		t,
2290 	bus_space_handle_t	h,
2291 	bus_size_t		o,
2292 	uint64_t		*a,
2293 	bus_size_t		c)
2294 {
2295 	for (; c; a++, c--, o+=8)
2296 		*a = bus_space_read_stream_8(t, h, o);
2297 }
2298 
2299 /*
2300  *	void bus_space_write_region_stream_N(bus_space_tag_t tag,
2301  *	    bus_space_handle_t bsh, bus_size_t off,
2302  *	    u_intN_t *addr, bus_size_t count);
2303  *
2304  */
2305 void
2306 bus_space_write_region_stream_1(
2307 	bus_space_tag_t		t,
2308 	bus_space_handle_t	h,
2309 	bus_size_t		o,
2310 	const uint8_t		*a,
2311 	bus_size_t		c)
2312 {
2313 	for (; c; a++, c--, o++)
2314 		bus_space_write_stream_1(t, h, o, *a);
2315 }
2316 
2317 void
2318 bus_space_write_region_stream_2(
2319 	bus_space_tag_t		t,
2320 	bus_space_handle_t	h,
2321 	bus_size_t		o,
2322 	const uint16_t		*a,
2323 	bus_size_t		c)
2324 {
2325 	for (; c; a++, c--, o+=2)
2326 		bus_space_write_stream_2(t, h, o, *a);
2327 }
2328 
2329 void
2330 bus_space_write_region_stream_4(
2331 	bus_space_tag_t		t,
2332 	bus_space_handle_t	h,
2333 	bus_size_t		o,
2334 	const uint32_t		*a,
2335 	bus_size_t		c)
2336 {
2337 	for (; c; a++, c--, o+=4)
2338 		bus_space_write_stream_4(t, h, o, *a);
2339 }
2340 
2341 void
2342 bus_space_write_region_stream_8(
2343 	bus_space_tag_t		t,
2344 	bus_space_handle_t	h,
2345 	bus_size_t		o,
2346 	const uint64_t		*a,
2347 	bus_size_t		c)
2348 {
2349 	for (; c; a++, c--, o+=8)
2350 		bus_space_write_stream_8(t, h, o, *a);
2351 }
2352 
2353 
2354 /*
2355  *	void bus_space_set_region_stream_N(bus_space_tag_t tag,
2356  *	    bus_space_handle_t bsh, bus_size_t off,
2357  *	    u_intN_t val, bus_size_t count);
2358  *
2359  */
2360 void
2361 bus_space_set_region_stream_1(
2362 	bus_space_tag_t		t,
2363 	bus_space_handle_t	h,
2364 	bus_size_t		o,
2365 	const uint8_t		v,
2366 	bus_size_t		c)
2367 {
2368 	for (; c; c--, o++)
2369 		bus_space_write_stream_1(t, h, o, v);
2370 }
2371 
2372 void
2373 bus_space_set_region_stream_2(
2374 	bus_space_tag_t		t,
2375 	bus_space_handle_t	h,
2376 	bus_size_t		o,
2377 	const uint16_t		v,
2378 	bus_size_t		c)
2379 {
2380 	for (; c; c--, o+=2)
2381 		bus_space_write_stream_2(t, h, o, v);
2382 }
2383 
2384 void
2385 bus_space_set_region_stream_4(
2386 	bus_space_tag_t		t,
2387 	bus_space_handle_t	h,
2388 	bus_size_t		o,
2389 	const uint32_t		v,
2390 	bus_size_t		c)
2391 {
2392 	for (; c; c--, o+=4)
2393 		bus_space_write_stream_4(t, h, o, v);
2394 }
2395 
2396 void
2397 bus_space_set_region_stream_8(
2398 	bus_space_tag_t		t,
2399 	bus_space_handle_t	h,
2400 	bus_size_t		o,
2401 	const uint64_t		v,
2402 	bus_size_t		c)
2403 {
2404 	for (; c; c--, o+=8)
2405 		bus_space_write_stream_8(t, h, o, v);
2406 }
2407 
2408 /*
2409  *	void bus_space_copy_region_stream_N(bus_space_tag_t tag,
2410  *	    bus_space_handle_t bsh1, bus_size_t off1,
2411  *	    bus_space_handle_t bsh2, bus_size_t off2,
2412  *	    bus_size_t count);
2413  *
2414  * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
2415  * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
2416  */
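/*
 * Illustrative sketch only: the _stream variants go through the
 * byte-order preserving accessors (bus_space_read_stream_N /
 * bus_space_write_stream_N), so they are typically chosen when data
 * should cross the bus untranslated (see bus_space(9)).  A
 * hypothetical call mirrors the non-stream form:
 *
 *	bus_space_copy_region_stream_2(sc->sc_bt, dst_bh, dst_off,
 *	    src_bh, src_off, count);
 */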
2417 
2418 void
2419 bus_space_copy_region_stream_1(
2420 	bus_space_tag_t		t,
2421 	bus_space_handle_t	h1,
2422 	bus_size_t		o1,
2423 	bus_space_handle_t	h2,
2424 	bus_size_t		o2,
2425 	bus_size_t		c)
2426 {
2427 	for (; c; c--, o1++, o2++)
2428 	    bus_space_write_stream_1(t, h1, o1, bus_space_read_stream_1(t, h2, o2));
2429 }
2430 
2431 void
2432 bus_space_copy_region_stream_2(
2433 	bus_space_tag_t		t,
2434 	bus_space_handle_t	h1,
2435 	bus_size_t		o1,
2436 	bus_space_handle_t	h2,
2437 	bus_size_t		o2,
2438 	bus_size_t		c)
2439 {
2440 	for (; c; c--, o1+=2, o2+=2)
2441 	    bus_space_write_stream_2(t, h1, o1, bus_space_read_stream_2(t, h2, o2));
2442 }
2443 
2444 void
2445 bus_space_copy_region_stream_4(
2446 	bus_space_tag_t		t,
2447 	bus_space_handle_t	h1,
2448 	bus_size_t		o1,
2449 	bus_space_handle_t	h2,
2450 	bus_size_t		o2,
2451 	bus_size_t		c)
2452 {
2453 	for (; c; c--, o1+=4, o2+=4)
2454 	    bus_space_write_stream_4(t, h1, o1, bus_space_read_stream_4(t, h2, o2));
2455 }
2456 
2457 void
2458 bus_space_copy_region_stream_8(
2459 	bus_space_tag_t		t,
2460 	bus_space_handle_t	h1,
2461 	bus_size_t		o1,
2462 	bus_space_handle_t	h2,
2463 	bus_size_t		o2,
2464 	bus_size_t		c)
2465 {
2466 	for (; c; c--, o1+=8, o2+=8)
2467 	    bus_space_write_stream_8(t, h1, o1, bus_space_read_stream_8(t, h2, o2));
2468 }
2469 
2470 void
2471 bus_space_write_1(
2472 	bus_space_tag_t		t,
2473 	bus_space_handle_t	h,
2474 	bus_size_t		o,
2475 	uint8_t			v)
2476 {
2477 	(*t->sparc_write_1)(t, h, o, v);
2478 }
2479 
2480 void
2481 bus_space_write_2(
2482 	bus_space_tag_t		t,
2483 	bus_space_handle_t	h,
2484 	bus_size_t		o,
2485 	uint16_t		v)
2486 {
2487 	(*t->sparc_write_2)(t, h, o, v);
2488 }
2489 
2490 void
2491 bus_space_write_4(
2492 	bus_space_tag_t		t,
2493 	bus_space_handle_t	h,
2494 	bus_size_t		o,
2495 	uint32_t		v)
2496 {
2497 	(*t->sparc_write_4)(t, h, o, v);
2498 }
2499 
2500 void
2501 bus_space_write_8(
2502 	bus_space_tag_t		t,
2503 	bus_space_handle_t	h,
2504 	bus_size_t		o,
2505 	uint64_t		v)
2506 {
2507 	(*t->sparc_write_8)(t, h, o, v);
2508 }
2509 
2510 #if __SLIM_SPARC_BUS_SPACE
2511 
2512 void
2513 bus_space_write_1(
2514 	bus_space_tag_t		t,
2515 	bus_space_handle_t	h,
2516 	bus_size_t		o,
2517 	uint8_t			v)
2518 {
2519 	__insn_barrier();
2520 	bus_space_write_1_real(t, h, o, v);
2521 }
2522 
2523 void
2524 bus_space_write_2(
2525 	bus_space_tag_t		t,
2526 	bus_space_handle_t	h,
2527 	bus_size_t		o,
2528 	uint16_t		v)
2529 {
2530 	__insn_barrier();
2531 	bus_space_write_2_real(t, h, o, v);
2532 }
2533 
2534 void
2535 bus_space_write_4(
2536 	bus_space_tag_t		t,
2537 	bus_space_handle_t	h,
2538 	bus_size_t		o,
2539 	uint32_t		v)
2540 {
2541 	__insn_barrier();
2542 	bus_space_write_4_real(t, h, o, v);
2543 }
2544 
2545 void
2546 bus_space_write_8(
2547 	bus_space_tag_t		t,
2548 	bus_space_handle_t	h,
2549 	bus_size_t		o,
2550 	uint64_t		v)
2551 {
2552 	__insn_barrier();
2553 	bus_space_write_8_real(t, h, o, v);
2554 }
2555 
2556 #endif /* __SLIM_SPARC_BUS_SPACE */
2557 
2558 uint8_t
2559 bus_space_read_1(
2560 	bus_space_tag_t		t,
2561 	bus_space_handle_t	h,
2562 	bus_size_t		o)
2563 {
2564 	return (*t->sparc_read_1)(t, h, o);
2565 }
2566 
2567 uint16_t
2568 bus_space_read_2(
2569 	bus_space_tag_t		t,
2570 	bus_space_handle_t	h,
2571 	bus_size_t		o)
2572 {
2573 	return (*t->sparc_read_2)(t, h, o);
2574 }
2575 
2576 uint32_t
2577 bus_space_read_4(
2578 	bus_space_tag_t		t,
2579 	bus_space_handle_t	h,
2580 	bus_size_t		o)
2581 {
2582 	return (*t->sparc_read_4)(t, h, o);
2583 }
2584 
2585 uint64_t
2586 bus_space_read_8(
2587 	bus_space_tag_t		t,
2588 	bus_space_handle_t	h,
2589 	bus_size_t		o)
2590 {
2591 	return (*t->sparc_read_8)(t, h, o);
2592 }
2593 
2594 #if __SLIM_SPARC_BUS_SPACE
2595 uint8_t
2596 bus_space_read_1(
2597 	bus_space_tag_t		t,
2598 	bus_space_handle_t	h,
2599 	bus_size_t		o)
2600 {
2601 	__insn_barrier();
2602 	return bus_space_read_1_real(t, h, o);
2603 }
2604 
2605 uint16_t
2606 bus_space_read_2(
2607 	bus_space_tag_t		t,
2608 	bus_space_handle_t	h,
2609 	bus_size_t		o)
2610 {
2611 	__insn_barrier();
2612 	return bus_space_read_2_real(t, h, o);
2613 }
2614 
2615 uint32_t
2616 bus_space_read_4(
2617 	bus_space_tag_t		t,
2618 	bus_space_handle_t	h,
2619 	bus_size_t		o)
2620 {
2621 	__insn_barrier();
2622 	return bus_space_read_4_real(t, h, o);
2623 }
2624 
2625 uint64_t
2626 bus_space_read_8(
2627 	bus_space_tag_t		t,
2628 	bus_space_handle_t	h,
2629 	bus_size_t		o)
2630 {
2631 	__insn_barrier();
2632 	return bus_space_read_8_real(t, h, o);
2633 }
2634 
2635 #endif /* __SLIM_SPARC_BUS_SPACE */
2636 
2637 void
2638 bus_space_read_multi_1(
2639 	bus_space_tag_t		t,
2640 	bus_space_handle_t	h,
2641 	bus_size_t		o,
2642 	uint8_t			*a,
2643 	bus_size_t		c)
2644 {
2645 	while (c-- > 0)
2646 		*a++ = bus_space_read_1(t, h, o);
2647 }
2648 
2649 void
2650 bus_space_read_multi_2(
2651 	bus_space_tag_t		t,
2652 	bus_space_handle_t	h,
2653 	bus_size_t		o,
2654 	uint16_t		*a,
2655 	bus_size_t		c)
2656 {
2657 	while (c-- > 0)
2658 		*a++ = bus_space_read_2(t, h, o);
2659 }
2660 
2661 void
2662 bus_space_read_multi_4(
2663 	bus_space_tag_t		t,
2664 	bus_space_handle_t	h,
2665 	bus_size_t		o,
2666 	uint32_t		*a,
2667 	bus_size_t		c)
2668 {
2669 	while (c-- > 0)
2670 		*a++ = bus_space_read_4(t, h, o);
2671 }
2672 
2673 void
2674 bus_space_read_multi_8(
2675 	bus_space_tag_t		t,
2676 	bus_space_handle_t	h,
2677 	bus_size_t		o,
2678 	uint64_t		*a,
2679 	bus_size_t		c)
2680 {
2681 	while (c-- > 0)
2682 		*a++ = bus_space_read_8(t, h, o);
2683 }
2684 
2685 /*
2686  *	void bus_space_read_multi_N(bus_space_tag_t tag,
2687  *	    bus_space_handle_t bsh, bus_size_t offset,
2688  *	    u_intN_t *addr, bus_size_t count);
2689  *
2690  * Read `count' 1, 2, 4, or 8 byte quantities from bus space
2691  * described by tag/handle/offset and copy into buffer provided.
2692  */
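/*
 * Illustrative sketch only (names are hypothetical): unlike the
 * _region variants, the _multi loops keep reading the same offset, so
 * they are suited to draining a FIFO data register, e.g.:
 *
 *	uint16_t buf[32];
 *
 *	bus_space_read_multi_2(sc->sc_bt, sc->sc_bh, FIFO_DATA, buf, 32);
 */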
2693 void
2694 bus_space_read_multi_stream_2(
2695 	bus_space_tag_t		t,
2696 	bus_space_handle_t	h,
2697 	bus_size_t		o,
2698 	uint16_t		*a,
2699 	bus_size_t		c)
2700 {
2701 	while (c-- > 0)
2702 		*a++ = bus_space_read_2_real(t, h, o);
2703 }
2704 
2705 void
2706 bus_space_read_multi_stream_4(
2707 	bus_space_tag_t		t,
2708 	bus_space_handle_t	h,
2709 	bus_size_t		o,
2710 	uint32_t		*a,
2711 	bus_size_t		c)
2712 {
2713 	while (c-- > 0)
2714 		*a++ = bus_space_read_4_real(t, h, o);
2715 }
2716 
2717 void
2718 bus_space_read_multi_stream_8(
2719 	bus_space_tag_t		t,
2720 	bus_space_handle_t	h,
2721 	bus_size_t		o,
2722 	uint64_t		*a,
2723 	bus_size_t		c)
2724 {
2725 	while (c-- > 0)
2726 		*a++ = bus_space_read_8_real(t, h, o);
2727 }
2728 
2729 /*
2730  *	void bus_space_write_multi_N(bus_space_tag_t tag,
2731  *	    bus_space_handle_t bsh, bus_size_t offset,
2732  *	    const u_intN_t *addr, bus_size_t count);
2733  *
2734  * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
2735  * provided to bus space described by tag/handle/offset.
2736  */
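/*
 * Illustrative sketch only (names are hypothetical): write_multi
 * pushes a buffer out through a single device register, e.g. feeding
 * 32 words to a transmit FIFO:
 *
 *	bus_space_write_multi_4(sc->sc_bt, sc->sc_bh, TX_FIFO, txbuf, 32);
 */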
2737 void
2738 bus_space_write_multi_1(
2739 	bus_space_tag_t		t,
2740 	bus_space_handle_t	h,
2741 	bus_size_t		o,
2742 	const uint8_t		*a,
2743 	bus_size_t		c)
2744 {
2745 	while (c-- > 0)
2746 		bus_space_write_1(t, h, o, *a++);
2747 }
2748 
2749 void
2750 bus_space_write_multi_2(
2751 	bus_space_tag_t		t,
2752 	bus_space_handle_t	h,
2753 	bus_size_t		o,
2754 	const uint16_t		*a,
2755 	bus_size_t		c)
2756 {
2757 	while (c-- > 0)
2758 		bus_space_write_2(t, h, o, *a++);
2759 }
2760 
2761 void
2762 bus_space_write_multi_4(
2763 	bus_space_tag_t		t,
2764 	bus_space_handle_t	h,
2765 	bus_size_t		o,
2766 	const uint32_t		*a,
2767 	bus_size_t		c)
2768 {
2769 	while (c-- > 0)
2770 		bus_space_write_4(t, h, o, *a++);
2771 }
2772 
2773 void
2774 bus_space_write_multi_8(
2775 	bus_space_tag_t		t,
2776 	bus_space_handle_t	h,
2777 	bus_size_t		o,
2778 	const uint64_t		*a,
2779 	bus_size_t		c)
2780 {
2781 	while (c-- > 0)
2782 		bus_space_write_8(t, h, o, *a++);
2783 }
2784 
2785 /*
2786  * Allocate a new bus tag and have it inherit the methods of the
2787  * given parent.
2788  */
2789 bus_space_tag_t
2790 bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
2791 {
2792 	struct sparc_bus_space_tag *sbt;
2793 
2794 	sbt = malloc(sizeof(struct sparc_bus_space_tag),
2795 		     M_DEVBUF, M_NOWAIT|M_ZERO);
2796 	if (sbt == NULL)
2797 		return (NULL);
2798 
2799 	if (parent) {
2800 		memcpy(sbt, parent, sizeof(*sbt));
2801 		sbt->parent = parent;
2802 		sbt->ranges = NULL;
2803 		sbt->nranges = 0;
2804 	}
2805 
2806 	sbt->cookie = cookie;
2807 	return (sbt);
2808 }
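/*
 * Illustrative sketch only (xbus_bus_map and the softc fields are
 * hypothetical): a child bus driver would typically derive its tag
 * from the parent's during attach and override only the methods it
 * needs:
 *
 *	bus_space_tag_t bt;
 *
 *	bt = bus_space_tag_alloc(parent_tag, sc);
 *	if (bt == NULL)
 *		return;
 *	bt->sparc_bus_map = xbus_bus_map;
 *	sc->sc_bustag = bt;
 */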
2809 
2810 /*
2811  * Generic routine to translate an address using OpenPROM `ranges'.
2812  */
2813 int
2814 bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
2815     bus_addr_t *bap)
2816 {
2817 	int i, space = BUS_ADDR_IOSPACE(*bap);
2818 
2819 	for (i = 0; i < nranges; i++) {
2820 		struct openprom_range *rp = &ranges[i];
2821 
2822 		if (rp->or_child_space != space)
2823 			continue;
2824 
2825 		/* We've found the connection to the parent bus. */
2826 		*bap = BUS_ADDR(rp->or_parent_space,
2827 		    rp->or_parent_base + BUS_ADDR_PADDR(*bap));
2828 		return (0);
2829 	}
2830 
2831 	return (EINVAL);
2832 }
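/*
 * Illustrative example with made-up numbers: given a single ranges
 * entry with or_child_space = 1, or_parent_space = 0 and
 * or_parent_base = 0xf0000000, an address BUS_ADDR(1, 0x4000) is
 * rewritten in place to BUS_ADDR(0, 0xf0004000) and 0 is returned;
 * an address in any other child space leaves *bap untouched and
 * yields EINVAL.
 */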
2833 
2834 static int
2835 sparc_bus_map_iodev(bus_space_tag_t t, bus_addr_t ba, bus_size_t size, int flags,
2836 	      vaddr_t va, bus_space_handle_t *hp)
2837 {
2838 	vaddr_t v;
2839 	paddr_t pa;
2840 	unsigned int pmtype;
2841 	bus_space_tag_t pt;
2842 static	vaddr_t iobase;
2843 
2844 	/*
2845 	 * This base class bus map function knows about address range
2846 	 * translation so bus drivers that need no other special
2847 	 * handling can just keep this method in their tags.
2848 	 *
2849 	 * We expect to resolve range translations iteratively, but allow
2850 	 * for recursion just in case.
2851 	 */
2852 	while ((pt = t->parent) != NULL) {
2853 		if (t->ranges != NULL) {
2854 			int error;
2855 
2856 			if ((error = bus_space_translate_address_generic(
2857 					t->ranges, t->nranges, &ba)) != 0)
2858 				return (error);
2859 		}
2860 		if (pt->sparc_bus_map != sparc_bus_map)
2861 			return (bus_space_map2(pt, ba, size, flags, va, hp));
2862 		t = pt;
2863 	}
2864 
2865 	if (iobase == 0)
2866 		iobase = IODEV_BASE;
2867 
2868 	size = round_page(size);
2869 	if (size == 0) {
2870 		printf("sparc_bus_map: zero size\n");
2871 		return (EINVAL);
2872 	}
2873 
2874 	if (va)
2875 		v = trunc_page(va);
2876 	else {
2877 		v = iobase;
2878 		iobase += size;
2879 		if (iobase > IODEV_END)	/* unlikely */
2880 			panic("sparc_bus_map: iobase=0x%lx", iobase);
2881 	}
2882 
2883 	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
2884 	pa = BUS_ADDR_PADDR(ba);
2885 
2886 	/* note: preserve page offset */
2887 	*hp = (bus_space_handle_t)(v | ((u_long)pa & PGOFSET));
2888 
2889 	pa = trunc_page(pa);
2890 	do {
2891 		pmap_kenter_pa(v, pa | pmtype | PMAP_NC,
2892 		    VM_PROT_READ | VM_PROT_WRITE, 0);
2893 		v += PAGE_SIZE;
2894 		pa += PAGE_SIZE;
2895 	} while ((size -= PAGE_SIZE) > 0);
2896 
2897 	pmap_update(pmap_kernel());
2898 	return (0);
2899 }
2900 
2901 static int
2902 sparc_bus_map_large(bus_space_tag_t t, bus_addr_t ba,
2903 		    bus_size_t size, int flags, bus_space_handle_t *hp)
2904 {
2905 	vaddr_t v = 0;
2906 
2907 	if (uvm_map(kernel_map, &v, size, NULL, 0, PAGE_SIZE,
2908 	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_NORMAL,
2909 			0)) == 0) {
2910 		return sparc_bus_map_iodev(t, ba, size, flags, v, hp);
2911 	}
2912 	return -1;
2913 }
2914 
2915 int
2916 sparc_bus_map(bus_space_tag_t t, bus_addr_t ba,
2917 		    bus_size_t size, int flags, vaddr_t va,
2918 		    bus_space_handle_t *hp)
2919 {
2920 
2921 	if (flags & BUS_SPACE_MAP_LARGE) {
2922 		return sparc_bus_map_large(t, ba, size, flags, hp);
2923 	} else
2924 		return sparc_bus_map_iodev(t, ba, size, flags, va, hp);
2925 
2926 }
2927 
2928 int
2929 sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
2930 {
2931 	vaddr_t va = trunc_page((vaddr_t)bh);
2932 
2933 	/*
2934 	 * XXX
2935 	 * mappings with BUS_SPACE_MAP_LARGE need additional care here
2936 	 * we can just check if the VA is in the IODEV range
2937 	 */
2938 
2939 	pmap_kremove(va, round_page(size));
2940 	pmap_update(pmap_kernel());
2941 	return (0);
2942 }
2943 
2944 int
2945 sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
2946 		    bus_size_t offset, bus_size_t size,
2947 		    bus_space_handle_t *nhandlep)
2948 {
2949 
2950 	*nhandlep = handle + offset;
2951 	return (0);
2952 }
2953 
2954 paddr_t
2955 sparc_bus_mmap(bus_space_tag_t t, bus_addr_t ba, off_t off,
2956 	       int prot, int flags)
2957 {
2958 	u_int pmtype;
2959 	paddr_t pa;
2960 	bus_space_tag_t pt;
2961 
2962 	/*
2963 	 * Base class bus mmap function; see also sparc_bus_map
2964 	 */
2965 	while ((pt = t->parent) != NULL) {
2966 		if (t->ranges != NULL) {
2967 			int error;
2968 
2969 			if ((error = bus_space_translate_address_generic(
2970 					t->ranges, t->nranges, &ba)) != 0)
2971 				return (-1);
2972 		}
2973 		if (pt->sparc_bus_mmap != sparc_bus_mmap)
2974 			return (bus_space_mmap(pt, ba, off, prot, flags));
2975 		t = pt;
2976 	}
2977 
2978 	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
2979 	pa = trunc_page(BUS_ADDR_PADDR(ba) + off);
2980 
2981 	return (paddr_t)(pa | pmtype | PMAP_NC);
2982 }
2983 
2984 /*
2985  * Establish a temporary bus mapping for device probing.
2986  */
2987 int
2988 bus_space_probe(bus_space_tag_t tag, bus_addr_t paddr, bus_size_t size,
2989 		size_t offset, int flags,
2990 		int (*callback)(void *, void *), void *arg)
2991 {
2992 	bus_space_handle_t bh;
2993 	void *tmp;
2994 	int result;
2995 
2996 	if (bus_space_map2(tag, paddr, size, flags, TMPMAP_VA, &bh) != 0)
2997 		return (0);
2998 
2999 	tmp = (void *)bh;
3000 	result = (probeget((char *)tmp + offset, size) != -1);
3001 	if (result && callback != NULL)
3002 		result = (*callback)(tmp, arg);
3003 	bus_space_unmap(tag, bh, size);
3004 	return (result);
3005 }
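/*
 * Illustrative sketch only (ma_bustag/ma_paddr are hypothetical attach
 * arguments): a match function can use this to poke a single byte-wide
 * register before committing to an attach:
 *
 *	if (bus_space_probe(ma->ma_bustag, ma->ma_paddr, 1, 0, 0,
 *	    NULL, NULL) == 0)
 *		return (0);
 */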
3006 
3007 
3008 void *
3009 sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
3010 			     int (*handler)(void *), void *arg,
3011 			     void (*fastvec)(void))
3012 {
3013 	struct intrhand *ih;
3014 
3015 	ih = (struct intrhand *)
3016 		malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
3017 	if (ih == NULL)
3018 		return (NULL);
3019 
3020 	ih->ih_fun = handler;
3021 	ih->ih_arg = arg;
3022 	intr_establish(pil, level, ih, fastvec, false);
3023 	return (ih);
3024 }
3025 
3026 void sparc_bus_barrier (bus_space_tag_t t, bus_space_handle_t h,
3027 			bus_size_t offset, bus_size_t size, int flags)
3028 {
3029 
3030 	/* No default barrier action defined */
3031 	return;
3032 }
3033 
3034 static uint8_t
3035 sparc_bus_space_read_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3036 {
3037 
3038 	return bus_space_read_1_real(t, h, o);
3039 }
3040 
3041 static uint16_t
3042 sparc_bus_space_read_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3043 {
3044 
3045 	return bus_space_read_2_real(t, h, o);
3046 }
3047 
3048 static uint32_t
3049 sparc_bus_space_read_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3050 {
3051 
3052 	return bus_space_read_4_real(t, h, o);
3053 }
3054 
3055 static uint64_t
3056 sparc_bus_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3057 {
3058 
3059 	return bus_space_read_8_real(t, h, o);
3060 }
3061 
3062 static void
3063 sparc_bus_space_write_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3064 			uint8_t v)
3065 {
3066 
3067 	bus_space_write_1_real(t, h, o, v);
3068 }
3069 
3070 static void
3071 sparc_bus_space_write_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3072 			uint16_t v)
3073 {
3074 
3075 	bus_space_write_2_real(t, h, o, v);
3076 }
3077 
3078 static void
3079 sparc_bus_space_write_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3080 			uint32_t v)
3081 {
3082 
3083 	bus_space_write_4_real(t, h, o, v);
3084 }
3085 
3086 static void
3087 sparc_bus_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3088 			uint64_t v)
3089 {
3090 
3091 	bus_space_write_8_real(t, h, o, v);
3092 }
3093 
3094 struct sparc_bus_space_tag mainbus_space_tag = {
3095 	NULL,				/* cookie */
3096 	NULL,				/* parent bus tag */
3097 	NULL,				/* ranges */
3098 	0,				/* nranges */
3099 	sparc_bus_map,			/* bus_space_map */
3100 	sparc_bus_unmap,		/* bus_space_unmap */
3101 	sparc_bus_subregion,		/* bus_space_subregion */
3102 	sparc_bus_barrier,		/* bus_space_barrier */
3103 	sparc_bus_mmap,			/* bus_space_mmap */
3104 	sparc_mainbus_intr_establish,	/* bus_intr_establish */
3105 
3106 	sparc_bus_space_read_1,		/* bus_space_read_1 */
3107 	sparc_bus_space_read_2,		/* bus_space_read_2 */
3108 	sparc_bus_space_read_4,		/* bus_space_read_4 */
3109 	sparc_bus_space_read_8,		/* bus_space_read_8 */
3110 	sparc_bus_space_write_1,	/* bus_space_write_1 */
3111 	sparc_bus_space_write_2,	/* bus_space_write_2 */
3112 	sparc_bus_space_write_4,	/* bus_space_write_4 */
3113 	sparc_bus_space_write_8		/* bus_space_write_8 */
3114 };
3115 
3116 int
3117 mm_md_physacc(paddr_t pa, vm_prot_t prot)
3118 {
3119 
3120 	return pmap_pa_exists(pa) ? 0 : EFAULT;
3121 }
3122 
3123 int
3124 mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
3125 {
3126 	extern vaddr_t prom_vstart;
3127 	extern vaddr_t prom_vend;
3128 	const vaddr_t v = (vaddr_t)ptr;
3129 
3130 	*handled = (v >= MSGBUF_VA && v < MSGBUF_VA + PAGE_SIZE) ||
3131 	    (v >= prom_vstart && v < prom_vend && (prot & VM_PROT_WRITE) == 0);
3132 	return 0;
3133 }
3134 
3135 int
3136 mm_md_readwrite(dev_t dev, struct uio *uio)
3137 {
3138 
3139 	switch (minor(dev)) {
3140 #if defined(SUN4)
3141 	case DEV_EEPROM:
3142 		if (cputyp == CPU_SUN4)
3143 			return eeprom_uio(uio);
3144 		else
3145 #endif
3146 		return ENXIO;
3147 	default:
3148 		return ENXIO;
3149 	}
3150 }
3151