1 /* $NetBSD: machdep.c,v 1.339 2022/07/26 20:08:56 andvar Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1992, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * This software was developed by the Computer Systems Engineering group
38 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
39 * contributed to Berkeley.
40 *
41 * All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Lawrence Berkeley Laboratory.
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * @(#)machdep.c 8.6 (Berkeley) 1/14/94
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.339 2022/07/26 20:08:56 andvar Exp $");
75
76 #include "opt_compat_netbsd.h"
77 #include "opt_compat_sunos.h"
78 #include "opt_sparc_arch.h"
79 #include "opt_modular.h"
80 #include "opt_multiprocessor.h"
81
82 #include <sys/param.h>
83 #include <sys/signal.h>
84 #include <sys/signalvar.h>
85 #include <sys/proc.h>
86 #include <sys/extent.h>
87 #include <sys/cpu.h>
88 #include <sys/buf.h>
89 #include <sys/device.h>
90 #include <sys/reboot.h>
91 #include <sys/systm.h>
92 #include <sys/kernel.h>
93 #include <sys/conf.h>
94 #include <sys/file.h>
95 #include <sys/kmem.h>
96 #include <sys/mbuf.h>
97 #include <sys/mount.h>
98 #include <sys/msgbuf.h>
99 #include <sys/syscallargs.h>
100 #include <sys/exec.h>
101 #include <sys/exec_aout.h>
102 #include <sys/ucontext.h>
103 #include <sys/module.h>
104 #include <sys/mutex.h>
105 #include <sys/ras.h>
106
107 #include <dev/mm.h>
108
109 #include <uvm/uvm.h> /* we use uvm.kernel_object */
110
111 #include <sys/sysctl.h>
112
113 #ifdef COMPAT_13
114 #include <compat/sys/signal.h>
115 #include <compat/sys/signalvar.h>
116 #endif
117
118 #define _SPARC_BUS_DMA_PRIVATE
119 #include <machine/autoconf.h>
120 #include <sys/bus.h>
121 #include <machine/frame.h>
122 #include <machine/cpu.h>
123 #include <machine/pcb.h>
124 #include <machine/pmap.h>
125 #include <machine/oldmon.h>
126 #include <machine/bsd_openprom.h>
127 #include <machine/bootinfo.h>
128 #include <machine/eeprom.h>
129
130 #include <sparc/sparc/asm.h>
131 #include <sparc/sparc/cache.h>
132 #include <sparc/sparc/vaddrs.h>
133 #include <sparc/sparc/cpuvar.h>
134
135 #include "fb.h"
136 #include "power.h"
137
138 #if NPOWER > 0
139 #include <sparc/dev/power.h>
140 #endif
141
/* Serializes FPU context ownership hand-off (see FPU_LOCK use in setregs()). */
kmutex_t fpu_mtx;

/*
 * dvmamap24 is used to manage DVMA memory for devices that have the upper
 * eight address bits wired to all-ones (e.g. `le' and `ie')
 */
struct extent *dvmamap24;

void	dumpsys(void);
void	stackdump(void);
152
153 /*
154 * Machine-dependent startup code
155 */
156 void
cpu_startup(void)157 cpu_startup(void)
158 {
159 #ifdef DEBUG
160 extern int pmapdebug;
161 int opmapdebug = pmapdebug;
162 #endif
163 struct pcb *pcb;
164 vsize_t size;
165 paddr_t pa;
166 char pbuf[9];
167
168 #ifdef DEBUG
169 pmapdebug = 0;
170 #endif
171
172 /* XXX */
173 pcb = lwp_getpcb(&lwp0);
174 if (pcb && pcb->pcb_psr == 0)
175 pcb->pcb_psr = getpsr();
176
177 /*
178 * Re-map the message buffer from its temporary address
179 * at KERNBASE to MSGBUF_VA.
180 */
181 #if !defined(MSGBUFSIZE) || MSGBUFSIZE <= 8192
182 /*
183 * We use the free page(s) in front of the kernel load address.
184 */
185 size = 8192;
186
187 /* Get physical address of the message buffer */
188 pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);
189
190 /* Invalidate the current mapping at KERNBASE. */
191 pmap_kremove((vaddr_t)KERNBASE, size);
192 pmap_update(pmap_kernel());
193
194 /* Enter the new mapping */
195 pmap_map(MSGBUF_VA, pa, pa + size, VM_PROT_READ|VM_PROT_WRITE);
196
197 /*
198 * Re-initialize the message buffer.
199 */
200 initmsgbuf((void *)MSGBUF_VA, size);
201 #else /* MSGBUFSIZE */
202 {
203 struct pglist mlist;
204 struct vm_page *m;
205 vaddr_t va0, va;
206
207 /*
208 * We use the free page(s) in front of the kernel load address,
209 * and then allocate some more.
210 */
211 size = round_page(MSGBUFSIZE);
212
213 /* Get physical address of first 8192 chunk of the message buffer */
214 pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);
215
216 /* Allocate additional physical pages */
217 if (uvm_pglistalloc(size - 8192,
218 vm_first_phys, vm_first_phys+vm_num_phys,
219 0, 0, &mlist, 1, 0) != 0)
220 panic("cpu_start: no memory for message buffer");
221
222 /* Invalidate the current mapping at KERNBASE. */
223 pmap_kremove((vaddr_t)KERNBASE, 8192);
224 pmap_update(pmap_kernel());
225
226 /* Allocate virtual memory space */
227 va0 = va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
228 if (va == 0)
229 panic("cpu_start: no virtual memory for message buffer");
230
231 /* Map first 8192 */
232 while (va < va0 + 8192) {
233 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
234 pa += PAGE_SIZE;
235 va += PAGE_SIZE;
236 }
237 pmap_update(pmap_kernel());
238
239 /* Map the rest of the pages */
240 TAILQ_FOREACH(m, &mlist ,pageq.queue) {
241 if (va >= va0 + size)
242 panic("cpu_start: memory buffer size botch");
243 pa = VM_PAGE_TO_PHYS(m);
244 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
245 va += PAGE_SIZE;
246 }
247 pmap_update(pmap_kernel());
248
249 /*
250 * Re-initialize the message buffer.
251 */
252 initmsgbuf((void *)va0, size);
253 }
254 #endif /* MSGBUFSIZE */
255
256 /*
257 * Good {morning,afternoon,evening,night}.
258 */
259 printf("%s%s", copyright, version);
260 /*identifycpu();*/
261 format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
262 printf("total memory = %s\n", pbuf);
263
264 /*
265 * Tune buffer cache variables based on the capabilities of the MMU
266 * to cut down on VM space allocated for the buffer caches that
267 * would lead to MMU resource shortage.
268 */
269 if (CPU_ISSUN4 || CPU_ISSUN4C) {
270 /* Clip UBC windows */
271 if (cpuinfo.mmu_nsegment <= 128) {
272 /*
273 * ubc_nwins and ubc_winshift control the amount
274 * of VM used by the UBC. Normally, this VM is
275 * not wired in the kernel map, hence non-locked
276 * `PMEGs' (see pmap.c) are used for this space.
277 * We still limit possible fragmentation to prevent
278 * the occasional wired UBC mappings from tying up
279 * too many PMEGs.
280 *
281 * Set the upper limit to 9 segments (default
282 * winshift = 13).
283 */
284 ubc_nwins = 512;
285
286 /*
287 * buf_setvalimit() allocates a submap for buffer
288 * allocation. We use it to limit the number of locked
289 * `PMEGs' (see pmap.c) dedicated to the buffer cache.
290 *
291 * Set the upper limit to 12 segments (3MB), which
292 * corresponds approximately to the size of the
293 * traditional 5% rule (assuming a maximum 64MB of
294 * memory in small sun4c machines).
295 */
296 buf_setvalimit(12 * 256*1024);
297 }
298
299 /* Clip max data & stack to avoid running into the MMU hole */
300 #if MAXDSIZ > 256*1024*1024
301 maxdmap = 256*1024*1024;
302 #endif
303 #if MAXSSIZ > 256*1024*1024
304 maxsmap = 256*1024*1024;
305 #endif
306 }
307
308 if (CPU_ISSUN4 || CPU_ISSUN4C) {
309 /*
310 * Allocate DMA map for 24-bit devices (le, ie)
311 * [dvma_base - dvma_end] is for VME devices..
312 */
313 dvmamap24 = extent_create("dvmamap24",
314 D24_DVMA_BASE, D24_DVMA_END,
315 0, 0, EX_NOWAIT);
316 if (dvmamap24 == NULL)
317 panic("unable to allocate DVMA map");
318 }
319
320 #ifdef DEBUG
321 pmapdebug = opmapdebug;
322 #endif
323 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
324 printf("avail memory = %s\n", pbuf);
325
326 pmap_redzone();
327
328 mutex_init(&fpu_mtx, MUTEX_DEFAULT, IPL_SCHED);
329 }
330
331 /*
332 * Set up registers on exec.
333 *
334 * XXX this entire mess must be fixed
335 */
336 /* ARGSUSED */
337 void
setregs(struct lwp * l,struct exec_package * pack,vaddr_t stack)338 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
339 {
340 struct trapframe *tf = l->l_md.md_tf;
341 struct fpstate *fs;
342 int psr;
343
344 /* Don't allow unaligned data references by default */
345 l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN;
346
347 /*
348 * Set the registers to 0 except for:
349 * %o6: stack pointer, built in exec())
350 * %psr: (retain CWP and PSR_S bits)
351 * %g1: p->p_psstrp (used by crt0)
352 * %pc,%npc: entry point of program
353 */
354 psr = tf->tf_psr & (PSR_S | PSR_CWP);
355 if ((fs = l->l_md.md_fpstate) != NULL) {
356 struct cpu_info *cpi;
357 int s;
358 /*
359 * We hold an FPU state. If we own *some* FPU chip state
360 * we must get rid of it, and the only way to do that is
361 * to save it. In any case, get rid of our FPU state.
362 */
363 FPU_LOCK(s);
364 if ((cpi = l->l_md.md_fpu) != NULL) {
365 if (cpi->fplwp != l)
366 panic("FPU(%d): fplwp %p",
367 cpi->ci_cpuid, cpi->fplwp);
368 if (l == cpuinfo.fplwp)
369 savefpstate(fs);
370 #if defined(MULTIPROCESSOR)
371 else
372 XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid);
373 #endif
374 cpi->fplwp = NULL;
375 }
376 l->l_md.md_fpu = NULL;
377 FPU_UNLOCK(s);
378 kmem_free(fs, sizeof(struct fpstate));
379 l->l_md.md_fpstate = NULL;
380 }
381 memset((void *)tf, 0, sizeof *tf);
382 tf->tf_psr = psr;
383 tf->tf_global[1] = l->l_proc->p_psstrp;
384 tf->tf_pc = pack->ep_entry & ~3;
385 tf->tf_npc = tf->tf_pc + 4;
386 stack -= sizeof(struct rwindow);
387 tf->tf_out[6] = stack;
388 }
389
#ifdef DEBUG
/*
 * Signal-delivery debug knobs: set sigdebug to an SDB_* mask and sigpid
 * to a process id to trace sendsig/setmcontext for that process.
 */
int sigdebug = 0;
int sigpid = 0;
#define SDB_FOLLOW	0x01
#define SDB_KSTACK	0x02
#define SDB_FPSTATE	0x04
#endif
397
398 /*
399 * machine dependent system variables.
400 */
401 static int
sysctl_machdep_boot(SYSCTLFN_ARGS)402 sysctl_machdep_boot(SYSCTLFN_ARGS)
403 {
404 struct sysctlnode node = *rnode;
405 struct btinfo_kernelfile *bi_file;
406 const char *cp;
407
408
409 switch (node.sysctl_num) {
410 case CPU_BOOTED_KERNEL:
411 if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL)
412 cp = bi_file->name;
413 else
414 cp = prom_getbootfile();
415 if (cp != NULL && cp[0] == '\0')
416 cp = "netbsd";
417 break;
418 case CPU_BOOTED_DEVICE:
419 cp = prom_getbootpath();
420 break;
421 case CPU_BOOT_ARGS:
422 cp = prom_getbootargs();
423 break;
424 default:
425 return (EINVAL);
426 }
427
428 if (cp == NULL || cp[0] == '\0')
429 return (ENOENT);
430
431 node.sysctl_data = __UNCONST(cp);
432 node.sysctl_size = strlen(cp) + 1;
433 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
434 }
435
/*
 * Register the machdep sysctl subtree: three PROM/bootinfo-backed
 * string nodes served by sysctl_machdep_boot(), plus the integer
 * cpu_arch node exported directly.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "boot_args", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "cpu_arch", NULL,
		       NULL, 0, &cpu_arch, 0,
		       CTL_MACHDEP, CPU_ARCH, CTL_EOL);
}
466
467 /*
468 * Send an interrupt to process.
469 */
470 struct sigframe {
471 siginfo_t sf_si;
472 ucontext_t sf_uc;
473 };
474
/*
 * Deliver signal `ksi' to the current process: build a struct sigframe
 * (siginfo + ucontext) on the user stack — the alternate signal stack
 * when requested — and point the trap frame at the user's handler via
 * the signal trampoline.  On any copyout failure the process is killed
 * with SIGILL.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf;
	ucontext_t uc;
	struct sigframe *fp;
	u_int onstack, oldsp, newsp;
	u_int catcher;
	int sig, error;
	size_t ucsz;

	sig = ksi->ksi_signo;

	tf = l->l_md.md_tf;
	oldsp = tf->tf_out[6];

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (onstack)
		fp = (struct sigframe *)
			((char *)l->l_sigstk.ss_sp +
					l->l_sigstk.ss_size);
	else
		fp = (struct sigframe *)oldsp;

	/* Step down one frame and force 8-byte alignment. */
	fp = (struct sigframe *)((int)(fp - 1) & ~7);

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: %s[%d] sig %d newusp %p si %p uc %p\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_si, &fp->sf_uc);
#endif

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	uc.uc_flags = _UC_SIGMASK |
		((l->l_sigstk.ss_flags & SS_ONSTACK)
			? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = l->l_ctxlink;
	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	sendsig_reset(l, sig);
	/* Drop p_lock across the user-space copies: they may fault/sleep. */
	mutex_exit(p->p_lock);
	newsp = (int)fp - sizeof(struct frame);
	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
	/* Copy out the ucontext only up to its MD padding. */
	ucsz = (int)&uc.__uc_pad - (int)&uc;
	/* Last ustore links the new window back to the interrupted frame. */
	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
	    copyout(&uc, &fp->sf_uc, ucsz) ||
	    ustore_int((u_int *)&((struct rwindow *)newsp)->rw_in[6], oldsp));
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	switch (ps->sa_sigdesc[sig].sd_vers) {
	default:
		/* Unsupported trampoline version; kill the process. */
		sigexit(l, SIGILL);
	case __SIGTRAMP_SIGINFO_VERSION:
		/*
		 * Arrange to continue execution at the user's handler.
		 * It needs a new stack pointer, a return address and
		 * three arguments: (signo, siginfo *, ucontext *).
		 */
		catcher = (u_int)SIGACTION(p, sig).sa_handler;
		tf->tf_pc = catcher;
		tf->tf_npc = catcher + 4;
		tf->tf_out[0] = sig;
		tf->tf_out[1] = (int)&fp->sf_si;
		tf->tf_out[2] = (int)&fp->sf_uc;
		tf->tf_out[6] = newsp;
		/* sd_tramp - 8: a `ret' adds 8, landing on the trampoline. */
		tf->tf_out[7] = (int)ps->sa_sigdesc[sig].sd_tramp - 8;
		break;
	}

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: about to return to catcher\n");
#endif
}
589
590 void
cpu_getmcontext(struct lwp * l,mcontext_t * mcp,unsigned int * flags)591 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
592 {
593 struct trapframe *tf = (struct trapframe *)l->l_md.md_tf;
594 __greg_t *r = mcp->__gregs;
595 __greg_t ras_pc;
596 #ifdef FPU_CONTEXT
597 __fpregset_t *f = &mcp->__fpregs;
598 struct fpstate *fps = l->l_md.md_fpstate;
599 #endif
600
601 /*
602 * Put the stack in a consistent state before we whack away
603 * at it. Note that write_user_windows may just dump the
604 * registers into the pcb; we need them in the process's memory.
605 */
606 write_user_windows();
607 if (rwindow_save(l)) {
608 mutex_enter(l->l_proc->p_lock);
609 sigexit(l, SIGILL);
610 }
611
612 /*
613 * Get the general purpose registers
614 */
615 r[_REG_PSR] = tf->tf_psr;
616 r[_REG_PC] = tf->tf_pc;
617 r[_REG_nPC] = tf->tf_npc;
618 r[_REG_Y] = tf->tf_y;
619 r[_REG_G1] = tf->tf_global[1];
620 r[_REG_G2] = tf->tf_global[2];
621 r[_REG_G3] = tf->tf_global[3];
622 r[_REG_G4] = tf->tf_global[4];
623 r[_REG_G5] = tf->tf_global[5];
624 r[_REG_G6] = tf->tf_global[6];
625 r[_REG_G7] = tf->tf_global[7];
626 r[_REG_O0] = tf->tf_out[0];
627 r[_REG_O1] = tf->tf_out[1];
628 r[_REG_O2] = tf->tf_out[2];
629 r[_REG_O3] = tf->tf_out[3];
630 r[_REG_O4] = tf->tf_out[4];
631 r[_REG_O5] = tf->tf_out[5];
632 r[_REG_O6] = tf->tf_out[6];
633 r[_REG_O7] = tf->tf_out[7];
634
635 if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
636 (void *) r[_REG_PC])) != -1) {
637 r[_REG_PC] = ras_pc;
638 r[_REG_nPC] = ras_pc + 4;
639 }
640
641 *flags |= (_UC_CPU|_UC_TLSBASE);
642
643 #ifdef FPU_CONTEXT
644 /*
645 * Get the floating point registers
646 */
647 memcpy(f->__fpu_regs, fps->fs_regs, sizeof(fps->fs_regs));
648 f->__fp_nqsize = sizeof(struct fp_qentry);
649 f->__fp_nqel = fps->fs_qsize;
650 f->__fp_fsr = fps->fs_fsr;
651 if (f->__fp_q != NULL) {
652 size_t sz = f->__fp_nqel * f->__fp_nqsize;
653 if (sz > sizeof(fps->fs_queue)) {
654 #ifdef DIAGNOSTIC
655 printf("getcontext: fp_queue too large\n");
656 #endif
657 return;
658 }
659 if (copyout(fps->fs_queue, f->__fp_q, sz) != 0) {
660 #ifdef DIAGNOSTIC
661 printf("getcontext: copy of fp_queue failed %d\n",
662 error);
663 #endif
664 return;
665 }
666 }
667 f->fp_busy = 0; /* XXX: How do we determine that? */
668 *flags |= _UC_FPU;
669 #endif
670
671 return;
672 }
673
674 int
cpu_mcontext_validate(struct lwp * l,const mcontext_t * mc)675 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mc)
676 {
677 const __greg_t *gr = mc->__gregs;
678
679 /*
680 * Only the icc bits in the psr are used, so it need not be
681 * verified. pc and npc must be multiples of 4. This is all
682 * that is required; if it holds, just do it.
683 */
684 if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
685 gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
686 return EINVAL;
687
688 return 0;
689 }
690
691 /*
692 * Set to mcontext specified.
693 * Return to previous pc and psl as specified by
694 * context left by sendsig. Check carefully to
695 * make sure that the user has not modified the
696 * psl to gain improper privileges or to cause
697 * a machine fault.
698 * This is almost like sigreturn() and it shows.
699 */
700 int
cpu_setmcontext(struct lwp * l,const mcontext_t * mcp,unsigned int flags)701 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
702 {
703 struct trapframe *tf;
704 const __greg_t *r = mcp->__gregs;
705 struct proc *p = l->l_proc;
706 int error;
707 #ifdef FPU_CONTEXT
708 __fpregset_t *f = &mcp->__fpregs;
709 struct fpstate *fps = l->l_md.md_fpstate;
710 #endif
711
712 write_user_windows();
713 if (rwindow_save(l)) {
714 mutex_enter(p->p_lock);
715 sigexit(l, SIGILL);
716 }
717
718 #ifdef DEBUG
719 if (sigdebug & SDB_FOLLOW)
720 printf("__setmcontext: %s[%d], __mcontext %p\n",
721 l->l_proc->p_comm, l->l_proc->p_pid, mcp);
722 #endif
723
724 if (flags & _UC_CPU) {
725 /* Validate */
726 error = cpu_mcontext_validate(l, mcp);
727 if (error)
728 return error;
729
730 /* Restore register context. */
731 tf = (struct trapframe *)l->l_md.md_tf;
732
733 /* take only psr ICC field */
734 tf->tf_psr = (tf->tf_psr & ~PSR_ICC) |
735 (r[_REG_PSR] & PSR_ICC);
736 tf->tf_pc = r[_REG_PC];
737 tf->tf_npc = r[_REG_nPC];
738 tf->tf_y = r[_REG_Y];
739
740 /* Restore everything */
741 tf->tf_global[1] = r[_REG_G1];
742 tf->tf_global[2] = r[_REG_G2];
743 tf->tf_global[3] = r[_REG_G3];
744 tf->tf_global[4] = r[_REG_G4];
745 tf->tf_global[5] = r[_REG_G5];
746 tf->tf_global[6] = r[_REG_G6];
747 /* done in lwp_setprivate */
748 /* tf->tf_global[7] = r[_REG_G7]; */
749
750 tf->tf_out[0] = r[_REG_O0];
751 tf->tf_out[1] = r[_REG_O1];
752 tf->tf_out[2] = r[_REG_O2];
753 tf->tf_out[3] = r[_REG_O3];
754 tf->tf_out[4] = r[_REG_O4];
755 tf->tf_out[5] = r[_REG_O5];
756 tf->tf_out[6] = r[_REG_O6];
757 tf->tf_out[7] = r[_REG_O7];
758
759 if (flags & _UC_TLSBASE)
760 lwp_setprivate(l, (void *)(uintptr_t)r[_REG_G7]);
761 }
762
763 #ifdef FPU_CONTEXT
764 if (flags & _UC_FPU) {
765 /*
766 * Set the floating point registers
767 */
768 int error;
769 size_t sz = f->__fp_nqel * f->__fp_nqsize;
770 if (sz > sizeof(fps->fs_queue)) {
771 #ifdef DIAGNOSTIC
772 printf("setmcontext: fp_queue too large\n");
773 #endif
774 return (EINVAL);
775 }
776 memcpy(fps->fs_regs, f->__fpu_regs, sizeof(fps->fs_regs));
777 fps->fs_qsize = f->__fp_nqel;
778 fps->fs_fsr = f->__fp_fsr;
779 if (f->__fp_q != NULL) {
780 if ((error = copyin(f->__fp_q, fps->fs_queue, sz)) != 0) {
781 #ifdef DIAGNOSTIC
782 printf("setmcontext: fp_queue copy failed\n");
783 #endif
784 return (error);
785 }
786 }
787 }
788 #endif
789
790 mutex_enter(p->p_lock);
791 if (flags & _UC_SETSTACK)
792 l->l_sigstk.ss_flags |= SS_ONSTACK;
793 if (flags & _UC_CLRSTACK)
794 l->l_sigstk.ss_flags &= ~SS_ONSTACK;
795 mutex_exit(p->p_lock);
796
797 return (0);
798 }
799
/* Set to >= 0 once the reboot-time filesystem sync has been started. */
int waittime = -1;

/*
 * Machine-dependent reboot/halt/powerdown.  `howto' is a mask of RB_*
 * flags; `user_boot_string' optionally names the image/arguments to
 * hand to the PROM on reboot.
 */
void
cpu_reboot(int howto, char *user_boot_string)
{
	int i;
	char opts[4];
	static char str[128];

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

#if NFB > 0
	fb_unblank();
#endif
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {

		/* XXX protect against curlwp->p_stats.foo refs in sync() */
		if (curlwp == NULL)
			curlwp = &lwp0;
		waittime = 0;
		vfs_shutdown();

		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 * resettodr will only do this only if inittodr()
		 * has already been called.
		 */
		resettodr();
	}

	/* Disable interrupts. But still allow IPI on MP systems */
	if (sparc_ncpus > 1)
		(void)splsched();
	else
		(void)splhigh();

#if defined(MULTIPROCESSOR)
	/* Direct system interrupts to this CPU, since dump uses polled I/O */
	if (CPU_ISSUN4M)
		*((u_int *)ICR_ITR) = cpuinfo.mid - 8;
#endif

	/* If rebooting and a dump is requested, do it. */
#if 0
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
#else
	if (howto & RB_DUMP)
#endif
		dumpsys();

 haltsys:

	/* Run any shutdown hooks. */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	/* If powerdown was requested, do it. */
	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		prom_interpret("power-off");
#if NPOWER > 0
		/* Fall back on `power' device if the PROM can't do it */
		powerdown();
#endif
		/* Reached only if neither method powered the machine off. */
		printf("WARNING: powerdown not supported\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
	}

	if (howto & RB_HALT) {
#if defined(MULTIPROCESSOR)
		mp_halt_cpus();
		printf("cpu%d halted\n\n", cpu_number());
#else
		printf("halted\n\n");
#endif
		prom_halt();
	}

	printf("rebooting\n\n");

	/* Build a "-sd"-style option string from the RB_* flags. */
	i = 1;
	if (howto & RB_SINGLE)
		opts[i++] = 's';
	if (howto & RB_KDB)
		opts[i++] = 'd';
	opts[i] = '\0';
	opts[0] = (i > 1) ? '-' : '\0';

	if (user_boot_string && *user_boot_string) {
		i = strlen(user_boot_string);
		if (i > sizeof(str) - sizeof(opts) - 1)
			/* Too long to append options: boot it verbatim. */
			prom_boot(user_boot_string);	/* XXX */
		memcpy(str, user_boot_string, i);
		if (opts[0] != '\0')
			str[i] = ' ';
	}
	strcat(str, opts);
	prom_boot(str);
	/*NOTREACHED*/
}
908
uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;		/* dump offset in disk blocks; 0 = disabled */

/*
 * Compute dumplo/dumpsize for the configured dump device: the dump
 * (MMU state plus all physical memory) is placed at the end of the
 * partition, or left disabled (dumplo == 0) if it does not fit.
 */
void
cpu_dumpconf(void)
{
	int nblks, dumpblks;

	if (dumpdev == NODEV)
		return;
	nblks = bdev_size(dumpdev);

	dumpblks = ctod(physmem) + pmap_dumpsize();
	if (dumpblks > (nblks - ctod(1)))
		/*
		 * dump size is too big for the partition.
		 * Note, we safeguard a click at the front for a
		 * possible disk label.
		 */
		return;

	/* Put the dump at the end of the partition */
	dumplo = nblks - dumpblks;

	/*
	 * savecore(8) expects dumpsize to be the number of pages
	 * of actual core dumped (i.e. excluding the MMU stuff).
	 */
	dumpsize = physmem;
}
940
#define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
static vaddr_t dumpspace;		/* VA window dumpsys() maps chunks through */
struct pcb dumppcb;			/* register snapshot taken at dump time */
944
945 void *
reserve_dumppages(void * p)946 reserve_dumppages(void *p)
947 {
948
949 dumpspace = (vaddr_t)p;
950 return ((char *)p + BYTES_PER_DUMP);
951 }
952
953 /*
954 * Write a crash dump.
955 */
956 void
dumpsys(void)957 dumpsys(void)
958 {
959 const struct bdevsw *bdev;
960 int psize;
961 daddr_t blkno;
962 int (*dump)(dev_t, daddr_t, void *, size_t);
963 int error = 0;
964 struct memarr *mp;
965 int nmem;
966
967 /* copy registers to memory */
968 snapshot(cpuinfo.curpcb);
969 memcpy(&dumppcb, cpuinfo.curpcb, sizeof dumppcb);
970 stackdump();
971
972 if (dumpdev == NODEV)
973 return;
974 bdev = bdevsw_lookup(dumpdev);
975 if (bdev == NULL || bdev->d_psize == NULL)
976 return;
977
978 /*
979 * For dumps during autoconfiguration,
980 * if dump device has already configured...
981 */
982 if (dumpsize == 0)
983 cpu_dumpconf();
984 if (dumplo <= 0) {
985 printf("\ndump to dev %u,%u not possible\n",
986 major(dumpdev), minor(dumpdev));
987 return;
988 }
989 printf("\ndumping to dev %u,%u offset %ld\n",
990 major(dumpdev), minor(dumpdev), dumplo);
991
992 psize = bdev_size(dumpdev);
993 printf("dump ");
994 if (psize == -1) {
995 printf("area unavailable\n");
996 return;
997 }
998 blkno = dumplo;
999 dump = bdev->d_dump;
1000
1001 error = pmap_dumpmmu(dump, blkno);
1002 blkno += pmap_dumpsize();
1003
1004 for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
1005 unsigned i = 0, n;
1006 int maddr = mp->addr;
1007
1008 if (maddr == 0) {
1009 /* Skip first page at physical address 0 */
1010 maddr += PAGE_SIZE;
1011 i += PAGE_SIZE;
1012 blkno += btodb(PAGE_SIZE);
1013 }
1014
1015 for (; i < mp->len; i += n) {
1016 n = mp->len - i;
1017 if (n > BYTES_PER_DUMP)
1018 n = BYTES_PER_DUMP;
1019
1020 /* print out how many MBs we have dumped */
1021 if (i && (i % (1024*1024)) == 0)
1022 printf_nolog("%d ", i / (1024*1024));
1023
1024 (void) pmap_map(dumpspace, maddr, maddr + n,
1025 VM_PROT_READ);
1026 error = (*dump)(dumpdev, blkno,
1027 (void *)dumpspace, (int)n);
1028 pmap_kremove(dumpspace, n);
1029 pmap_update(pmap_kernel());
1030 if (error)
1031 break;
1032 maddr += n;
1033 blkno += btodb(n);
1034 }
1035 }
1036
1037 switch (error) {
1038
1039 case ENXIO:
1040 printf("device bad\n");
1041 break;
1042
1043 case EFAULT:
1044 printf("device not ready\n");
1045 break;
1046
1047 case EINVAL:
1048 printf("area improper\n");
1049 break;
1050
1051 case EIO:
1052 printf("i/o error\n");
1053 break;
1054
1055 case 0:
1056 printf("succeeded\n");
1057 break;
1058
1059 default:
1060 printf("error %d\n", error);
1061 break;
1062 }
1063 }
1064
1065 /*
1066 * get the fp and dump the stack as best we can. don't leave the
1067 * current stack page
1068 */
1069 void
stackdump(void)1070 stackdump(void)
1071 {
1072 struct frame *fp = getfp(), *sfp;
1073
1074 sfp = fp;
1075 printf("Frame pointer is at %p\n", fp);
1076 printf("Call traceback:\n");
1077 while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
1078 printf(" pc = 0x%x args = (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp = %p\n",
1079 fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
1080 fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
1081 fp = fp->fr_fp;
1082 }
1083 }
1084
/*
 * a.out executables are not supported by this kernel configuration:
 * unconditionally refuse, so exec falls through to the other format
 * handlers.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{

	return ENOEXEC;
}
1091
#if defined(SUN4)
/*
 * Old-monitor `w' trace helper: print UVM event counters for this CPU,
 * then walk the call chain starting at frame pointer `va', stopping
 * when the walk leaves the page `va' lies on or hits a NULL frame.
 */
void
oldmon_w_trace(u_long va)
{
	struct cpu_info * const ci = curcpu();
	u_long stop;
	struct frame *fp;

	printf("curlwp = %p, pid %d\n", curlwp, curproc->p_pid);

	printf("uvm: cpu%u: swtch %"PRIu64", trap %"PRIu64", sys %"PRIu64", "
	    "intr %"PRIu64", soft %"PRIu64", faults %"PRIu64"\n",
	    cpu_index(ci), ci->ci_data.cpu_nswtch, ci->ci_data.cpu_ntrap,
	    ci->ci_data.cpu_nsyscall, ci->ci_data.cpu_nintr,
	    ci->ci_data.cpu_nsoft, ci->ci_data.cpu_nfault);
	/* Flush on-chip register windows so the stack can be walked. */
	write_user_windows();

#define round_up(x) (( (x) + (PAGE_SIZE-1) ) & (~(PAGE_SIZE-1)) )

	printf("\nstack trace with sp = 0x%lx\n", va);
	stop = round_up(va);	/* end of the page containing `va' */
	printf("stop at 0x%lx\n", stop);
	fp = (struct frame *) va;
	while (round_up((u_long) fp) == stop) {
		printf(" 0x%x(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp %p\n", fp->fr_pc,
		    fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
		    fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
		fp = fp->fr_fp;
		if (fp == NULL)
			break;
	}
	printf("end of stack trace\n");
}
1125
1126 void
oldmon_w_cmd(u_long va,char * ar)1127 oldmon_w_cmd(u_long va, char *ar)
1128 {
1129 switch (*ar) {
1130 case '\0':
1131 switch (va) {
1132 case 0:
1133 panic("g0 panic");
1134 case 4:
1135 printf("w: case 4\n");
1136 break;
1137 default:
1138 printf("w: unknown case %ld\n", va);
1139 break;
1140 }
1141 break;
1142 case 't':
1143 oldmon_w_trace(va);
1144 break;
1145 default:
1146 printf("w: arg not allowed\n");
1147 }
1148 }
1149
/*
 * Load a byte from the given address in control space, catching any
 * fault via the assembly helper xldcontrolb().  Only meaningful on
 * sun4/sun4c; on sun4m/sun4d it warns and returns 0.
 */
int
ldcontrolb(void *addr)
{
	struct pcb *xpcb;
	u_long saveonfault;
	int res;
	int s;

	if (CPU_ISSUN4M || CPU_ISSUN4D) {
		printf("warning: ldcontrolb called on sun4m/sun4d\n");
		return 0;
	}

	/* Block interrupts while the pcb's fault handler is borrowed. */
	s = splhigh();
	xpcb = lwp_getpcb(curlwp);

	/* xldcontrolb() installs its own onfault handler; save and restore. */
	saveonfault = (u_long)xpcb->pcb_onfault;
	res = xldcontrolb(addr, xpcb);
	xpcb->pcb_onfault = (void *)saveonfault;

	splx(s);
	return (res);
}
1173 #endif /* SUN4 */
1174
1175 void
wzero(void * vb,u_int l)1176 wzero(void *vb, u_int l)
1177 {
1178 u_char *b = vb;
1179 u_char *be = b + l;
1180 u_short *sp;
1181
1182 if (l == 0)
1183 return;
1184
1185 /* front, */
1186 if ((u_long)b & 1)
1187 *b++ = 0;
1188
1189 /* back, */
1190 if (b != be && ((u_long)be & 1) != 0) {
1191 be--;
1192 *be = 0;
1193 }
1194
1195 /* and middle. */
1196 sp = (u_short *)b;
1197 while (sp != (u_short *)be)
1198 *sp++ = 0;
1199 }
1200
/*
 * Copy `l' bytes from `vb1' to `vb2' using 16-bit loads from the
 * source wherever alignment allows, falling back to byte accesses at
 * the edges and for an odd-aligned destination.
 *
 * NOTE(review): the odd-destination path stores the high byte of each
 * halfword first (b2[0] = *sp >> 8), i.e. it preserves source byte
 * order on a big-endian host such as sparc.
 */
void
wcopy(const void *vb1, void *vb2, u_int l)
{
	const u_char *b1e, *b1 = vb1;
	u_char *b2 = vb2;
	const u_short *sp;
	int bstore = 0;

	if (l == 0)
		return;

	/* front, */
	if ((u_long)b1 & 1) {
		*b2++ = *b1++;
		l--;
	}

	/* middle, */
	sp = (const u_short *)b1;
	b1e = b1 + l;
	if (l & 1)
		b1e--;
	/* Destination may still be odd; then store each word a byte at a time. */
	bstore = (u_long)b2 & 1;

	while (sp < (const u_short *)b1e) {
		if (bstore) {
			b2[1] = *sp & 0xff;
			b2[0] = *sp >> 8;
		} else
			*((short *)b2) = *sp;
		sp++;
		b2 += 2;
	}

	/* and back. */
	if (l & 1)
		*b2 = *b1e;
}
1239
1240 #ifdef MODULAR
/*
 * Machine-dependent kernel module initialization; nothing is needed
 * on this platform.
 */
void
module_init_md(void)
{
}
1245 #endif
1246
1247 static size_t
_bus_dmamap_mapsize(int const nsegments)1248 _bus_dmamap_mapsize(int const nsegments)
1249 {
1250 KASSERT(nsegments > 0);
1251 return sizeof(struct sparc_bus_dmamap) +
1252 (sizeof(bus_dma_segment_t) * (nsegments - 1));
1253 }
1254
1255 /*
1256 * Common function for DMA map creation. May be called by bus-specific
1257 * DMA map creation functions.
1258 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags,
    bus_dmamap_t *dmamp)
{
	struct sparc_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map. The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	/* Record the creation parameters; the map starts out empty. */
	map = (struct sparc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_align = PAGE_SIZE;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
1297
1298 /*
1299 * Common function for DMA map destruction. May be called by bus-specific
1300 * DMA map destruction functions.
1301 */
1302 void
_bus_dmamap_destroy(bus_dma_tag_t t,bus_dmamap_t map)1303 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
1304 {
1305
1306 kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
1307 }
1308
1309 /*
1310 * Like _bus_dmamap_load(), but for mbufs.
1311 */
1312 int
_bus_dmamap_load_mbuf(bus_dma_tag_t t,bus_dmamap_t map,struct mbuf * m,int flags)1313 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
1314 struct mbuf *m, int flags)
1315 {
1316
1317 panic("_bus_dmamap_load_mbuf: not implemented");
1318 }
1319
1320 /*
1321 * Like _bus_dmamap_load(), but for uios.
1322 */
1323 int
_bus_dmamap_load_uio(bus_dma_tag_t t,bus_dmamap_t map,struct uio * uio,int flags)1324 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
1325 struct uio *uio, int flags)
1326 {
1327
1328 panic("_bus_dmamap_load_uio: not implemented");
1329 }
1330
1331 /*
1332 * Like _bus_dmamap_load(), but for raw memory allocated with
1333 * bus_dmamem_alloc().
1334 */
1335 int
_bus_dmamap_load_raw(bus_dma_tag_t t,bus_dmamap_t map,bus_dma_segment_t * segs,int nsegs,bus_size_t size,int flags)1336 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
1337 bus_dma_segment_t *segs, int nsegs, bus_size_t size,
1338 int flags)
1339 {
1340
1341 panic("_bus_dmamap_load_raw: not implemented");
1342 }
1343
1344 /*
1345 * Common function for DMA map synchronization. May be called
1346 * by bus-specific DMA map synchronization functions.
1347 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t offset, bus_size_t len, int ops)
{
	/*
	 * Intentionally empty: no sync work is required here —
	 * presumably because DVMA pages are entered uncached (PMAP_NC
	 * in the load routines).  NOTE(review): confirm for all buses
	 * that reuse this function.
	 */
}
1353
1354 /*
1355 * Common function for DMA-safe memory allocation. May be called
1356 * by bus-specific DMA memory allocation functions.
1357 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary,
    bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	vaddr_t low, high;
	struct pglist *mlist;
	int error;

	/* Always round the size. */
	size = round_page(size);
	/* Constrain the allocation to the managed physical range. */
	low = vm_first_phys;
	high = vm_first_phys + vm_num_phys - PAGE_SIZE;

	/* The page list header travels with the segment (_ds_mlist). */
	if ((mlist = kmem_alloc(sizeof(*mlist),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, 0, 0,
	    mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error) {
		kmem_free(mlist, sizeof(*mlist));
		return (error);
	}

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
	 * ARE IN OUR CUSTODY.
	 */
	segs[0]._ds_mlist = mlist;

	/*
	 * We now have physical pages, but no DVMA addresses yet. These
	 * will be allocated in bus_dmamap_load*() routines. Hence we
	 * save any alignment and boundary requirements in this DMA
	 * segment.
	 */
	segs[0].ds_addr = 0;
	segs[0].ds_len = 0;
	segs[0]._ds_va = 0;
	*rsegs = 1;
	return (0);
}
1408
1409 /*
1410 * Common function for freeing DMA-safe memory. May be called by
1411 * bus-specific DMA memory free functions.
1412 */
1413 void
_bus_dmamem_free(bus_dma_tag_t t,bus_dma_segment_t * segs,int nsegs)1414 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1415 {
1416 struct pglist *mlist = segs[0]._ds_mlist;
1417
1418 if (nsegs != 1)
1419 panic("bus_dmamem_free: nsegs = %d", nsegs);
1420
1421 /*
1422 * Return the list of pages back to the VM system.
1423 */
1424 uvm_pglistfree(mlist);
1425 kmem_free(mlist, sizeof(*mlist));
1426 }
1427
1428 /*
1429 * Common function for unmapping DMA-safe memory. May be called by
1430 * bus-specific DMA memory unmapping functions.
1431 */
1432 void
_bus_dmamem_unmap(bus_dma_tag_t t,void * kva,size_t size)1433 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1434 {
1435
1436 #ifdef DIAGNOSTIC
1437 if ((u_long)kva & PAGE_MASK)
1438 panic("_bus_dmamem_unmap");
1439 #endif
1440
1441 size = round_page(size);
1442 pmap_kremove((vaddr_t)kva, size);
1443 pmap_update(pmap_kernel());
1444 uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1445 }
1446
1447 /*
1448 * Common function for mmap(2)'ing DMA-safe memory. May be called by
1449 * bus-specific DMA mmap(2)'ing functions.
1450 */
1451 paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t,bus_dma_segment_t * segs,int nsegs,off_t off,int prot,int flags)1452 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1453 off_t off, int prot, int flags)
1454 {
1455
1456 panic("_bus_dmamem_mmap: not implemented");
1457 }
1458
1459 /*
1460 * Utility to allocate an aligned kernel virtual address range
1461 */
vaddr_t
_bus_dma_valloc_skewed(size_t size, u_long boundary, u_long align, u_long skew)
{
	size_t oversize;
	vaddr_t va, sva;

	/*
	 * Find a region of kernel virtual addresses that is aligned
	 * to the given address modulo the requested alignment, i.e.
	 *
	 *	(va - skew) == 0 mod align
	 *
	 * The following conditions apply to the arguments:
	 *
	 *	- `size' must be a multiple of the VM page size
	 *	- `align' must be a power of two
	 *	   and greater than or equal to the VM page size
	 *	- `skew' must be smaller than `align'
	 *	- `size' must be smaller than `boundary'
	 */

#ifdef DIAGNOSTIC
	if ((size & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid size %lx", size);
	if ((align & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid alignment %lx", align);
	if (align < skew)
		panic("_bus_dma_valloc_skewed: align %lx < skew %lx",
		    align, skew);
#endif

	/* XXX - Implement this! */
	if (boundary) {
		printf("_bus_dma_valloc_skewed: "
		    "boundary check not implemented");
		return (0);
	}

	/*
	 * First, find a region large enough to contain any aligned chunk
	 */
	oversize = size + align - PAGE_SIZE;
	sva = vm_map_min(kernel_map);
	if (uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOWAIT)))
		return (0);

	/*
	 * Compute start of aligned region
	 */
	va = sva;
	va += (skew + align - va) & (align - 1);

	/*
	 * Return excess virtual addresses
	 */
	/* Trim the unused head and tail of the oversized reservation. */
	if (va != sva)
		(void)uvm_unmap(kernel_map, sva, va);
	if (va + size != sva + oversize)
		(void)uvm_unmap(kernel_map, va + size, sva + oversize);

	return (va);
}
1526
1527 /* sun4/sun4c DMA map functions */
1528 int sun4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
1529 bus_size_t, struct proc *, int);
1530 int sun4_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
1531 bus_dma_segment_t *, int, bus_size_t, int);
1532 void sun4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
1533 int sun4_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
1534 int, size_t, void **, int);
1535
1536 /*
1537 * sun4/sun4c: load DMA map with a linear buffer.
1538 */
int
sun4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	vaddr_t dva;
	pmap_t pmap;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/* Write back any cached data before the device sees the buffer. */
	cache_flush(buf, buflen);

	if ((map->_dm_flags & BUS_DMA_24BIT) == 0) {
		/*
		 * XXX Need to implement "don't DMA across this boundary".
		 */
		if (map->_dm_boundary != 0) {
			bus_addr_t baddr;

			/* Calculate first boundary line after `buf' */
			baddr = ((bus_addr_t)va + map->_dm_boundary) &
			    -map->_dm_boundary;

			/*
			 * If the requested segment crosses the boundary,
			 * we can't grant a direct map. For now, steal some
			 * space from the `24BIT' map instead.
			 *
			 * (XXX - no overflow detection here)
			 */
			if (buflen > (baddr - (bus_addr_t)va))
				goto no_fit;
		}
		/* Direct map: use the buffer's VA as the DMA address. */
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

 no_fit:
	sgsize = round_page(buflen + (va & (pagesz - 1)));

	/* Carve a DVMA range out of the 24-bit extent map. */
	if (extent_alloc(dvmamap24, sgsize, pagesz, map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
	    &dva) != 0) {
		return (ENOMEM);
	}

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		paddr_t pa;

		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

#ifdef notyet
#if defined(SUN4)
		if (have_iocache)
			pa |= PG_IOC;
#endif
#endif
		/* Enter the page uncached at the allocated DVMA address. */
		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	map->dm_nsegs = 1;
	return (0);
}
1644
1645 /*
1646 * Like _bus_dmamap_load(), but for raw memory allocated with
1647 * bus_dmamem_alloc().
1648 */
int
sun4_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size,
    int flags)
{
	struct vm_page *m;
	paddr_t pa;
	vaddr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	/* Start with no valid mappings in case of early error return. */
	map->dm_nsegs = 0;
	sgsize = (size + pagesz - 1) & -pagesz;

	/* Allocate DVMA addresses */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		error = extent_alloc(dvmamap24, sgsize, pagesz,
		    map->_dm_boundary,
		    (flags & BUS_DMA_NOWAIT) == 0
		    ? EX_WAITOK : EX_NOWAIT,
		    &dva);
		if (error)
			return (error);
	} else {
		/* Any properly aligned virtual address will do */
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
		    pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into IOMMU */
	/* Walk the page list stashed by _bus_dmamem_alloc(). */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		if (sgsize == 0)
			panic("sun4_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
#ifdef notyet
#if defined(SUN4)
		if (have_iocache)
			pa |= PG_IOC;
#endif
#endif
		/* Enter each page uncached at the next DVMA address. */
		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}
1711
1712 /*
1713 * sun4/sun4c function for unloading a DMA map.
1714 */
void
sun4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	int flags = map->_dm_flags;
	vaddr_t dva;
	bus_size_t len;
	int i, s, error;

	/* Restore the per-transfer segment size limit set at creation. */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;

	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
		/* Nothing to release */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
		return;
	}

	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		pmap_kremove(dva, len);

		if ((flags & BUS_DMA_24BIT) != 0) {
			/* DVMA range came from the 24-bit extent map. */
			s = splhigh();
			error = extent_free(dvmamap24, dva, len, EX_NOWAIT);
			splx(s);
			if (error != 0)
				printf("warning: %ld of DVMA space lost\n", len);
		} else {
			/* DVMA range came from _bus_dma_valloc_skewed(). */
			uvm_unmap(kernel_map, dva, dva + len);
		}
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
1757
1758 /*
1759 * Common function for mapping DMA-safe memory. May be called by
1760 * bus-specific DMA memory map functions.
1761 */
int
sun4_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	struct pglist *mlist;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	if (nsegs != 1)
		panic("sun4_dmamem_map: nsegs = %d", nsegs);

	size = round_page(size);

	/* Reserve a VA-only range; pages are entered below. */
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	/* Map each page from the list saved by _bus_dmamem_alloc(). */
	mlist = segs[0]._ds_mlist;
	TAILQ_FOREACH(m, mlist, pageq.queue) {
		paddr_t pa;

		if (size == 0)
			panic("sun4_dmamem_map: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		/* Enter uncached so CPU and device views stay coherent. */
		pmap_kenter_pa(va, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
1802
1803
/*
 * DMA tag for devices on the sun4/sun4c main bus: sun4-specific
 * load/unload/map routines, common implementations for the rest.
 */
struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,			/* no private cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sun4_dmamap_load,
	_bus_dmamap_load_mbuf,	/* panics: not implemented */
	_bus_dmamap_load_uio,	/* panics: not implemented */
	sun4_dmamap_load_raw,
	sun4_dmamap_unload,
	_bus_dmamap_sync,	/* no-op */

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sun4_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap	/* panics: not implemented */
};
1821
1822
1823 /*
1824 * Base bus space handlers.
1825 */
1826 static int sparc_bus_map(bus_space_tag_t, bus_addr_t,
1827 bus_size_t, int, vaddr_t,
1828 bus_space_handle_t *);
1829 static int sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t,
1830 bus_size_t);
1831 static int sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t,
1832 bus_size_t, bus_size_t,
1833 bus_space_handle_t *);
1834 static paddr_t sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t,
1835 int, int);
1836 static void *sparc_mainbus_intr_establish(bus_space_tag_t, int, int,
1837 int (*)(void *),
1838 void *,
1839 void (*)(void));
1840 static void sparc_bus_barrier(bus_space_tag_t, bus_space_handle_t,
1841 bus_size_t, bus_size_t, int);
1842
1843 int
bus_space_map(bus_space_tag_t t,bus_addr_t a,bus_size_t s,int f,bus_space_handle_t * hp)1844 bus_space_map(
1845 bus_space_tag_t t,
1846 bus_addr_t a,
1847 bus_size_t s,
1848 int f,
1849 bus_space_handle_t *hp)
1850 {
1851 return (*t->sparc_bus_map)(t, a, s, f, (vaddr_t)0, hp);
1852 }
1853
1854 int
bus_space_map2(bus_space_tag_t t,bus_addr_t a,bus_size_t s,int f,vaddr_t v,bus_space_handle_t * hp)1855 bus_space_map2(
1856 bus_space_tag_t t,
1857 bus_addr_t a,
1858 bus_size_t s,
1859 int f,
1860 vaddr_t v,
1861 bus_space_handle_t *hp)
1862 {
1863 return (*t->sparc_bus_map)(t, a, s, f, v, hp);
1864 }
1865
1866 void
bus_space_unmap(bus_space_tag_t t,bus_space_handle_t h,bus_size_t s)1867 bus_space_unmap(
1868 bus_space_tag_t t,
1869 bus_space_handle_t h,
1870 bus_size_t s)
1871 {
1872 (*t->sparc_bus_unmap)(t, h, s);
1873 }
1874
1875 int
bus_space_subregion(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,bus_size_t s,bus_space_handle_t * hp)1876 bus_space_subregion(
1877 bus_space_tag_t t,
1878 bus_space_handle_t h,
1879 bus_size_t o,
1880 bus_size_t s,
1881 bus_space_handle_t *hp)
1882 {
1883 return (*t->sparc_bus_subregion)(t, h, o, s, hp);
1884 }
1885
1886 paddr_t
bus_space_mmap(bus_space_tag_t t,bus_addr_t a,off_t o,int p,int f)1887 bus_space_mmap(
1888 bus_space_tag_t t,
1889 bus_addr_t a,
1890 off_t o,
1891 int p,
1892 int f)
1893 {
1894 return (*t->sparc_bus_mmap)(t, a, o, p, f);
1895 }
1896
1897 void *
bus_intr_establish(bus_space_tag_t t,int p,int l,int (* h)(void *),void * a)1898 bus_intr_establish(
1899 bus_space_tag_t t,
1900 int p,
1901 int l,
1902 int (*h)(void *),
1903 void *a)
1904 {
1905 return (*t->sparc_intr_establish)(t, p, l, h, a, NULL);
1906 }
1907
1908 void *
bus_intr_establish2(bus_space_tag_t t,int p,int l,int (* h)(void *),void * a,void (* v)(void))1909 bus_intr_establish2(
1910 bus_space_tag_t t,
1911 int p,
1912 int l,
1913 int (*h)(void *),
1914 void *a,
1915 void (*v)(void))
1916 {
1917 return (*t->sparc_intr_establish)(t, p, l, h, a, v);
1918 }
1919
1920 void
bus_space_barrier(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,bus_size_t s,int f)1921 bus_space_barrier(
1922 bus_space_tag_t t,
1923 bus_space_handle_t h,
1924 bus_size_t o,
1925 bus_size_t s,
1926 int f)
1927 {
1928 (*t->sparc_bus_barrier)(t, h, o, s, f);
1929 }
1930
1931 void
bus_space_write_multi_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)1932 bus_space_write_multi_stream_2(
1933 bus_space_tag_t t,
1934 bus_space_handle_t h,
1935 bus_size_t o,
1936 const uint16_t *a,
1937 bus_size_t c)
1938 {
1939 while (c-- > 0)
1940 bus_space_write_2_real(t, h, o, *a++);
1941 }
1942
1943 void
bus_space_write_multi_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)1944 bus_space_write_multi_stream_4(
1945 bus_space_tag_t t,
1946 bus_space_handle_t h,
1947 bus_size_t o,
1948 const uint32_t *a,
1949 bus_size_t c)
1950 {
1951 while (c-- > 0)
1952 bus_space_write_4_real(t, h, o, *a++);
1953 }
1954
1955 void
bus_space_write_multi_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)1956 bus_space_write_multi_stream_8(
1957 bus_space_tag_t t,
1958 bus_space_handle_t h,
1959 bus_size_t o,
1960 const uint64_t *a,
1961 bus_size_t c)
1962 {
1963 while (c-- > 0)
1964 bus_space_write_8_real(t, h, o, *a++);
1965 }
1966
1967
1968 /*
1969 * void bus_space_set_multi_N(bus_space_tag_t tag,
1970 * bus_space_handle_t bsh, bus_size_t offset, u_intN_t val,
1971 * bus_size_t count);
1972 *
1973 * Write the 1, 2, 4, or 8 byte value `val' to bus space described
1974 * by tag/handle/offset `count' times.
1975 */
1976 void
bus_space_set_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)1977 bus_space_set_multi_1(
1978 bus_space_tag_t t,
1979 bus_space_handle_t h,
1980 bus_size_t o,
1981 const uint8_t v,
1982 bus_size_t c)
1983 {
1984 while (c-- > 0)
1985 bus_space_write_1(t, h, o, v);
1986 }
1987
1988 void
bus_space_set_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t v,bus_size_t c)1989 bus_space_set_multi_2(
1990 bus_space_tag_t t,
1991 bus_space_handle_t h,
1992 bus_size_t o,
1993 const uint16_t v,
1994 bus_size_t c)
1995 {
1996 while (c-- > 0)
1997 bus_space_write_2(t, h, o, v);
1998 }
1999
2000 void
bus_space_set_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t v,bus_size_t c)2001 bus_space_set_multi_4(
2002 bus_space_tag_t t,
2003 bus_space_handle_t h,
2004 bus_size_t o,
2005 const uint32_t v,
2006 bus_size_t c)
2007 {
2008 while (c-- > 0)
2009 bus_space_write_4(t, h, o, v);
2010 }
2011
2012 void
bus_space_set_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t v,bus_size_t c)2013 bus_space_set_multi_8(
2014 bus_space_tag_t t,
2015 bus_space_handle_t h,
2016 bus_size_t o,
2017 const uint64_t v,
2018 bus_size_t c)
2019 {
2020 while (c-- > 0)
2021 bus_space_write_8(t, h, o, v);
2022 }
2023
2024
2025 /*
2026 * void bus_space_read_region_N(bus_space_tag_t tag,
2027 * bus_space_handle_t bsh, bus_size_t off,
2028 * u_intN_t *addr, bus_size_t count);
2029 *
2030 */
2031 void
bus_space_read_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)2032 bus_space_read_region_1(
2033 bus_space_tag_t t,
2034 bus_space_handle_t h,
2035 bus_size_t o,
2036 uint8_t *a,
2037 bus_size_t c)
2038 {
2039 for (; c; a++, c--, o++)
2040 *a = bus_space_read_1(t, h, o);
2041 }
2042
2043 void
bus_space_read_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2044 bus_space_read_region_2(
2045 bus_space_tag_t t,
2046 bus_space_handle_t h,
2047 bus_size_t o,
2048 uint16_t *a,
2049 bus_size_t c)
2050 {
2051 for (; c; a++, c--, o+=2)
2052 *a = bus_space_read_2(t, h, o);
2053 }
2054
2055 void
bus_space_read_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2056 bus_space_read_region_4(
2057 bus_space_tag_t t,
2058 bus_space_handle_t h,
2059 bus_size_t o,
2060 uint32_t *a,
2061 bus_size_t c)
2062 {
2063 for (; c; a++, c--, o+=4)
2064 *a = bus_space_read_4(t, h, o);
2065 }
2066
2067 void
bus_space_read_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2068 bus_space_read_region_8(
2069 bus_space_tag_t t,
2070 bus_space_handle_t h,
2071 bus_size_t o,
2072 uint64_t *a,
2073 bus_size_t c)
2074 {
2075 for (; c; a++, c--, o+=8)
2076 *a = bus_space_read_8(t, h, o);
2077 }
2078
2079 /*
2080 * void bus_space_write_region_N(bus_space_tag_t tag,
2081 * bus_space_handle_t bsh, bus_size_t off,
2082 * u_intN_t *addr, bus_size_t count);
2083 *
2084 */
2085 void
bus_space_write_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)2086 bus_space_write_region_1(
2087 bus_space_tag_t t,
2088 bus_space_handle_t h,
2089 bus_size_t o,
2090 const uint8_t *a,
2091 bus_size_t c)
2092 {
2093 for (; c; a++, c--, o++)
2094 bus_space_write_1(t, h, o, *a);
2095 }
2096
2097 void
bus_space_write_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)2098 bus_space_write_region_2(
2099 bus_space_tag_t t,
2100 bus_space_handle_t h,
2101 bus_size_t o,
2102 const uint16_t *a,
2103 bus_size_t c)
2104 {
2105 for (; c; a++, c--, o+=2)
2106 bus_space_write_2(t, h, o, *a);
2107 }
2108
2109 void
bus_space_write_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)2110 bus_space_write_region_4(
2111 bus_space_tag_t t,
2112 bus_space_handle_t h,
2113 bus_size_t o,
2114 const uint32_t *a,
2115 bus_size_t c)
2116 {
2117 for (; c; a++, c--, o+=4)
2118 bus_space_write_4(t, h, o, *a);
2119 }
2120
2121 void
bus_space_write_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)2122 bus_space_write_region_8(
2123 bus_space_tag_t t,
2124 bus_space_handle_t h,
2125 bus_size_t o,
2126 const uint64_t *a,
2127 bus_size_t c)
2128 {
2129 for (; c; a++, c--, o+=8)
2130 bus_space_write_8(t, h, o, *a);
2131 }
2132
2133
2134 /*
2135 * void bus_space_set_region_N(bus_space_tag_t tag,
2136 * bus_space_handle_t bsh, bus_size_t off,
2137 * u_intN_t *addr, bus_size_t count);
2138 *
2139 */
2140 void
bus_space_set_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)2141 bus_space_set_region_1(
2142 bus_space_tag_t t,
2143 bus_space_handle_t h,
2144 bus_size_t o,
2145 const uint8_t v,
2146 bus_size_t c)
2147 {
2148 for (; c; c--, o++)
2149 bus_space_write_1(t, h, o, v);
2150 }
2151
2152 void
bus_space_set_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t v,bus_size_t c)2153 bus_space_set_region_2(
2154 bus_space_tag_t t,
2155 bus_space_handle_t h,
2156 bus_size_t o,
2157 const uint16_t v,
2158 bus_size_t c)
2159 {
2160 for (; c; c--, o+=2)
2161 bus_space_write_2(t, h, o, v);
2162 }
2163
2164 void
bus_space_set_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t v,bus_size_t c)2165 bus_space_set_region_4(
2166 bus_space_tag_t t,
2167 bus_space_handle_t h,
2168 bus_size_t o,
2169 const uint32_t v,
2170 bus_size_t c)
2171 {
2172 for (; c; c--, o+=4)
2173 bus_space_write_4(t, h, o, v);
2174 }
2175
2176 void
bus_space_set_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t v,bus_size_t c)2177 bus_space_set_region_8(
2178 bus_space_tag_t t,
2179 bus_space_handle_t h,
2180 bus_size_t o,
2181 const uint64_t v,
2182 bus_size_t c)
2183 {
2184 for (; c; c--, o+=8)
2185 bus_space_write_8(t, h, o, v);
2186 }
2187
2188
2189 /*
2190 * void bus_space_copy_region_N(bus_space_tag_t tag,
2191 * bus_space_handle_t bsh1, bus_size_t off1,
2192 * bus_space_handle_t bsh2, bus_size_t off2,
2193 * bus_size_t count);
2194 *
 * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
 * at tag/bsh2/off2 to bus space starting at tag/bsh1/off1.
2197 */
2198 void
bus_space_copy_region_1(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2199 bus_space_copy_region_1(
2200 bus_space_tag_t t,
2201 bus_space_handle_t h1,
2202 bus_size_t o1,
2203 bus_space_handle_t h2,
2204 bus_size_t o2,
2205 bus_size_t c)
2206 {
2207 for (; c; c--, o1++, o2++)
2208 bus_space_write_1(t, h1, o1, bus_space_read_1(t, h2, o2));
2209 }
2210
2211 void
bus_space_copy_region_2(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2212 bus_space_copy_region_2(
2213 bus_space_tag_t t,
2214 bus_space_handle_t h1,
2215 bus_size_t o1,
2216 bus_space_handle_t h2,
2217 bus_size_t o2,
2218 bus_size_t c)
2219 {
2220 for (; c; c--, o1+=2, o2+=2)
2221 bus_space_write_2(t, h1, o1, bus_space_read_2(t, h2, o2));
2222 }
2223
2224 void
bus_space_copy_region_4(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2225 bus_space_copy_region_4(
2226 bus_space_tag_t t,
2227 bus_space_handle_t h1,
2228 bus_size_t o1,
2229 bus_space_handle_t h2,
2230 bus_size_t o2,
2231 bus_size_t c)
2232 {
2233 for (; c; c--, o1+=4, o2+=4)
2234 bus_space_write_4(t, h1, o1, bus_space_read_4(t, h2, o2));
2235 }
2236
2237 void
bus_space_copy_region_8(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2238 bus_space_copy_region_8(
2239 bus_space_tag_t t,
2240 bus_space_handle_t h1,
2241 bus_size_t o1,
2242 bus_space_handle_t h2,
2243 bus_size_t o2,
2244 bus_size_t c)
2245 {
2246 for (; c; c--, o1+=8, o2+=8)
2247 bus_space_write_8(t, h1, o1, bus_space_read_8(t, h2, o2));
2248 }
2249
2250 /*
2251 * void bus_space_read_region_stream_N(bus_space_tag_t tag,
2252 * bus_space_handle_t bsh, bus_size_t off,
2253 * u_intN_t *addr, bus_size_t count);
2254 *
2255 */
2256 void
bus_space_read_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)2257 bus_space_read_region_stream_1(
2258 bus_space_tag_t t,
2259 bus_space_handle_t h,
2260 bus_size_t o,
2261 uint8_t *a,
2262 bus_size_t c)
2263 {
2264 for (; c; a++, c--, o++)
2265 *a = bus_space_read_stream_1(t, h, o);
2266 }
2267 void
bus_space_read_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2268 bus_space_read_region_stream_2(
2269 bus_space_tag_t t,
2270 bus_space_handle_t h,
2271 bus_size_t o,
2272 uint16_t *a,
2273 bus_size_t c)
2274 {
2275 for (; c; a++, c--, o+=2)
2276 *a = bus_space_read_stream_2(t, h, o);
2277 }
2278 void
bus_space_read_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2279 bus_space_read_region_stream_4(
2280 bus_space_tag_t t,
2281 bus_space_handle_t h,
2282 bus_size_t o,
2283 uint32_t *a,
2284 bus_size_t c)
2285 {
2286 for (; c; a++, c--, o+=4)
2287 *a = bus_space_read_stream_4(t, h, o);
2288 }
2289 void
bus_space_read_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2290 bus_space_read_region_stream_8(
2291 bus_space_tag_t t,
2292 bus_space_handle_t h,
2293 bus_size_t o,
2294 uint64_t *a,
2295 bus_size_t c)
2296 {
2297 for (; c; a++, c--, o+=8)
2298 *a = bus_space_read_stream_8(t, h, o);
2299 }
2300
/*
 * void bus_space_write_region_stream_N(bus_space_tag_t tag,
 *	bus_space_handle_t bsh, bus_size_t off,
 *	const u_intN_t *addr, bus_size_t count);
 *
 * Write `count' 1, 2, 4, or 8 byte values from the buffer `addr' to
 * consecutive bus space locations starting at tag/bsh/off.
 */
2307 void
bus_space_write_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)2308 bus_space_write_region_stream_1(
2309 bus_space_tag_t t,
2310 bus_space_handle_t h,
2311 bus_size_t o,
2312 const uint8_t *a,
2313 bus_size_t c)
2314 {
2315 for (; c; a++, c--, o++)
2316 bus_space_write_stream_1(t, h, o, *a);
2317 }
2318
2319 void
bus_space_write_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)2320 bus_space_write_region_stream_2(
2321 bus_space_tag_t t,
2322 bus_space_handle_t h,
2323 bus_size_t o,
2324 const uint16_t *a,
2325 bus_size_t c)
2326 {
2327 for (; c; a++, c--, o+=2)
2328 bus_space_write_stream_2(t, h, o, *a);
2329 }
2330
2331 void
bus_space_write_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)2332 bus_space_write_region_stream_4(
2333 bus_space_tag_t t,
2334 bus_space_handle_t h,
2335 bus_size_t o,
2336 const uint32_t *a,
2337 bus_size_t c)
2338 {
2339 for (; c; a++, c--, o+=4)
2340 bus_space_write_stream_4(t, h, o, *a);
2341 }
2342
2343 void
bus_space_write_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)2344 bus_space_write_region_stream_8(
2345 bus_space_tag_t t,
2346 bus_space_handle_t h,
2347 bus_size_t o,
2348 const uint64_t *a,
2349 bus_size_t c)
2350 {
2351 for (; c; a++, c--, o+=8)
2352 bus_space_write_stream_8(t, h, o, *a);
2353 }
2354
2355
/*
 * void bus_space_set_region_stream_N(bus_space_tag_t tag,
 *	bus_space_handle_t bsh, bus_size_t off,
 *	const u_intN_t val, bus_size_t count);
 *
 * Write the value `val' to `count' consecutive bus space locations
 * starting at tag/bsh/off.
 */
2362 void
bus_space_set_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)2363 bus_space_set_region_stream_1(
2364 bus_space_tag_t t,
2365 bus_space_handle_t h,
2366 bus_size_t o,
2367 const uint8_t v,
2368 bus_size_t c)
2369 {
2370 for (; c; c--, o++)
2371 bus_space_write_stream_1(t, h, o, v);
2372 }
2373
2374 void
bus_space_set_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t v,bus_size_t c)2375 bus_space_set_region_stream_2(
2376 bus_space_tag_t t,
2377 bus_space_handle_t h,
2378 bus_size_t o,
2379 const uint16_t v,
2380 bus_size_t c)
2381 {
2382 for (; c; c--, o+=2)
2383 bus_space_write_stream_2(t, h, o, v);
2384 }
2385
2386 void
bus_space_set_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t v,bus_size_t c)2387 bus_space_set_region_stream_4(
2388 bus_space_tag_t t,
2389 bus_space_handle_t h,
2390 bus_size_t o,
2391 const uint32_t v,
2392 bus_size_t c)
2393 {
2394 for (; c; c--, o+=4)
2395 bus_space_write_stream_4(t, h, o, v);
2396 }
2397
2398 void
bus_space_set_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t v,bus_size_t c)2399 bus_space_set_region_stream_8(
2400 bus_space_tag_t t,
2401 bus_space_handle_t h,
2402 bus_size_t o,
2403 const uint64_t v,
2404 bus_size_t c)
2405 {
2406 for (; c; c--, o+=8)
2407 bus_space_write_stream_8(t, h, o, v);
2408 }
2409
2410 /*
2411 * void bus_space_copy_region_stream_N(bus_space_tag_t tag,
2412 * bus_space_handle_t bsh1, bus_size_t off1,
2413 * bus_space_handle_t bsh2, bus_size_t off2,
2414 * bus_size_t count);
2415 *
2416 * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
2417 * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
2418 */
2419
2420 void
bus_space_copy_region_stream_1(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2421 bus_space_copy_region_stream_1(
2422 bus_space_tag_t t,
2423 bus_space_handle_t h1,
2424 bus_size_t o1,
2425 bus_space_handle_t h2,
2426 bus_size_t o2,
2427 bus_size_t c)
2428 {
2429 for (; c; c--, o1++, o2++)
2430 bus_space_write_stream_1(t, h1, o1, bus_space_read_stream_1(t, h2, o2));
2431 }
2432
2433 void
bus_space_copy_region_stream_2(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2434 bus_space_copy_region_stream_2(
2435 bus_space_tag_t t,
2436 bus_space_handle_t h1,
2437 bus_size_t o1,
2438 bus_space_handle_t h2,
2439 bus_size_t o2,
2440 bus_size_t c)
2441 {
2442 for (; c; c--, o1+=2, o2+=2)
2443 bus_space_write_stream_2(t, h1, o1, bus_space_read_stream_2(t, h2, o2));
2444 }
2445
2446 void
bus_space_copy_region_stream_4(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2447 bus_space_copy_region_stream_4(
2448 bus_space_tag_t t,
2449 bus_space_handle_t h1,
2450 bus_size_t o1,
2451 bus_space_handle_t h2,
2452 bus_size_t o2,
2453 bus_size_t c)
2454 {
2455 for (; c; c--, o1+=4, o2+=4)
2456 bus_space_write_stream_4(t, h1, o1, bus_space_read_stream_4(t, h2, o2));
2457 }
2458
2459 void
bus_space_copy_region_stream_8(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2460 bus_space_copy_region_stream_8(
2461 bus_space_tag_t t,
2462 bus_space_handle_t h1,
2463 bus_size_t o1,
2464 bus_space_handle_t h2,
2465 bus_size_t o2,
2466 bus_size_t c)
2467 {
2468 for (; c; c--, o1+=8, o2+=8)
2469 bus_space_write_stream_8(t, h1, o1, bus_space_read_8(t, h2, o2));
2470 }
2471
2472 void
bus_space_write_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t v)2473 bus_space_write_1(
2474 bus_space_tag_t t,
2475 bus_space_handle_t h,
2476 bus_size_t o,
2477 uint8_t v)
2478 {
2479 (*t->sparc_write_1)(t, h, o, v);
2480 }
2481
2482 void
bus_space_write_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t v)2483 bus_space_write_2(
2484 bus_space_tag_t t,
2485 bus_space_handle_t h,
2486 bus_size_t o,
2487 uint16_t v)
2488 {
2489 (*t->sparc_write_2)(t, h, o, v);
2490 }
2491
2492 void
bus_space_write_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t v)2493 bus_space_write_4(
2494 bus_space_tag_t t,
2495 bus_space_handle_t h,
2496 bus_size_t o,
2497 uint32_t v)
2498 {
2499 (*t->sparc_write_4)(t, h, o, v);
2500 }
2501
2502 void
bus_space_write_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t v)2503 bus_space_write_8(
2504 bus_space_tag_t t,
2505 bus_space_handle_t h,
2506 bus_size_t o,
2507 uint64_t v)
2508 {
2509 (*t->sparc_write_8)(t, h, o, v);
2510 }
2511
2512 #if __SLIM_SPARC_BUS_SPACE
2513
/*
 * Slim variant of bus_space_write_1: a compiler barrier followed by
 * the direct (_real) access, bypassing the per-tag method dispatch.
 */
void
bus_space_write_1(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	uint8_t v)
{
	/* Keep the compiler from reordering accesses around this store. */
	__insn_barrier();
	bus_space_write_1_real(t, h, o, v);
}
2524
/*
 * Slim variant of bus_space_write_2: compiler barrier plus direct
 * (_real) access, bypassing the per-tag method dispatch.
 */
void
bus_space_write_2(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	uint16_t v)
{
	__insn_barrier();
	bus_space_write_2_real(t, h, o, v);
}
2535
/*
 * Slim variant of bus_space_write_4: compiler barrier plus direct
 * (_real) access, bypassing the per-tag method dispatch.
 */
void
bus_space_write_4(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	uint32_t v)
{
	__insn_barrier();
	bus_space_write_4_real(t, h, o, v);
}
2546
/*
 * Slim variant of bus_space_write_8: compiler barrier plus direct
 * (_real) access, bypassing the per-tag method dispatch.
 */
void
bus_space_write_8(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	uint64_t v)
{
	__insn_barrier();
	bus_space_write_8_real(t, h, o, v);
}
2557
2558 #endif /* __SLIM_SPARC_BUS_SPACE */
2559
2560 uint8_t
bus_space_read_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)2561 bus_space_read_1(
2562 bus_space_tag_t t,
2563 bus_space_handle_t h,
2564 bus_size_t o)
2565 {
2566 return (*t->sparc_read_1)(t, h, o);
2567 }
2568
2569 uint16_t
bus_space_read_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)2570 bus_space_read_2(
2571 bus_space_tag_t t,
2572 bus_space_handle_t h,
2573 bus_size_t o)
2574 {
2575 return (*t->sparc_read_2)(t, h, o);
2576 }
2577
2578 uint32_t
bus_space_read_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)2579 bus_space_read_4(
2580 bus_space_tag_t t,
2581 bus_space_handle_t h,
2582 bus_size_t o)
2583 {
2584 return (*t->sparc_read_4)(t, h, o);
2585 }
2586
2587 uint64_t
bus_space_read_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)2588 bus_space_read_8(
2589 bus_space_tag_t t,
2590 bus_space_handle_t h,
2591 bus_size_t o)
2592 {
2593 return (*t->sparc_read_8)(t, h, o);
2594 }
2595
2596 #if __SLIM_SPARC_BUS_SPACE
/*
 * Slim variant of bus_space_read_1: compiler barrier plus direct
 * (_real) access, bypassing the per-tag method dispatch.
 */
uint8_t
bus_space_read_1(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o)
{
	/* Keep the compiler from reordering accesses around this load. */
	__insn_barrier();
	return bus_space_read_1_real(t, h, o);
}
2606
/*
 * Slim variant of bus_space_read_2: compiler barrier plus direct
 * (_real) access, bypassing the per-tag method dispatch.
 */
uint16_t
bus_space_read_2(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o)
{
	__insn_barrier();
	return bus_space_read_2_real(t, h, o);
}
2616
/*
 * Slim variant of bus_space_read_4: compiler barrier plus direct
 * (_real) access, bypassing the per-tag method dispatch.
 */
uint32_t
bus_space_read_4(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o)
{
	__insn_barrier();
	return bus_space_read_4_real(t, h, o);
}
2626
/*
 * Slim variant of bus_space_read_8: compiler barrier plus direct
 * (_real) access, bypassing the per-tag method dispatch.
 */
uint64_t
bus_space_read_8(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o)
{
	__insn_barrier();
	return bus_space_read_8_real(t, h, o);
}
2636
2637 #endif /* __SLIM_SPARC_BUS_SPACE */
2638
2639 void
bus_space_read_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)2640 bus_space_read_multi_1(
2641 bus_space_tag_t t,
2642 bus_space_handle_t h,
2643 bus_size_t o,
2644 uint8_t *a,
2645 bus_size_t c)
2646 {
2647 while (c-- > 0)
2648 *a++ = bus_space_read_1(t, h, o);
2649 }
2650
2651 void
bus_space_read_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2652 bus_space_read_multi_2(
2653 bus_space_tag_t t,
2654 bus_space_handle_t h,
2655 bus_size_t o,
2656 uint16_t *a,
2657 bus_size_t c)
2658 {
2659 while (c-- > 0)
2660 *a++ = bus_space_read_2(t, h, o);
2661 }
2662
2663 void
bus_space_read_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2664 bus_space_read_multi_4(
2665 bus_space_tag_t t,
2666 bus_space_handle_t h,
2667 bus_size_t o,
2668 uint32_t *a,
2669 bus_size_t c)
2670 {
2671 while (c-- > 0)
2672 *a++ = bus_space_read_4(t, h, o);
2673 }
2674
2675 void
bus_space_read_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2676 bus_space_read_multi_8(
2677 bus_space_tag_t t,
2678 bus_space_handle_t h,
2679 bus_size_t o,
2680 uint64_t *a,
2681 bus_size_t c)
2682 {
2683 while (c-- > 0)
2684 *a++ = bus_space_read_8(t, h, o);
2685 }
2686
/*
 * void bus_space_read_multi_stream_N(bus_space_tag_t tag,
 *	bus_space_handle_t bsh, bus_size_t offset,
 *	u_intN_t *addr, bus_size_t count);
 *
 * Read `count' 1, 2, 4, or 8 byte stream quantities from the single
 * bus space location described by tag/handle/offset and copy them
 * into the buffer provided.
 */
2695 void
bus_space_read_multi_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2696 bus_space_read_multi_stream_2(
2697 bus_space_tag_t t,
2698 bus_space_handle_t h,
2699 bus_size_t o,
2700 uint16_t *a,
2701 bus_size_t c)
2702 {
2703 while (c-- > 0)
2704 *a++ = bus_space_read_2_real(t, h, o);
2705 }
2706
2707 void
bus_space_read_multi_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2708 bus_space_read_multi_stream_4(
2709 bus_space_tag_t t,
2710 bus_space_handle_t h,
2711 bus_size_t o,
2712 uint32_t *a,
2713 bus_size_t c)
2714 {
2715 while (c-- > 0)
2716 *a++ = bus_space_read_4_real(t, h, o);
2717 }
2718
2719 void
bus_space_read_multi_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2720 bus_space_read_multi_stream_8(
2721 bus_space_tag_t t,
2722 bus_space_handle_t h,
2723 bus_size_t o,
2724 uint64_t *a,
2725 bus_size_t c)
2726 {
2727 while (c-- > 0)
2728 *a++ = bus_space_read_8_real(t, h, o);
2729 }
2730
2731 /*
2732 * void bus_space_write_multi_N(bus_space_tag_t tag,
2733 * bus_space_handle_t bsh, bus_size_t offset,
2734 * const u_intN_t *addr, bus_size_t count);
2735 *
2736 * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
2737 * provided to bus space described by tag/handle/offset.
2738 */
2739 void
bus_space_write_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)2740 bus_space_write_multi_1(
2741 bus_space_tag_t t,
2742 bus_space_handle_t h,
2743 bus_size_t o,
2744 const uint8_t *a,
2745 bus_size_t c)
2746 {
2747 while (c-- > 0)
2748 bus_space_write_1(t, h, o, *a++);
2749 }
2750
2751 void
bus_space_write_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)2752 bus_space_write_multi_2(
2753 bus_space_tag_t t,
2754 bus_space_handle_t h,
2755 bus_size_t o,
2756 const uint16_t *a,
2757 bus_size_t c)
2758 {
2759 while (c-- > 0)
2760 bus_space_write_2(t, h, o, *a++);
2761 }
2762
2763 void
bus_space_write_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)2764 bus_space_write_multi_4(
2765 bus_space_tag_t t,
2766 bus_space_handle_t h,
2767 bus_size_t o,
2768 const uint32_t *a,
2769 bus_size_t c)
2770 {
2771 while (c-- > 0)
2772 bus_space_write_4(t, h, o, *a++);
2773 }
2774
2775 void
bus_space_write_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)2776 bus_space_write_multi_8(
2777 bus_space_tag_t t,
2778 bus_space_handle_t h,
2779 bus_size_t o,
2780 const uint64_t *a,
2781 bus_size_t c)
2782 {
2783 while (c-- > 0)
2784 bus_space_write_8(t, h, o, *a++);
2785 }
2786
2787 /*
2788 * Allocate a new bus tag and have it inherit the methods of the
2789 * given parent.
2790 */
2791 bus_space_tag_t
bus_space_tag_alloc(bus_space_tag_t parent,void * cookie)2792 bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
2793 {
2794 struct sparc_bus_space_tag *sbt;
2795
2796 sbt = kmem_zalloc(sizeof(*sbt), KM_SLEEP);
2797
2798 if (parent) {
2799 memcpy(sbt, parent, sizeof(*sbt));
2800 sbt->parent = parent;
2801 sbt->ranges = NULL;
2802 sbt->nranges = 0;
2803 }
2804
2805 sbt->cookie = cookie;
2806 return (sbt);
2807 }
2808
2809 /*
2810 * Generic routine to translate an address using OpenPROM `ranges'.
2811 */
2812 int
bus_space_translate_address_generic(struct openprom_range * ranges,int nranges,bus_addr_t * bap)2813 bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
2814 bus_addr_t *bap)
2815 {
2816 int i, space = BUS_ADDR_IOSPACE(*bap);
2817
2818 for (i = 0; i < nranges; i++) {
2819 struct openprom_range *rp = &ranges[i];
2820
2821 if (rp->or_child_space != space)
2822 continue;
2823
2824 /* We've found the connection to the parent bus. */
2825 *bap = BUS_ADDR(rp->or_parent_space,
2826 rp->or_parent_base + BUS_ADDR_PADDR(*bap));
2827 return (0);
2828 }
2829
2830 return (EINVAL);
2831 }
2832
/*
 * Map bus address `ba' (size `size') into kernel virtual space and
 * return the handle in *hp.  If `va' is nonzero the caller supplies
 * the virtual address; otherwise pages are carved out of a private
 * IODEV_BASE..IODEV_END allocator (never freed back — see unmap).
 */
static int
sparc_bus_map_iodev(bus_space_tag_t t, bus_addr_t ba, bus_size_t size, int flags,
	vaddr_t va, bus_space_handle_t *hp)
{
	vaddr_t v;
	paddr_t pa;
	unsigned int pmtype;
	bus_space_tag_t pt;
	static vaddr_t iobase;		/* next free VA in the IODEV range */

	/*
	 * This base class bus map function knows about address range
	 * translation so bus drivers that need no other special
	 * handling can just keep this method in their tags.
	 *
	 * We expect to resolve range translations iteratively, but allow
	 * for recursion just in case.
	 */
	while ((pt = t->parent) != NULL) {
		if (t->ranges != NULL) {
			int error;

			/* Rebase `ba' into the parent bus' address space. */
			if ((error = bus_space_translate_address_generic(
					t->ranges, t->nranges, &ba)) != 0)
				return (error);
		}
		/* Defer to a parent that overrides the map method. */
		if (pt->sparc_bus_map != sparc_bus_map)
			return (bus_space_map2(pt, ba, size, flags, va, hp));
		t = pt;
	}

	/* Lazy init of the private VA allocator. */
	if (iobase == 0)
		iobase = IODEV_BASE;

	size = round_page(size);
	if (size == 0) {
		printf("sparc_bus_map: zero size\n");
		return (EINVAL);
	}

	if (va)
		v = trunc_page(va);
	else {
		/* Carve VA out of the IODEV range; never reclaimed. */
		v = iobase;
		iobase += size;
		if (iobase > IODEV_END)	/* unlikely */
			panic("sparc_bus_map: iobase=0x%lx", iobase);
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = BUS_ADDR_PADDR(ba);

	/* note: preserve page offset */
	*hp = (bus_space_handle_t)(v | ((u_long)pa & PGOFSET));

	pa = trunc_page(pa);
	/* Enter one non-cached (PMAP_NC) R/W mapping per page. */
	do {
		pmap_kenter_pa(v, pa | pmtype | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		v += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);

	pmap_update(pmap_kernel());
	return (0);
}
2899
/*
 * Map a region too large for the fixed IODEV VA range: reserve VA
 * space from kernel_map first, then let the iodev path enter the
 * actual page mappings at that address.
 */
static int
sparc_bus_map_large(bus_space_tag_t t, bus_addr_t ba,
		    bus_size_t size, int flags, bus_space_handle_t *hp)
{
	vaddr_t v = 0;

	if (uvm_map(kernel_map, &v, size, NULL, 0, PAGE_SIZE,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_NORMAL,
			0)) == 0) {
		return sparc_bus_map_iodev(t, ba, size, flags, v, hp);
	}
	/* NOTE(review): returns -1 rather than an errno value — confirm callers. */
	return -1;
}
2913
2914 int
sparc_bus_map(bus_space_tag_t t,bus_addr_t ba,bus_size_t size,int flags,vaddr_t va,bus_space_handle_t * hp)2915 sparc_bus_map(bus_space_tag_t t, bus_addr_t ba,
2916 bus_size_t size, int flags, vaddr_t va,
2917 bus_space_handle_t *hp)
2918 {
2919
2920 if (flags & BUS_SPACE_MAP_LARGE) {
2921 return sparc_bus_map_large(t, ba, size, flags, hp);
2922 } else
2923 return sparc_bus_map_iodev(t, ba, size, flags, va, hp);
2924
2925 }
2926
2927 int
sparc_bus_unmap(bus_space_tag_t t,bus_space_handle_t bh,bus_size_t size)2928 sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
2929 {
2930 vaddr_t va = trunc_page((vaddr_t)bh);
2931
2932 /*
2933 * XXX
2934 * mappings with BUS_SPACE_MAP_LARGE need additional care here
2935 * we can just check if the VA is in the IODEV range
2936 */
2937
2938 pmap_kremove(va, round_page(size));
2939 pmap_update(pmap_kernel());
2940 return (0);
2941 }
2942
2943 int
sparc_bus_subregion(bus_space_tag_t tag,bus_space_handle_t handle,bus_size_t offset,bus_size_t size,bus_space_handle_t * nhandlep)2944 sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
2945 bus_size_t offset, bus_size_t size,
2946 bus_space_handle_t *nhandlep)
2947 {
2948
2949 *nhandlep = handle + offset;
2950 return (0);
2951 }
2952
/*
 * Base class bus mmap method: translate `ba' up through the tag
 * hierarchy (see also sparc_bus_map) and return a pmap-cookie
 * suitable for pmap_enter: physical page | IO-space encoding |
 * non-cached flag.  Returns (paddr_t)-1 on translation failure.
 */
paddr_t
sparc_bus_mmap(bus_space_tag_t t, bus_addr_t ba, off_t off,
	       int prot, int flags)
{
	u_int pmtype;
	paddr_t pa;
	bus_space_tag_t pt;

	/*
	 * Base class bus mmap function; see also sparc_bus_map
	 */
	while ((pt = t->parent) != NULL) {
		if (t->ranges != NULL) {
			int error;

			/* Rebase `ba' into the parent bus' address space. */
			if ((error = bus_space_translate_address_generic(
					t->ranges, t->nranges, &ba)) != 0)
				return (-1);
		}
		/* Defer to a parent that overrides the mmap method. */
		if (pt->sparc_bus_mmap != sparc_bus_mmap)
			return (bus_space_mmap(pt, ba, off, prot, flags));
		t = pt;
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = trunc_page(BUS_ADDR_PADDR(ba) + off);

	return (paddr_t)(pa | pmtype | PMAP_NC);
}
2982
2983 /*
2984 * Establish a temporary bus mapping for device probing.
2985 */
/*
 * Establish a temporary bus mapping (at TMPMAP_VA) and poke the
 * device to see if it responds.  Returns nonzero if the access at
 * `paddr'+`offset' did not fault and, when a `callback' is given,
 * the callback also returned nonzero.  The mapping is always torn
 * down before returning.
 */
int
bus_space_probe(bus_space_tag_t tag, bus_addr_t paddr, bus_size_t size,
		size_t offset, int flags,
		int (*callback)(void *, void *), void *arg)
{
	bus_space_handle_t bh;
	void *tmp;
	int result;

	if (bus_space_map2(tag, paddr, size, flags, TMPMAP_VA, &bh) != 0)
		return (0);

	tmp = (void *)bh;
	/* probeget() returns -1 when the access faults. */
	result = (probeget((char *)tmp + offset, size) != -1);
	if (result && callback != NULL)
		result = (*callback)(tmp, arg);
	bus_space_unmap(tag, bh, size);
	return (result);
}
3005
3006
/*
 * Hook up an interrupt handler at the given priority level.  Returns
 * the new intrhand cookie.  KM_SLEEP means the allocation cannot fail.
 */
void *
sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
			     int (*handler)(void *), void *arg,
			     void (*fastvec)(void))
{
	struct intrhand *ih;

	ih = kmem_alloc(sizeof(struct intrhand), KM_SLEEP);
	ih->ih_fun = handler;
	ih->ih_arg = arg;
	intr_establish(pil, level, ih, fastvec, false);
	return (ih);
}
3020
/*
 * Default bus barrier method for the base tag: a no-op, since no
 * default barrier action is defined.
 */
void
sparc_bus_barrier(bus_space_tag_t t, bus_space_handle_t h,
    bus_size_t offset, bus_size_t size, int flags)
{

        /* No default barrier action defined */
}
3028
/* Default read_1 method for the base tag: direct (_real) access. */
static uint8_t
sparc_bus_space_read_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{

	return bus_space_read_1_real(t, h, o);
}
3035
/* Default read_2 method for the base tag: direct (_real) access. */
static uint16_t
sparc_bus_space_read_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{

	return bus_space_read_2_real(t, h, o);
}
3042
/* Default read_4 method for the base tag: direct (_real) access. */
static uint32_t
sparc_bus_space_read_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{

	return bus_space_read_4_real(t, h, o);
}
3049
/* Default read_8 method for the base tag: direct (_real) access. */
static uint64_t
sparc_bus_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{

	return bus_space_read_8_real(t, h, o);
}
3056
/* Default write_1 method for the base tag: direct (_real) access. */
static void
sparc_bus_space_write_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
			uint8_t v)
{

	bus_space_write_1_real(t, h, o, v);
}
3064
/* Default write_2 method for the base tag: direct (_real) access. */
static void
sparc_bus_space_write_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
			uint16_t v)
{

	bus_space_write_2_real(t, h, o, v);
}
3072
/* Default write_4 method for the base tag: direct (_real) access. */
static void
sparc_bus_space_write_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
			uint32_t v)
{

	bus_space_write_4_real(t, h, o, v);
}
3080
/* Default write_8 method for the base tag: direct (_real) access. */
static void
sparc_bus_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
			uint64_t v)
{

	bus_space_write_8_real(t, h, o, v);
}
3088
/*
 * The root bus space tag.  It has no parent and no ranges, uses the
 * generic map/unmap/mmap/barrier methods above, and the direct
 * (_real) accessors for reads and writes.  Child bus drivers derive
 * their tags from this one via bus_space_tag_alloc().
 */
struct sparc_bus_space_tag mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	NULL,				/* ranges */
	0,				/* nranges */
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_barrier,		/* bus_space_barrier */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish,	/* bus_intr_establish */

	sparc_bus_space_read_1,		/* bus_space_read_1 */
	sparc_bus_space_read_2,		/* bus_space_read_2 */
	sparc_bus_space_read_4,		/* bus_space_read_4 */
	sparc_bus_space_read_8,		/* bus_space_read_8 */
	sparc_bus_space_write_1,	/* bus_space_write_1 */
	sparc_bus_space_write_2,	/* bus_space_write_2 */
	sparc_bus_space_write_4,	/* bus_space_write_4 */
	sparc_bus_space_write_8		/* bus_space_write_8 */
};
3110
3111 int
mm_md_physacc(paddr_t pa,vm_prot_t prot)3112 mm_md_physacc(paddr_t pa, vm_prot_t prot)
3113 {
3114
3115 return pmap_pa_exists(pa) ? 0 : EFAULT;
3116 }
3117
3118 int
mm_md_kernacc(void * ptr,vm_prot_t prot,bool * handled)3119 mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
3120 {
3121 const vaddr_t v = (vaddr_t)ptr;
3122
3123 *handled = (v >= MSGBUF_VA && v < MSGBUF_VA + PAGE_SIZE) ||
3124 (v >= prom_vstart && v < prom_vend && (prot & VM_PROT_WRITE) == 0);
3125 return 0;
3126 }
3127
/*
 * Machine-dependent /dev/mem minor-device handler: only the sun4
 * EEPROM minor is supported; everything else yields ENXIO.
 */
int
mm_md_readwrite(dev_t dev, struct uio *uio)
{

	switch (minor(dev)) {
#if defined(SUN4)
	case DEV_EEPROM:
		/* EEPROM access exists only on sun4-class machines. */
		if (cputyp == CPU_SUN4)
			return eeprom_uio(uio);
		/* FALLTHROUGH: non-sun4 falls out to ENXIO below. */
#endif
	}
	return ENXIO;
}
3141