1 /* $NetBSD: machdep.c,v 1.285 2016/07/07 06:55:38 msaitoh Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1992, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * This software was developed by the Computer Systems Engineering group
38 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
39 * contributed to Berkeley.
40 *
41 * All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Lawrence Berkeley Laboratory.
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * @(#)machdep.c 8.6 (Berkeley) 1/14/94
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.285 2016/07/07 06:55:38 msaitoh Exp $");
75
76 #include "opt_ddb.h"
77 #include "opt_multiprocessor.h"
78 #include "opt_modular.h"
79 #include "opt_compat_netbsd.h"
80 #include "opt_compat_svr4.h"
81 #include "opt_compat_sunos.h"
82
83 #include <sys/param.h>
84 #include <sys/extent.h>
85 #include <sys/signal.h>
86 #include <sys/signalvar.h>
87 #include <sys/proc.h>
88 #include <sys/buf.h>
89 #include <sys/device.h>
90 #include <sys/ras.h>
91 #include <sys/reboot.h>
92 #include <sys/systm.h>
93 #include <sys/kernel.h>
94 #include <sys/conf.h>
95 #include <sys/file.h>
96 #include <sys/malloc.h>
97 #include <sys/mbuf.h>
98 #include <sys/mount.h>
99 #include <sys/msgbuf.h>
100 #include <sys/syscallargs.h>
101 #include <sys/exec.h>
102 #include <sys/ucontext.h>
103 #include <sys/cpu.h>
104 #include <sys/module.h>
105 #include <sys/ksyms.h>
106
107 #include <sys/exec_aout.h>
108
109 #include <dev/mm.h>
110
111 #include <uvm/uvm.h>
112
113 #include <sys/sysctl.h>
114 #ifndef ELFSIZE
115 #ifdef __arch64__
116 #define ELFSIZE 64
117 #else
118 #define ELFSIZE 32
119 #endif
120 #endif
121 #include <sys/exec_elf.h>
122
123 #define _SPARC_BUS_DMA_PRIVATE
124 #include <machine/autoconf.h>
125 #include <sys/bus.h>
126 #include <machine/frame.h>
127 #include <machine/cpu.h>
128 #include <machine/pcb.h>
129 #include <machine/pmap.h>
130 #include <machine/openfirm.h>
131 #include <machine/sparc64.h>
132
133 #include <sparc64/sparc64/cache.h>
134
135 /* #include "fb.h" */
136 #include "ksyms.h"
137
int bus_space_debug = 0; /* This may be used by macros elsewhere. */
#ifdef DEBUG
/* Conditional debug printf: prints only when the level bit is enabled. */
#define DPRINTF(l, s)   do { if (bus_space_debug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif

#if defined(COMPAT_16) || defined(COMPAT_SVR4) || defined(COMPAT_SVR4_32) || defined(COMPAT_SUNOS)
#ifdef DEBUG
/* See <sparc64/sparc64/sigdebug.h> */
int sigdebug = 0x0;	/* signal-delivery debug flags */
int sigpid = 0;		/* restrict signal debugging to this pid (0 = all) */
#endif
#endif

extern vaddr_t avail_end;	/* set by pmap bootstrap code */
#ifdef MODULAR
/* VA range and map used for loadable kernel modules (see cpu_startup()). */
vaddr_t module_start, module_end;
static struct vm_map module_map_store;
#endif

/*
 * Maximum number of DMA segments we'll allow in dmamem_load()
 * routines. Can be overridden in config files, etc.
 */
#ifndef MAX_DMA_SEGS
#define MAX_DMA_SEGS	20
#endif

void	dumpsys(void);
void	stackdump(void);
170
171 /*
172 * Machine-dependent startup code
173 */
174 void
cpu_startup(void)175 cpu_startup(void)
176 {
177 #ifdef DEBUG
178 extern int pmapdebug;
179 int opmapdebug = pmapdebug;
180 #endif
181 char pbuf[9];
182
183 #ifdef DEBUG
184 pmapdebug = 0;
185 #endif
186
187 /*
188 * Good {morning,afternoon,evening,night}.
189 */
190 printf("%s%s", copyright, version);
191 /*identifycpu();*/
192 format_bytes(pbuf, sizeof(pbuf), ctob((uint64_t)physmem));
193 printf("total memory = %s\n", pbuf);
194
195 #ifdef DEBUG
196 pmapdebug = opmapdebug;
197 #endif
198 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
199 printf("avail memory = %s\n", pbuf);
200
201 #if 0
202 pmap_redzone();
203 #endif
204
205 #ifdef MODULAR
206 uvm_map_setup(&module_map_store, module_start, module_end, 0);
207 module_map_store.pmap = pmap_kernel();
208 module_map = &module_map_store;
209 #endif
210 }
211
212 /*
213 * Set up registers on exec.
214 */
215
216 #ifdef __arch64__
217 #define STACK_OFFSET BIAS
218 #undef CCFSZ
219 #define CCFSZ CC64FSZ
220 #else
221 #define STACK_OFFSET 0
222 #endif
223
/* ARGSUSED */
/*
 * Initialize the LWP's register state for a freshly exec'd image:
 * clear the trapframe, select the memory model (64-bit only), discard
 * any inherited FPU state, and point %tpc/%tnpc at the entry point.
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe64 *tf = l->l_md.md_tf;
	struct fpstate64 *fs;
	int64_t tstate;
	int pstate = PSTATE_USER;
#ifdef __arch64__
	Elf_Ehdr *eh = pack->ep_hdr;	/* to read EF_SPARCV9_MM flags */
#endif

	/* Clear the P_32 flag. */
	l->l_proc->p_flag &= ~PK_32;

	/* Don't allow misaligned code by default */
	l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN;

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec()
	 *	%tstate: (retain icc and xcc and cwp bits)
	 *	%g1: p->p_psstrp (used by crt0)
	 *	%tpc,%tnpc: entry point of program
	 */
#ifdef __arch64__
	/* Check what memory model is requested */
	switch ((eh->e_flags & EF_SPARCV9_MM)) {
	default:
		printf("Unknown memory model %d\n",
		       (eh->e_flags & EF_SPARCV9_MM));
		/* FALLTHROUGH */
	case EF_SPARCV9_TSO:
		pstate = PSTATE_MM_TSO|PSTATE_IE;
		break;
	case EF_SPARCV9_PSO:
		pstate = PSTATE_MM_PSO|PSTATE_IE;
		break;
	case EF_SPARCV9_RMO:
		pstate = PSTATE_MM_RMO|PSTATE_IE;
		break;
	}
#endif
	/* Build %tstate: no-fault ASI, chosen pstate, keep current window. */
	tstate = ((int64_t)ASI_PRIMARY_NO_FAULT << TSTATE_ASI_SHIFT) |
	    (pstate << TSTATE_PSTATE_SHIFT) | (tf->tf_tstate & TSTATE_CWP);
	if ((fs = l->l_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		fpusave_lwp(l, false);
		pool_cache_put(fpstate_cache, fs);
		l->l_md.md_fpstate = NULL;
	}
	memset(tf, 0, sizeof *tf);
	tf->tf_tstate = tstate;
	tf->tf_global[1] = l->l_proc->p_psstrp;
	/* %g4 needs to point to the start of the data segment */
	tf->tf_global[4] = 0;
	tf->tf_pc = pack->ep_entry & ~3;	/* instructions are 4-byte aligned */
	tf->tf_npc = tf->tf_pc + 4;
	/* Carve an initial register-save area off the stack. */
	stack -= sizeof(struct rwindow);
	tf->tf_out[6] = stack - STACK_OFFSET;
	tf->tf_out[7] = 0UL;
#ifdef NOTDEF_DEBUG
	printf("setregs: setting tf %p sp %p pc %p\n", (long)tf,
	       (long)tf->tf_out[6], (long)tf->tf_pc);
#ifdef DDB
	Debugger();
#endif
#endif
}
297
298 /*
299 * machine dependent system variables.
300 */
301 static int
sysctl_machdep_boot(SYSCTLFN_ARGS)302 sysctl_machdep_boot(SYSCTLFN_ARGS)
303 {
304 struct sysctlnode node = *rnode;
305 char bootpath[256];
306 const char *cp = NULL;
307 extern char ofbootpath[], *ofbootpartition, *ofbootfile, *ofbootflags;
308
309 switch (node.sysctl_num) {
310 case CPU_BOOTED_KERNEL:
311 cp = ofbootfile;
312 if (cp == NULL || cp[0] == '\0')
313 /* Unknown to firmware, return default name */
314 cp = "netbsd";
315 break;
316 case CPU_BOOT_ARGS:
317 cp = ofbootflags;
318 break;
319 case CPU_BOOTED_DEVICE:
320 if (ofbootpartition) {
321 snprintf(bootpath, sizeof(bootpath), "%s:%s",
322 ofbootpath, ofbootpartition);
323 cp = bootpath;
324 } else {
325 cp = ofbootpath;
326 }
327 break;
328 }
329
330 if (cp == NULL || cp[0] == '\0')
331 return (ENOENT);
332
333 /*XXXUNCONST*/
334 node.sysctl_data = __UNCONST(cp);
335 node.sysctl_size = strlen(cp) + 1;
336 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
337 }
338
339 /*
340 * figure out which VIS version the CPU supports
341 * this assumes all CPUs in the system are the same
342 */
343 static int
get_vis(void)344 get_vis(void)
345 {
346 int vis = 0;
347
348 if ( CPU_ISSUN4V ) {
349 /*
350 * UA2005 and UA2007 supports VIS 1 and VIS 2.
351 * Oracle SPARC Architecture 2011 supports VIS 3.
352 *
353 * XXX Settle with VIS 2 until we can determite the
354 * actual sun4v implementation.
355 */
356 vis = 2;
357 } else {
358 if (GETVER_CPU_MANUF() == MANUF_FUJITSU) {
359 /* as far as I can tell SPARC64-III and up have VIS 1.0 */
360 if (GETVER_CPU_IMPL() >= IMPL_SPARC64_III) {
361 vis = 1;
362 }
363 /* XXX - which, if any, SPARC64 support VIS 2.0? */
364 } else {
365 /* this better be Sun */
366 vis = 1; /* all UltraSPARCs support at least VIS 1.0 */
367 if (CPU_IS_USIII_UP()) {
368 vis = 2;
369 }
370 }
371 }
372 return vis;
373 }
374
/*
 * Register the machdep sysctl subtree: boot strings (handled by
 * sysctl_machdep_boot()), plus the immediate cpu_arch and vis values.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "boot_args", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	/* cpu_arch is a constant 9 (SPARC V9). */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "cpu_arch", NULL,
		       NULL, 9, NULL, 0,
		       CTL_MACHDEP, CPU_ARCH, CTL_EOL);
	/* VIS level is probed once at setup time and stored immediate. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "vis",
		       SYSCTL_DESCR("supported version of VIS instruction set"),
		       NULL, get_vis(), NULL, 0,
		       CTL_MACHDEP, CPU_VIS, CTL_EOL);
}
411
/*
 * Return the user stack address at which a signal frame for 'sig'
 * should be built, and report via *onstack whether the alternate
 * signal stack will be used.
 */
void *
getframe(struct lwp *l, int sig, int *onstack)
{
	struct proc *p = l->l_proc;
	struct trapframe64 *tf = l->l_md.md_tf;

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	/* Use the sigaltstack only if it is enabled, not already in use,
	 * and the handler was installed with SA_ONSTACK. */
	*onstack = (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
	    && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (*onstack)
		/* Top of the alternate stack (stacks grow down). */
		return ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
	else
		/* Current user %sp; undo the V9 stack bias. */
		return (void *)((uintptr_t)tf->tf_out[6] + STACK_OFFSET);
}
430
/* Layout of the signal frame pushed on the user stack by sendsig_siginfo(). */
struct sigframe_siginfo {
	siginfo_t	sf_si;	/* saved siginfo */
	ucontext_t	sf_uc;	/* saved ucontext */
};
435
/*
 * Deliver a signal with SA_SIGINFO semantics: build a sigframe_siginfo
 * plus a register window on the user stack, then redirect the trapframe
 * so the LWP resumes in the handler with the trampoline as return address.
 *
 * Called with p->p_lock held; the lock is dropped around the copyout()s.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack, error;
	int sig = ksi->ksi_signo;
	ucontext_t uc;
	long ucsz;			/* bytes of uc actually copied out */
	struct sigframe_siginfo *fp = getframe(l, sig, &onstack);
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct rwindow *newsp;
	register_t sp;
	/* Allocate an aligned sigframe */
	fp = (void *)((u_long)(fp - 1) & ~0x0f);

	uc.uc_flags = _UC_SIGMASK |
	    ((l->l_sigstk.ss_flags & SS_ONSTACK)
		? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = l->l_ctxlink;
	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));

	sendsig_reset(l, sig);
	/* Drop the proc lock across cpu_getmcontext() and the copyouts. */
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
	/* Only copy out up to (not including) the padding at the end. */
	ucsz = (char *)&uc.__uc_pad - (char *)&uc;

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	newsp = (struct rwindow *)((u_long)fp - CCFSZ);
	sp = (register_t)(uintptr_t)tf->tf_out[6];
	error = (copyout(&ksi->ksi_info, &fp->sf_si,
			sizeof(ksi->ksi_info)) != 0 ||
	    copyout(&uc, &fp->sf_uc, ucsz) != 0 ||
	    /* Chain the old %sp into the new window's %i6. */
	    copyout(&sp, &newsp->rw_in[6], sizeof(sp)) != 0);
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/* Resume in the handler: %o0-%o2 carry sig, siginfo, ucontext. */
	tf->tf_pc = (const vaddr_t)catcher;
	tf->tf_npc = (const vaddr_t)catcher + 4;
	tf->tf_out[0] = sig;
	tf->tf_out[1] = (vaddr_t)&fp->sf_si;
	tf->tf_out[2] = (vaddr_t)&fp->sf_uc;
	tf->tf_out[6] = (vaddr_t)newsp - STACK_OFFSET;
	/* Return address: trampoline minus the usual call offset (8). */
	tf->tf_out[7] = (vaddr_t)ps->sa_sigdesc[sig].sd_tramp - 8;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}
504
505 struct pcb dumppcb;
506
507 static void
maybe_dump(int howto)508 maybe_dump(int howto)
509 {
510 int s;
511
512 /* Disable interrupts. */
513 s = splhigh();
514
515 /* Do a dump if requested. */
516 if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
517 dumpsys();
518
519 splx(s);
520 }
521
522 void
cpu_reboot(int howto,char * user_boot_string)523 cpu_reboot(int howto, char *user_boot_string)
524 {
525 static bool syncdone = false;
526 int i;
527 static char str[128];
528 struct lwp *l;
529
530 l = (curlwp == NULL) ? &lwp0 : curlwp;
531
532 if (cold) {
533 howto |= RB_HALT;
534 goto haltsys;
535 }
536
537 #if NFB > 0
538 fb_unblank();
539 #endif
540 boothowto = howto;
541
542 /* If rebooting and a dump is requested, do it.
543 *
544 * XXX used to dump after vfs_shutdown() and before
545 * detaching devices / shutdown hooks / pmf_system_shutdown().
546 */
547 maybe_dump(howto);
548
549 /*
550 * If we've panic'd, don't make the situation potentially
551 * worse by syncing or unmounting the file systems.
552 */
553 if ((howto & RB_NOSYNC) == 0 && panicstr == NULL) {
554 if (!syncdone) {
555 syncdone = true;
556 /* XXX used to force unmount as well, here */
557 vfs_sync_all(l);
558 /*
559 * If we've been adjusting the clock, the todr
560 * will be out of synch; adjust it now.
561 *
562 * resettodr will only do this only if inittodr()
563 * has already been called.
564 *
565 * XXX used to do this after unmounting all
566 * filesystems with vfs_shutdown().
567 */
568 resettodr();
569 }
570
571 while (vfs_unmountall1(l, false, false) ||
572 config_detach_all(boothowto) ||
573 vfs_unmount_forceone(l))
574 ; /* do nothing */
575 } else
576 suspendsched();
577
578 pmf_system_shutdown(boothowto);
579
580 splhigh();
581
582 haltsys:
583 doshutdownhooks();
584
585 #ifdef MULTIPROCESSOR
586 /* Stop all secondary cpus */
587 mp_halt_cpus();
588 #endif
589
590 /* If powerdown was requested, do it. */
591 if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
592 #ifdef MULTIPROCESSOR
593 printf("cpu%d: powered down\n\n", cpu_number());
594 #else
595 printf("powered down\n\n");
596 #endif
597 /* Let the OBP do the work. */
598 OF_poweroff();
599 printf("WARNING: powerdown failed!\n");
600 /*
601 * RB_POWERDOWN implies RB_HALT... fall into it...
602 */
603 }
604
605 if (howto & RB_HALT) {
606 #ifdef MULTIPROCESSOR
607 printf("cpu%d: halted\n\n", cpu_number());
608 #else
609 printf("halted\n\n");
610 #endif
611 OF_exit();
612 panic("PROM exit failed");
613 }
614
615 #ifdef MULTIPROCESSOR
616 printf("cpu%d: rebooting\n\n", cpu_number());
617 #else
618 printf("rebooting\n\n");
619 #endif
620 if (user_boot_string && *user_boot_string) {
621 i = strlen(user_boot_string);
622 if (i > sizeof(str))
623 OF_boot(user_boot_string); /* XXX */
624 memcpy(str, user_boot_string, i);
625 } else {
626 i = 1;
627 str[0] = '\0';
628 }
629
630 if (howto & RB_SINGLE)
631 str[i++] = 's';
632 if (howto & RB_KDB)
633 str[i++] = 'd';
634 if (i > 1) {
635 if (str[0] == '\0')
636 str[0] = '-';
637 str[i] = 0;
638 } else
639 str[0] = 0;
640 OF_boot(str);
641 panic("cpu_reboot -- failed");
642 /*NOTREACHED*/
643 }
644
uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore; pages of core dumped */
long	dumplo = 0;		/* dump start offset (blocks) on dumpdev */
648
/*
 * Configure crash-dump placement: compute the dump size (all physical
 * memory plus MMU metadata) and place it at the end of the dump
 * partition, leaving room at the front for a disk label.
 */
void
cpu_dumpconf(void)
{
	int nblks, dumpblks;	/* partition size / dump size, in blocks */

	if (dumpdev == NODEV)
		/* No usable dump device */
		return;
	nblks = bdev_size(dumpdev);

	dumpblks = ctod(physmem) + pmap_dumpsize();
	if (dumpblks > (nblks - ctod(1)))
		/*
		 * dump size is too big for the partition.
		 * Note, we safeguard a click at the front for a
		 * possible disk label.
		 */
		return;

	/* Put the dump at the end of the partition */
	dumplo = nblks - dumpblks;

	/*
	 * savecore(8) expects dumpsize to be the number of pages
	 * of actual core dumped (i.e. excluding the MMU stuff).
	 */
	dumpsize = physmem;
}
677
678 #define BYTES_PER_DUMP MAXPHYS /* must be a multiple of pagesize */
679 static vaddr_t dumpspace;
680
681 void *
reserve_dumppages(void * p)682 reserve_dumppages(void *p)
683 {
684
685 dumpspace = (vaddr_t)p;
686 return (char *)p + BYTES_PER_DUMP;
687 }
688
689 /*
690 * Write a crash dump.
691 */
692 void
dumpsys(void)693 dumpsys(void)
694 {
695 const struct bdevsw *bdev;
696 int psize;
697 daddr_t blkno;
698 int (*dump)(dev_t, daddr_t, void *, size_t);
699 int j, error = 0;
700 uint64_t todo;
701 struct mem_region *mp;
702
703 /* copy registers to dumppcb and flush windows */
704 memset(&dumppcb, 0, sizeof(struct pcb));
705 snapshot(&dumppcb);
706 stackdump();
707
708 if (dumpdev == NODEV)
709 return;
710 bdev = bdevsw_lookup(dumpdev);
711 if (bdev == NULL || bdev->d_psize == NULL)
712 return;
713
714 /*
715 * For dumps during autoconfiguration,
716 * if dump device has already configured...
717 */
718 if (dumpsize == 0)
719 cpu_dumpconf();
720 if (!dumpspace) {
721 printf("\nno address space available, dump not possible\n");
722 return;
723 }
724 if (dumplo <= 0) {
725 printf("\ndump to dev %" PRId32 ",%" PRId32 " not possible ("
726 "partition too small?)\n", major(dumpdev), minor(dumpdev));
727 return;
728 }
729 printf("\ndumping to dev %" PRId32 ",%" PRId32 " offset %ld\n",
730 major(dumpdev), minor(dumpdev), dumplo);
731
732 psize = bdev_size(dumpdev);
733 if (psize == -1) {
734 printf("dump area unavailable\n");
735 return;
736 }
737 blkno = dumplo;
738 dump = bdev->d_dump;
739
740 error = pmap_dumpmmu(dump, blkno);
741 blkno += pmap_dumpsize();
742
743 /* calculate total size of dump */
744 for (todo = 0, j = 0; j < phys_installed_size; j++)
745 todo += phys_installed[j].size;
746
747 for (mp = &phys_installed[0], j = 0; j < phys_installed_size;
748 j++, mp = &phys_installed[j]) {
749 uint64_t i = 0, n, off;
750 paddr_t maddr = mp->start;
751
752 for (; i < mp->size; i += n) {
753 n = mp->size - i;
754 if (n > BYTES_PER_DUMP)
755 n = BYTES_PER_DUMP;
756
757 /* print out how many MBs we still have to dump */
758 if ((todo % (1024*1024)) == 0)
759 printf_nolog("\r%6" PRIu64 " M ",
760 todo / (1024*1024));
761 for (off = 0; off < n; off += PAGE_SIZE)
762 pmap_kenter_pa(dumpspace+off, maddr+off,
763 VM_PROT_READ, 0);
764 error = (*dump)(dumpdev, blkno,
765 (void *)dumpspace, (size_t)n);
766 pmap_kremove(dumpspace, n);
767 if (error)
768 break;
769 maddr += n;
770 todo -= n;
771 blkno += btodb(n);
772 }
773 }
774
775 switch (error) {
776
777 case ENXIO:
778 printf("- device bad\n");
779 break;
780
781 case EFAULT:
782 printf("- device not ready\n");
783 break;
784
785 case EINVAL:
786 printf("- area improper\n");
787 break;
788
789 case EIO:
790 printf("- i/o error\n");
791 break;
792
793 case 0:
794 printf("\rdump succeeded\n");
795 break;
796
797 default:
798 printf("- error %d\n", error);
799 break;
800 }
801 }
802
803 void trapdump(struct trapframe64*);
804 /*
805 * dump out a trapframe.
806 */
807 void
trapdump(struct trapframe64 * tf)808 trapdump(struct trapframe64* tf)
809 {
810 printf("TRAPFRAME: tstate=%llx pc=%llx npc=%llx y=%x\n",
811 (unsigned long long)tf->tf_tstate, (unsigned long long)tf->tf_pc,
812 (unsigned long long)tf->tf_npc, (unsigned)tf->tf_y);
813 printf("%%g1-7: %llx %llx %llx %llx %llx %llx %llx\n",
814 (unsigned long long)tf->tf_global[1],
815 (unsigned long long)tf->tf_global[2],
816 (unsigned long long)tf->tf_global[3],
817 (unsigned long long)tf->tf_global[4],
818 (unsigned long long)tf->tf_global[5],
819 (unsigned long long)tf->tf_global[6],
820 (unsigned long long)tf->tf_global[7]);
821 printf("%%o0-7: %llx %llx %llx %llx\n %llx %llx %llx %llx\n",
822 (unsigned long long)tf->tf_out[0],
823 (unsigned long long)tf->tf_out[1],
824 (unsigned long long)tf->tf_out[2],
825 (unsigned long long)tf->tf_out[3],
826 (unsigned long long)tf->tf_out[4],
827 (unsigned long long)tf->tf_out[5],
828 (unsigned long long)tf->tf_out[6],
829 (unsigned long long)tf->tf_out[7]);
830 }
831
832 static void
get_symbol_and_offset(const char ** mod,const char ** sym,vaddr_t * offset,vaddr_t pc)833 get_symbol_and_offset(const char **mod, const char **sym, vaddr_t *offset, vaddr_t pc)
834 {
835 static char symbuf[256];
836 unsigned long symaddr;
837
838 #if NKSYMS || defined(DDB) || defined(MODULAR)
839 if (ksyms_getname(mod, sym, pc,
840 KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY) == 0) {
841 if (ksyms_getval(*mod, *sym, &symaddr,
842 KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY) != 0)
843 goto failed;
844
845 *offset = (vaddr_t)(pc - symaddr);
846 return;
847 }
848 #endif
849 failed:
850 snprintf(symbuf, sizeof symbuf, "%llx", (unsigned long long)pc);
851 *mod = "netbsd";
852 *sym = symbuf;
853 *offset = 0;
854 }
855
856 /*
857 * get the fp and dump the stack as best we can. don't leave the
858 * current stack page
859 */
860 void
stackdump(void)861 stackdump(void)
862 {
863 struct frame32 *fp = (struct frame32 *)getfp(), *sfp;
864 struct frame64 *fp64;
865 const char *mod, *sym;
866 vaddr_t offset;
867
868 sfp = fp;
869 printf("Frame pointer is at %p\n", fp);
870 printf("Call traceback:\n");
871 while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
872 if( ((long)fp) & 1 ) {
873 fp64 = (struct frame64*)(((char*)fp)+BIAS);
874 /* 64-bit frame */
875 get_symbol_and_offset(&mod, &sym, &offset, fp64->fr_pc);
876 printf(" %s:%s+%#llx(%llx, %llx, %llx, %llx, %llx, %llx) fp = %llx\n",
877 mod, sym,
878 (unsigned long long)offset,
879 (unsigned long long)fp64->fr_arg[0],
880 (unsigned long long)fp64->fr_arg[1],
881 (unsigned long long)fp64->fr_arg[2],
882 (unsigned long long)fp64->fr_arg[3],
883 (unsigned long long)fp64->fr_arg[4],
884 (unsigned long long)fp64->fr_arg[5],
885 (unsigned long long)fp64->fr_fp);
886 fp = (struct frame32 *)(u_long)fp64->fr_fp;
887 } else {
888 /* 32-bit frame */
889 get_symbol_and_offset(&mod, &sym, &offset, fp->fr_pc);
890 printf(" %s:%s+%#lx(%x, %x, %x, %x, %x, %x) fp = %x\n",
891 mod, sym,
892 (unsigned long)offset,
893 fp->fr_arg[0],
894 fp->fr_arg[1],
895 fp->fr_arg[2],
896 fp->fr_arg[3],
897 fp->fr_arg[4],
898 fp->fr_arg[5],
899 fp->fr_fp);
900 fp = (struct frame32*)(u_long)fp->fr_fp;
901 }
902 }
903 }
904
905
/*
 * a.out executables are not supported on sparc64; always reject so
 * exec falls through to the next format handler.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
	return ENOEXEC;
}
911
912 /*
913 * Common function for DMA map creation. May be called by bus-specific
914 * DMA map creation functions.
915 */
916 int
_bus_dmamap_create(bus_dma_tag_t t,bus_size_t size,int nsegments,bus_size_t maxsegsz,bus_size_t boundary,int flags,bus_dmamap_t * dmamp)917 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
918 bus_size_t maxsegsz, bus_size_t boundary, int flags,
919 bus_dmamap_t *dmamp)
920 {
921 struct sparc_bus_dmamap *map;
922 void *mapstore;
923 size_t mapsize;
924
925 /*
926 * Allocate and initialize the DMA map. The end of the map
927 * is a variable-sized array of segments, so we allocate enough
928 * room for them in one shot.
929 *
930 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
931 * of ALLOCNOW notifies others that we've reserved these resources,
932 * and they are not to be freed.
933 *
934 * The bus_dmamap_t includes one bus_dma_segment_t, hence
935 * the (nsegments - 1).
936 */
937 mapsize = sizeof(struct sparc_bus_dmamap) +
938 (sizeof(bus_dma_segment_t) * (nsegments - 1));
939 if ((mapstore = malloc(mapsize, M_DMAMAP,
940 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
941 return (ENOMEM);
942
943 memset(mapstore, 0, mapsize);
944 map = (struct sparc_bus_dmamap *)mapstore;
945 map->_dm_size = size;
946 map->_dm_segcnt = nsegments;
947 map->_dm_maxmaxsegsz = maxsegsz;
948 map->_dm_boundary = boundary;
949 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT|BUS_DMA_COHERENT|
950 BUS_DMA_NOWRITE|BUS_DMA_NOCACHE);
951 map->dm_maxsegsz = maxsegsz;
952 map->dm_mapsize = 0; /* no valid mappings */
953 map->dm_nsegs = 0;
954
955 *dmamp = map;
956 return (0);
957 }
958
959 /*
960 * Common function for DMA map destruction. May be called by bus-specific
961 * DMA map destruction functions.
962 */
963 void
_bus_dmamap_destroy(bus_dma_tag_t t,bus_dmamap_t map)964 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
965 {
966 if (map->dm_nsegs)
967 bus_dmamap_unload(t, map);
968 free(map, M_DMAMAP);
969 }
970
971 /*
972 * Common function for loading a DMA map with a linear buffer. May
973 * be called by bus-specific DMA map load functions.
974 *
975 * Most SPARCs have IOMMUs in the bus controllers. In those cases
976 * they only need one segment and will use virtual addresses for DVMA.
977 * Those bus controllers should intercept these vectors and should
978 * *NEVER* call _bus_dmamap_load() which is used only by devices that
979 * bypass DVMA.
980 */
981 int
_bus_dmamap_load(bus_dma_tag_t t,bus_dmamap_t map,void * sbuf,bus_size_t buflen,struct proc * p,int flags)982 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *sbuf,
983 bus_size_t buflen, struct proc *p, int flags)
984 {
985 bus_size_t sgsize;
986 vaddr_t vaddr = (vaddr_t)sbuf;
987 long incr;
988 int i;
989
990 /*
991 * Make sure that on error condition we return "no valid mappings".
992 */
993 map->dm_nsegs = 0;
994 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
995
996 if (buflen > map->_dm_size)
997 {
998 #ifdef DEBUG
999 printf("_bus_dmamap_load(): error %lu > %lu -- map size exceeded!\n",
1000 (unsigned long)buflen, (unsigned long)map->_dm_size);
1001 #ifdef DDB
1002 Debugger();
1003 #endif
1004 #endif
1005 return (EINVAL);
1006 }
1007
1008 sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
1009
1010 /*
1011 * We always use just one segment.
1012 */
1013 i = 0;
1014 map->dm_segs[i].ds_addr = 0UL;
1015 map->dm_segs[i].ds_len = 0;
1016
1017 incr = PAGE_SIZE - (vaddr & PGOFSET);
1018 while (sgsize > 0) {
1019 paddr_t pa;
1020
1021 incr = min(sgsize, incr);
1022
1023 (void) pmap_extract(pmap_kernel(), vaddr, &pa);
1024 if (map->dm_segs[i].ds_len == 0)
1025 map->dm_segs[i].ds_addr = pa;
1026 if (pa == (map->dm_segs[i].ds_addr + map->dm_segs[i].ds_len)
1027 && ((map->dm_segs[i].ds_len + incr) <= map->dm_maxsegsz)) {
1028 /* Hey, waddyaknow, they're contiguous */
1029 map->dm_segs[i].ds_len += incr;
1030 } else {
1031 if (++i >= map->_dm_segcnt)
1032 return (EFBIG);
1033 map->dm_segs[i].ds_addr = pa;
1034 map->dm_segs[i].ds_len = incr;
1035 }
1036 sgsize -= incr;
1037 vaddr += incr;
1038 incr = PAGE_SIZE;
1039 }
1040 map->dm_nsegs = i + 1;
1041 map->dm_mapsize = buflen;
1042 /* Mapping is bus dependent */
1043 return (0);
1044 }
1045
1046 /*
1047 * Like _bus_dmamap_load(), but for mbufs.
1048 */
1049 int
_bus_dmamap_load_mbuf(bus_dma_tag_t t,bus_dmamap_t map,struct mbuf * m,int flags)1050 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
1051 int flags)
1052 {
1053 bus_dma_segment_t segs[MAX_DMA_SEGS];
1054 int i;
1055 size_t len;
1056
1057 /*
1058 * Make sure that on error condition we return "no valid mappings".
1059 */
1060 map->dm_nsegs = 0;
1061 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
1062
1063 if (m->m_pkthdr.len > map->_dm_size)
1064 return EINVAL;
1065
1066 /* Record mbuf for *_unload */
1067 map->_dm_type = _DM_TYPE_MBUF;
1068 map->_dm_source = (void *)m;
1069
1070 i = 0;
1071 len = 0;
1072 while (m) {
1073 vaddr_t vaddr = mtod(m, vaddr_t);
1074 long buflen = (long)m->m_len;
1075
1076 len += buflen;
1077 while (buflen > 0 && i < MAX_DMA_SEGS) {
1078 paddr_t pa;
1079 long incr;
1080
1081 incr = PAGE_SIZE - (vaddr & PGOFSET);
1082 incr = min(buflen, incr);
1083
1084 if (pmap_extract(pmap_kernel(), vaddr, &pa) == FALSE) {
1085 #ifdef DIAGNOSTIC
1086 printf("_bus_dmamap_load_mbuf: pmap_extract failed %lx\n",
1087 vaddr);
1088 #endif
1089 map->_dm_type = 0;
1090 map->_dm_source = NULL;
1091 return EINVAL;
1092 }
1093
1094 buflen -= incr;
1095 vaddr += incr;
1096
1097 if (i > 0 &&
1098 pa == (segs[i-1].ds_addr + segs[i-1].ds_len) &&
1099 ((segs[i-1].ds_len + incr) <=
1100 map->dm_maxsegsz)) {
1101 /* Hey, waddyaknow, they're contiguous */
1102 segs[i-1].ds_len += incr;
1103 continue;
1104 }
1105 segs[i].ds_addr = pa;
1106 segs[i].ds_len = incr;
1107 segs[i]._ds_boundary = 0;
1108 segs[i]._ds_align = 0;
1109 segs[i]._ds_mlist = NULL;
1110 i++;
1111 }
1112 m = m->m_next;
1113 if (m && i >= MAX_DMA_SEGS) {
1114 /* Exceeded the size of our dmamap */
1115 map->_dm_type = 0;
1116 map->_dm_source = NULL;
1117 return EFBIG;
1118 }
1119 }
1120
1121 #ifdef DEBUG
1122 {
1123 size_t mbuflen, sglen;
1124 int j;
1125 int retval;
1126
1127 mbuflen = 0;
1128 for (m = (struct mbuf *)map->_dm_source; m; m = m->m_next)
1129 mbuflen += (long)m->m_len;
1130 sglen = 0;
1131 for (j = 0; j < i; j++)
1132 sglen += segs[j].ds_len;
1133 if (sglen != mbuflen)
1134 panic("load_mbuf: sglen %ld != mbuflen %lx\n",
1135 sglen, mbuflen);
1136 if (sglen != len)
1137 panic("load_mbuf: sglen %ld != len %lx\n",
1138 sglen, len);
1139 retval = bus_dmamap_load_raw(t, map, segs, i,
1140 (bus_size_t)len, flags);
1141 if (retval == 0) {
1142 if (map->dm_mapsize != len)
1143 panic("load_mbuf: mapsize %ld != len %lx\n",
1144 (long)map->dm_mapsize, len);
1145 sglen = 0;
1146 for (j = 0; j < map->dm_nsegs; j++)
1147 sglen += map->dm_segs[j].ds_len;
1148 if (sglen != len)
1149 panic("load_mbuf: dmamap sglen %ld != len %lx\n",
1150 sglen, len);
1151 }
1152 return (retval);
1153 }
1154 #endif
1155 return (bus_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
1156 }
1157
1158 /*
1159 * Like _bus_dmamap_load(), but for uios.
1160 */
/*
 * Currently a stub: the real implementation is compiled out below and
 * this function unconditionally reports success without touching the
 * map.  NOTE(review): callers get 0 with no segments loaded -- confirm
 * nothing on this bus actually issues uio DMA loads.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags)
{
	/*
	 * XXXXXXX The problem with this routine is that it needs to
	 * lock the user address space that is being loaded, but there
	 * is no real way for us to unlock it during the unload process.
	 */
#if 0
	/*
	 * NOTE(review): this dead code has rotted and would not compile
	 * if enabled -- see the uvm_vsunlock() call and DM_TYPE_UIO
	 * below.
	 */
	bus_dma_segment_t segs[MAX_DMA_SEGS];
	int i, j;
	size_t len;
	struct proc *p = uio->uio_lwp->l_proc;
	struct pmap *pm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (uio->uio_segflg == UIO_USERSPACE) {
		pm = p->p_vmspace->vm_map.pmap;
	} else
		pm = pmap_kernel();

	i = 0;
	len = 0;
	for (j = 0; j < uio->uio_iovcnt; j++) {
		struct iovec *iov = &uio->uio_iov[j];
		vaddr_t vaddr = (vaddr_t)iov->iov_base;
		bus_size_t buflen = iov->iov_len;

		/*
		 * Lock the part of the user address space involved
		 * in the transfer.
		 */
		if (__predict_false(uvm_vslock(p->p_vmspace, vaddr, buflen,
			    (uio->uio_rw == UIO_WRITE) ?
			    VM_PROT_WRITE : VM_PROT_READ) != 0)) {
				goto after_vsunlock;
			}

		len += buflen;
		while (buflen > 0 && i < MAX_DMA_SEGS) {
			paddr_t pa;
			long incr;

			incr = min(buflen, PAGE_SIZE);
			/* NOTE(review): pmap_extract() failure is ignored. */
			(void) pmap_extract(pm, vaddr, &pa);
			buflen -= incr;
			vaddr += incr;
			if (segs[i].ds_len == 0)
				segs[i].ds_addr = pa;


			if (i > 0 && pa == (segs[i-1].ds_addr + segs[i-1].ds_len)
			    && ((segs[i-1].ds_len + incr) <= map->dm_maxsegsz)) {
				/* Hey, waddyaknow, they're contiguous */
				segs[i-1].ds_len += incr;
				continue;
			}
			segs[i].ds_addr = pa;
			segs[i].ds_len = incr;
			segs[i]._ds_boundary = 0;
			segs[i]._ds_align = 0;
			segs[i]._ds_mlist = NULL;
			i++;
		}
		/*
		 * NOTE(review): `bp' and `todo' are undefined here -- this
		 * looks copied from a buf-based variant; should probably be
		 * uvm_vsunlock(p->p_vmspace, iov->iov_base, iov->iov_len).
		 */
		uvm_vsunlock(p->p_vmspace, bp->b_data, todo);
		if (buflen > 0 && i >= MAX_DMA_SEGS)
			/* Exceeded the size of our dmamap */
			return EFBIG;
	}
	/* NOTE(review): likely a typo for _DM_TYPE_UIO. */
	map->_dm_type = DM_TYPE_UIO;
	map->_dm_source = (void *)uio;
	return (bus_dmamap_load_raw(t, map, segs, i,
	    (bus_size_t)len, flags));
#endif
	return 0;
}
1243
1244 /*
1245 * Like _bus_dmamap_load(), but for raw memory allocated with
1246 * bus_dmamem_alloc().
1247 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags)
{

	/*
	 * No machine-independent implementation: the bus front-end must
	 * supply its own load_raw via its DMA tag.  NOTE(review):
	 * mainbus_dma_tag points here, and _bus_dmamap_load_mbuf()
	 * calls bus_dmamap_load_raw(), so an mbuf load through the bare
	 * mainbus tag would panic -- verify every real bus overrides
	 * this entry.
	 */
	panic("_bus_dmamap_load_raw: not implemented");
}
1255
1256 /*
1257 * Common function for unloading a DMA map. May be called by
1258 * bus-specific DMA map unload functions.
1259 */
/*
 * Flush the D-cache for each segment that still carries its page list
 * (allocated via _bus_dmamem_alloc), then invalidate the map's segment
 * bookkeeping.  A segment without a page list means we only ever saw
 * virtual addresses, so the whole D-cache is blasted instead.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	int i;
	struct vm_page *pg;
	struct pglist *pglist;
	paddr_t pa;

	for (i = 0; i < map->dm_nsegs; i++) {
		if ((pglist = map->dm_segs[i]._ds_mlist) == NULL) {

			/*
			 * We were asked to load random VAs and lost the
			 * PA info so just blow the entire cache away.
			 */
			blast_dcache();
			/* No point flushing per page after that. */
			break;
		}
		TAILQ_FOREACH(pg, pglist, pageq.queue) {
			pa = VM_PAGE_TO_PHYS(pg);

			/*
			 * We should be flushing a subrange, but we
			 * don't know where the segments starts.
			 */
			dcache_flush_page_all(pa);
		}
	}

	/* Mark the mappings as invalid. */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

}
1295
1296 /*
1297 * Common function for DMA map synchronization. May be called
1298 * by bus-specific DMA map synchronization functions.
1299 */
/*
 * Common function for DMA map synchronization.  PRE ops only issue a
 * memory barrier; POSTREAD walks each segment's page list and flushes
 * the physical range [offset, offset+len) out of the cache, page by
 * page.  `offset' is consumed while skipping leading pages, `len'
 * while flushing; the walk stops as soon as `len' hits zero.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
	int i;
	struct vm_page *pg;
	struct pglist *pglist;

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive. We should optimize.
	 */
	if ((ops & BUS_DMASYNC_PREREAD) || (ops & BUS_DMASYNC_PREWRITE)) {

		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar_Sync();
	}
	if (ops & BUS_DMASYNC_POSTREAD) {
		/* Invalidate the vcache */
		for (i = 0; i < map->dm_nsegs; i++) {
			if ((pglist = map->dm_segs[i]._ds_mlist) == NULL)
				/* Should not really happen. */
				continue;
			TAILQ_FOREACH(pg, pglist, pageq.queue) {
				paddr_t start;
				psize_t size = PAGE_SIZE;

				if (offset < PAGE_SIZE) {
					/* Range begins within this page. */
					start = VM_PAGE_TO_PHYS(pg) + offset;
					size -= offset;
					if (size > len)
						size = len;
					cache_flush_phys(start, size, 0);
					len -= size;
					if (len == 0)
						goto done;
					/* Later pages flush from the top. */
					offset = 0;
					continue;
				}
				/* Still skipping pages before the range. */
				offset -= size;
			}
		}
	}
done:
	if (ops & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do. Handled by the bus controller. */
	}
}
1352
1353 extern paddr_t vm_first_phys, vm_num_phys;
1354 /*
1355 * Common function for DMA-safe memory allocation. May be called
1356 * by bus-specific DMA memory allocation functions.
1357 */
/*
 * Allocate physical pages for DMA from [vm_first_phys, end of managed
 * memory) and record them on a malloc'd pglist hung off segs[0].
 * Always reports exactly one logical segment; ds_addr is 0 because the
 * UPA bus performs no address translation.  The caller's original
 * boundary/alignment are preserved in _ds_boundary/_ds_align even when
 * DVMA makes them irrelevant for the allocation itself.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	vaddr_t low, high;
	struct pglist *pglist;
	int error;

	/* Always round the size. */
	size = round_page(size);
	low = vm_first_phys;
	high = vm_first_phys + vm_num_phys - PAGE_SIZE;

	/* The pglist itself is freed by _bus_dmamem_free(). */
	if ((pglist = malloc(sizeof(*pglist), M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	/*
	 * If the bus uses DVMA then ignore boundary and alignment.
	 */
	segs[0]._ds_boundary = boundary;
	segs[0]._ds_align = alignment;
	if (flags & BUS_DMA_DVMA) {
		boundary = 0;
		alignment = 0;
	}

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high,
	    alignment, boundary, pglist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error) {
		free(pglist, M_DEVBUF);
		return (error);
	}

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	segs[0].ds_addr = 0UL; /* UPA does not map things */
	segs[0].ds_len = size;
	*rsegs = 1;

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
	 * ARE IN OUR CUSTODY.
	 */
	segs[0]._ds_mlist = pglist;

	/* The bus driver should do the actual mapping */
	return (0);
}
1416
1417 /*
1418 * Common function for freeing DMA-safe memory. May be called by
1419 * bus-specific DMA memory free functions.
1420 */
/*
 * Release memory obtained from _bus_dmamem_alloc(): hand the pages
 * back to UVM and free the pglist header itself.  Only the
 * single-segment layout produced by _bus_dmamem_alloc() is supported.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	if (nsegs != 1)
		panic("bus_dmamem_free: nsegs = %d", nsegs);

	/*
	 * Return the list of pages back to the VM system.
	 */
	uvm_pglistfree(segs[0]._ds_mlist);
	free(segs[0]._ds_mlist, M_DEVBUF);
}
1434
1435 /*
1436 * Common function for mapping DMA-safe memory. May be called by
1437 * bus-specific DMA memory map functions.
1438 */
/*
 * Reserve a kernel-virtual range for DMA-safe memory and return it in
 * *kvap.  Only VA space is claimed here (uvm_map with PROT_NONE); the
 * actual page mappings are presumably entered by the bus-specific code
 * -- TODO confirm against the callers.  The range is over-allocated by
 * (align - PAGE_SIZE) so a start congruent with segs[0].ds_addr modulo
 * `align' can be carved out; the excess head/tail VA is returned.
 * Note align is fixed at PAGE_SIZE below, so the oversize and the
 * alignment adjustment are currently no-ops.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	vaddr_t va, sva;
	int r;
	size_t oversize;
	u_long align;

	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);

	align = PAGE_SIZE;

	size = round_page(size);

	/*
	 * Find a region of kernel virtual addresses that can accommodate
	 * our alignment requirements.
	 */
	oversize = size + align - PAGE_SIZE;
	r = uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0));
	if (r != 0)
		return (ENOMEM);

	/* Compute start of aligned region */
	va = sva;
	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);

	/* Return excess virtual addresses */
	if (va != sva)
		uvm_unmap(kernel_map, sva, va);
	if (va + size != sva + oversize)
		uvm_unmap(kernel_map, va + size, sva + oversize);

	*kvap = (void *)va;
	return (0);
}
1479
1480 /*
1481 * Common function for unmapping DMA-safe memory. May be called by
1482 * bus-specific DMA memory unmapping functions.
1483 */
1484 void
_bus_dmamem_unmap(bus_dma_tag_t t,void * kva,size_t size)1485 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1486 {
1487
1488 #ifdef DIAGNOSTIC
1489 if ((u_long)kva & PGOFSET)
1490 panic("_bus_dmamem_unmap");
1491 #endif
1492
1493 size = round_page(size);
1494 uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
1495 }
1496
/*
 * Common function for mmap(2)'ing DMA-safe memory. May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
1501 paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t,bus_dma_segment_t * segs,int nsegs,off_t off,int prot,int flags)1502 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
1503 int prot, int flags)
1504 {
1505 int i;
1506
1507 for (i = 0; i < nsegs; i++) {
1508 #ifdef DIAGNOSTIC
1509 if (off & PGOFSET)
1510 panic("_bus_dmamem_mmap: offset unaligned");
1511 if (segs[i].ds_addr & PGOFSET)
1512 panic("_bus_dmamem_mmap: segment unaligned");
1513 if (segs[i].ds_len & PGOFSET)
1514 panic("_bus_dmamem_mmap: segment size not multiple"
1515 " of page size");
1516 #endif
1517 if (off >= segs[i].ds_len) {
1518 off -= segs[i].ds_len;
1519 continue;
1520 }
1521
1522 return (atop(segs[i].ds_addr + off));
1523 }
1524
1525 /* Page not found. */
1526 return (-1);
1527 }
1528
1529
/*
 * DMA tag for devices attached directly to mainbus.  Positional
 * initializers: the order must match struct sparc_bus_dma_tag.
 * NOTE(review): the two leading NULLs are presumably the cookie and
 * parent-tag slots -- verify against the struct definition.
 */
struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,
	NULL,
	/* dmamap create/destroy/load/unload/sync methods */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,

	/* dmamem alloc/free/map/unmap/mmap methods */
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
1548
1549
1550 /*
1551 * Base bus space handlers.
1552 */
/*
 * Default implementations for the bus space methods; the bus_space_*()
 * wrappers below dispatch to these through _BS_CALL().
 */
static int sparc_bus_map(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	vaddr_t, bus_space_handle_t *);
static int sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t, bus_size_t);
static int sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t, bus_size_t,
	bus_size_t, bus_space_handle_t *);
static paddr_t sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t, int, int);
static void *sparc_mainbus_intr_establish(bus_space_tag_t, int, int,
	int (*)(void *), void *, void (*)(void));
static int sparc_bus_alloc(bus_space_tag_t, bus_addr_t, bus_addr_t, bus_size_t,
	bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *);
static void sparc_bus_free(bus_space_tag_t, bus_space_handle_t, bus_size_t);

/* I/O space extent map; NULL until initialized elsewhere (not visible here). */
struct extent *io_space = NULL;
1566
/* Dispatch bus_space_alloc through the tag's sparc_bus_alloc method. */
int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rs, bus_addr_t re, bus_size_t s,
	bus_size_t a, bus_size_t b, int f, bus_addr_t *ap,
	bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_alloc)(t, rs, re, s, a, b, f, ap, hp);
}
1574
/* Dispatch bus_space_free through the tag's sparc_bus_free method. */
void
bus_space_free(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
{
	_BS_CALL(t, sparc_bus_free)(t, h, s);
}
1580
/* Dispatch bus_space_map through the tag; the fixed VA hint is 0. */
int
bus_space_map(bus_space_tag_t t, bus_addr_t a, bus_size_t s, int f,
	bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_map)(t, a, s, f, 0, hp);
}
1587
/* Dispatch bus_space_unmap through the tag's sparc_bus_unmap method. */
void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
{
	_BS_VOID_CALL(t, sparc_bus_unmap)(t, h, s);
}
1593
/* Dispatch bus_space_subregion through the tag's method. */
int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
	bus_size_t s, bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_subregion)(t, h, o, s, hp);
}
1600
/* Dispatch bus_space_mmap through the tag's sparc_bus_mmap method. */
paddr_t
bus_space_mmap(bus_space_tag_t t, bus_addr_t a, off_t o, int p, int f)
{
	_BS_CALL(t, sparc_bus_mmap)(t, a, o, p, f);
}
1606
1607 /*
1608 * void bus_space_read_multi_N(bus_space_tag_t tag,
1609 * bus_space_handle_t bsh, bus_size_t offset,
1610 * uintN_t *addr, bus_size_t count);
1611 *
1612 * Read `count' 1, 2, 4, or 8 byte quantities from bus space
1613 * described by tag/handle/offset and copy into buffer provided.
1614 */
1615 void
bus_space_read_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)1616 bus_space_read_multi_1(bus_space_tag_t t, bus_space_handle_t h,
1617 bus_size_t o, uint8_t * a, bus_size_t c)
1618 {
1619 while (c-- > 0)
1620 *a++ = bus_space_read_1(t, h, o);
1621 }
1622
1623 void
bus_space_read_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)1624 bus_space_read_multi_2(bus_space_tag_t t, bus_space_handle_t h,
1625 bus_size_t o, uint16_t * a, bus_size_t c)
1626 {
1627 while (c-- > 0)
1628 *a++ = bus_space_read_2(t, h, o);
1629 }
1630
1631 void
bus_space_read_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)1632 bus_space_read_multi_4(bus_space_tag_t t, bus_space_handle_t h,
1633 bus_size_t o, uint32_t * a, bus_size_t c)
1634 {
1635 while (c-- > 0)
1636 *a++ = bus_space_read_4(t, h, o);
1637 }
1638
1639 void
bus_space_read_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)1640 bus_space_read_multi_8(bus_space_tag_t t, bus_space_handle_t h,
1641 bus_size_t o, uint64_t * a, bus_size_t c)
1642 {
1643 while (c-- > 0)
1644 *a++ = bus_space_read_8(t, h, o);
1645 }
1646
1647 /*
1648 * void bus_space_write_multi_N(bus_space_tag_t tag,
1649 * bus_space_handle_t bsh, bus_size_t offset,
1650 * const uintN_t *addr, bus_size_t count);
1651 *
1652 * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
1653 * provided to bus space described by tag/handle/offset.
1654 */
1655 void
bus_space_write_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)1656 bus_space_write_multi_1(bus_space_tag_t t,
1657 bus_space_handle_t h, bus_size_t o,
1658 const uint8_t *a, bus_size_t c)
1659 {
1660 while (c-- > 0)
1661 bus_space_write_1(t, h, o, *a++);
1662 }
1663
1664 void
bus_space_write_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)1665 bus_space_write_multi_2(bus_space_tag_t t,
1666 bus_space_handle_t h, bus_size_t o,
1667 const uint16_t *a, bus_size_t c)
1668 {
1669 while (c-- > 0)
1670 bus_space_write_2(t, h, o, *a++);
1671 }
1672
1673 void
bus_space_write_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)1674 bus_space_write_multi_4(bus_space_tag_t t,
1675 bus_space_handle_t h, bus_size_t o,
1676 const uint32_t *a, bus_size_t c)
1677 {
1678 while (c-- > 0)
1679 bus_space_write_4(t, h, o, *a++);
1680 }
1681
1682 void
bus_space_write_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)1683 bus_space_write_multi_8(bus_space_tag_t t,
1684 bus_space_handle_t h, bus_size_t o,
1685 const uint64_t *a, bus_size_t c)
1686 {
1687 while (c-- > 0)
1688 bus_space_write_8(t, h, o, *a++);
1689 }
1690
1691 /*
1692 * void bus_space_set_multi_stream_N(bus_space_tag_t tag,
1693 * bus_space_handle_t bsh, bus_size_t offset, uintN_t val,
1694 * bus_size_t count);
1695 *
1696 * Write the 1, 2, 4, or 8 byte value `val' to bus space described
1697 * by tag/handle/offset `count' times.
1698 */
1699 void
bus_space_set_multi_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t v,bus_size_t c)1700 bus_space_set_multi_stream_1(bus_space_tag_t t,
1701 bus_space_handle_t h, bus_size_t o, uint8_t v,
1702 bus_size_t c)
1703 {
1704 while (c-- > 0)
1705 bus_space_write_stream_1(t, h, o, v);
1706 }
1707
1708 void
bus_space_set_multi_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t v,bus_size_t c)1709 bus_space_set_multi_stream_2(bus_space_tag_t t,
1710 bus_space_handle_t h, bus_size_t o, uint16_t v,
1711 bus_size_t c)
1712 {
1713 while (c-- > 0)
1714 bus_space_write_stream_2(t, h, o, v);
1715 }
1716
1717 void
bus_space_set_multi_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t v,bus_size_t c)1718 bus_space_set_multi_stream_4(bus_space_tag_t t,
1719 bus_space_handle_t h, bus_size_t o, uint32_t v,
1720 bus_size_t c)
1721 {
1722 while (c-- > 0)
1723 bus_space_write_stream_4(t, h, o, v);
1724 }
1725
1726 void
bus_space_set_multi_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t v,bus_size_t c)1727 bus_space_set_multi_stream_8(bus_space_tag_t t,
1728 bus_space_handle_t h, bus_size_t o, uint64_t v,
1729 bus_size_t c)
1730 {
1731 while (c-- > 0)
1732 bus_space_write_stream_8(t, h, o, v);
1733 }
1734
1735 /*
1736 * void bus_space_copy_region_stream_N(bus_space_tag_t tag,
1737 * bus_space_handle_t bsh1, bus_size_t off1,
1738 * bus_space_handle_t bsh2, bus_size_t off2,
1739 * bus_size_t count);
1740 *
 * Copy `count' 1, 2, 4, or 8 byte values between the two bus space
 * regions.  NOTE(review): the implementations below read from
 * tag/bsh2/off2 and write to tag/bsh1/off1, which is the reverse of
 * the src-first argument order documented in bus_space(9) -- confirm
 * the intended direction.
1743 */
1744 void
bus_space_copy_region_stream_1(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)1745 bus_space_copy_region_stream_1(bus_space_tag_t t, bus_space_handle_t h1,
1746 bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1747 {
1748 for (; c; c--, o1++, o2++)
1749 bus_space_write_stream_1(t, h1, o1, bus_space_read_stream_1(t, h2, o2));
1750 }
1751
1752 void
bus_space_copy_region_stream_2(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)1753 bus_space_copy_region_stream_2(bus_space_tag_t t, bus_space_handle_t h1,
1754 bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1755 {
1756 for (; c; c--, o1+=2, o2+=2)
1757 bus_space_write_stream_2(t, h1, o1, bus_space_read_stream_2(t, h2, o2));
1758 }
1759
1760 void
bus_space_copy_region_stream_4(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)1761 bus_space_copy_region_stream_4(bus_space_tag_t t, bus_space_handle_t h1,
1762 bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1763 {
1764 for (; c; c--, o1+=4, o2+=4)
1765 bus_space_write_stream_4(t, h1, o1, bus_space_read_stream_4(t, h2, o2));
1766 }
1767
1768 void
bus_space_copy_region_stream_8(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)1769 bus_space_copy_region_stream_8(bus_space_tag_t t, bus_space_handle_t h1,
1770 bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1771 {
1772 for (; c; c--, o1+=8, o2+=8)
1773 bus_space_write_stream_8(t, h1, o1, bus_space_read_8(t, h2, o2));
1774 }
1775
1776 /*
1777 * void bus_space_set_region_stream_N(bus_space_tag_t tag,
1778 * bus_space_handle_t bsh, bus_size_t off,
1779 * uintN_t *addr, bus_size_t count);
1780 *
1781 */
1782 void
bus_space_set_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)1783 bus_space_set_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
1784 bus_size_t o, const uint8_t v, bus_size_t c)
1785 {
1786 for (; c; c--, o++)
1787 bus_space_write_stream_1(t, h, o, v);
1788 }
1789
1790 void
bus_space_set_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t v,bus_size_t c)1791 bus_space_set_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
1792 bus_size_t o, const uint16_t v, bus_size_t c)
1793 {
1794 for (; c; c--, o+=2)
1795 bus_space_write_stream_2(t, h, o, v);
1796 }
1797
1798 void
bus_space_set_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t v,bus_size_t c)1799 bus_space_set_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
1800 bus_size_t o, const uint32_t v, bus_size_t c)
1801 {
1802 for (; c; c--, o+=4)
1803 bus_space_write_stream_4(t, h, o, v);
1804 }
1805
1806 void
bus_space_set_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t v,bus_size_t c)1807 bus_space_set_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
1808 bus_size_t o, const uint64_t v, bus_size_t c)
1809 {
1810 for (; c; c--, o+=8)
1811 bus_space_write_stream_8(t, h, o, v);
1812 }
1813
1814
1815 /*
1816 * void bus_space_read_multi_stream_N(bus_space_tag_t tag,
1817 * bus_space_handle_t bsh, bus_size_t offset,
1818 * uintN_t *addr, bus_size_t count);
1819 *
1820 * Read `count' 1, 2, 4, or 8 byte quantities from bus space
1821 * described by tag/handle/offset and copy into buffer provided.
1822 */
1823 void
bus_space_read_multi_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)1824 bus_space_read_multi_stream_1(bus_space_tag_t t,
1825 bus_space_handle_t h, bus_size_t o,
1826 uint8_t *a, bus_size_t c)
1827 {
1828 while (c-- > 0)
1829 *a++ = bus_space_read_stream_1(t, h, o);
1830 }
1831
1832 void
bus_space_read_multi_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)1833 bus_space_read_multi_stream_2(bus_space_tag_t t,
1834 bus_space_handle_t h, bus_size_t o,
1835 uint16_t *a, bus_size_t c)
1836 {
1837 while (c-- > 0)
1838 *a++ = bus_space_read_stream_2(t, h, o);
1839 }
1840
1841 void
bus_space_read_multi_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)1842 bus_space_read_multi_stream_4(bus_space_tag_t t,
1843 bus_space_handle_t h, bus_size_t o,
1844 uint32_t *a, bus_size_t c)
1845 {
1846 while (c-- > 0)
1847 *a++ = bus_space_read_stream_4(t, h, o);
1848 }
1849
1850 void
bus_space_read_multi_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)1851 bus_space_read_multi_stream_8(bus_space_tag_t t,
1852 bus_space_handle_t h, bus_size_t o,
1853 uint64_t *a, bus_size_t c)
1854 {
1855 while (c-- > 0)
1856 *a++ = bus_space_read_stream_8(t, h, o);
1857 }
1858
1859 /*
1860 * void bus_space_read_region_stream_N(bus_space_tag_t tag,
1861 * bus_space_handle_t bsh, bus_size_t off,
1862 * uintN_t *addr, bus_size_t count);
1863 *
1864 */
1865 void
bus_space_read_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)1866 bus_space_read_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
1867 bus_size_t o, uint8_t *a, bus_size_t c)
1868 {
1869 for (; c; a++, c--, o++)
1870 *a = bus_space_read_stream_1(t, h, o);
1871 }
1872 void
bus_space_read_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)1873 bus_space_read_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
1874 bus_size_t o, uint16_t *a, bus_size_t c)
1875 {
1876 for (; c; a++, c--, o+=2)
1877 *a = bus_space_read_stream_2(t, h, o);
1878 }
1879 void
bus_space_read_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)1880 bus_space_read_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
1881 bus_size_t o, uint32_t *a, bus_size_t c)
1882 {
1883 for (; c; a++, c--, o+=4)
1884 *a = bus_space_read_stream_4(t, h, o);
1885 }
1886 void
bus_space_read_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)1887 bus_space_read_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
1888 bus_size_t o, uint64_t *a, bus_size_t c)
1889 {
1890 for (; c; a++, c--, o+=8)
1891 *a = bus_space_read_stream_8(t, h, o);
1892 }
1893
1894 /*
1895 * void bus_space_write_multi_stream_N(bus_space_tag_t tag,
1896 * bus_space_handle_t bsh, bus_size_t offset,
1897 * const uintN_t *addr, bus_size_t count);
1898 *
1899 * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
1900 * provided to bus space described by tag/handle/offset.
1901 */
1902 void
bus_space_write_multi_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)1903 bus_space_write_multi_stream_1(bus_space_tag_t t,
1904 bus_space_handle_t h, bus_size_t o,
1905 const uint8_t *a, bus_size_t c)
1906 {
1907 while (c-- > 0)
1908 bus_space_write_stream_1(t, h, o, *a++);
1909 }
1910
1911 void
bus_space_write_multi_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)1912 bus_space_write_multi_stream_2(bus_space_tag_t t,
1913 bus_space_handle_t h, bus_size_t o,
1914 const uint16_t *a, bus_size_t c)
1915 {
1916 while (c-- > 0)
1917 bus_space_write_stream_2(t, h, o, *a++);
1918 }
1919
1920 void
bus_space_write_multi_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)1921 bus_space_write_multi_stream_4(bus_space_tag_t t,
1922 bus_space_handle_t h, bus_size_t o,
1923 const uint32_t *a, bus_size_t c)
1924 {
1925 while (c-- > 0)
1926 bus_space_write_stream_4(t, h, o, *a++);
1927 }
1928
1929 void
bus_space_write_multi_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)1930 bus_space_write_multi_stream_8(bus_space_tag_t t,
1931 bus_space_handle_t h, bus_size_t o,
1932 const uint64_t *a, bus_size_t c)
1933 {
1934 while (c-- > 0)
1935 bus_space_write_stream_8(t, h, o, *a++);
1936 }
1937
1938 /*
1939 * void bus_space_copy_region_N(bus_space_tag_t tag,
1940 * bus_space_handle_t bsh1, bus_size_t off1,
1941 * bus_space_handle_t bsh2, bus_size_t off2,
1942 * bus_size_t count);
1943 *
 * Copy `count' 1, 2, 4, or 8 byte values between the two bus space
 * regions.  NOTE(review): the implementations below read from
 * tag/bsh2/off2 and write to tag/bsh1/off1, which is the reverse of
 * the src-first argument order documented in bus_space(9) -- confirm
 * the intended direction.
1946 */
1947 void
bus_space_copy_region_1(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)1948 bus_space_copy_region_1(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
1949 bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1950 {
1951 for (; c; c--, o1++, o2++)
1952 bus_space_write_1(t, h1, o1, bus_space_read_1(t, h2, o2));
1953 }
1954
1955 void
bus_space_copy_region_2(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)1956 bus_space_copy_region_2(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
1957 bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1958 {
1959 for (; c; c--, o1+=2, o2+=2)
1960 bus_space_write_2(t, h1, o1, bus_space_read_2(t, h2, o2));
1961 }
1962
1963 void
bus_space_copy_region_4(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)1964 bus_space_copy_region_4(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
1965 bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1966 {
1967 for (; c; c--, o1+=4, o2+=4)
1968 bus_space_write_4(t, h1, o1, bus_space_read_4(t, h2, o2));
1969 }
1970
1971 void
bus_space_copy_region_8(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)1972 bus_space_copy_region_8(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
1973 bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1974 {
1975 for (; c; c--, o1+=8, o2+=8)
1976 bus_space_write_8(t, h1, o1, bus_space_read_8(t, h2, o2));
1977 }
1978
1979 /*
1980 * void bus_space_set_region_N(bus_space_tag_t tag,
1981 * bus_space_handle_t bsh, bus_size_t off,
1982 * uintN_t *addr, bus_size_t count);
1983 *
1984 */
1985 void
bus_space_set_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)1986 bus_space_set_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
1987 const uint8_t v, bus_size_t c)
1988 {
1989 for (; c; c--, o++)
1990 bus_space_write_1(t, h, o, v);
1991 }
1992
1993 void
bus_space_set_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t v,bus_size_t c)1994 bus_space_set_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
1995 const uint16_t v, bus_size_t c)
1996 {
1997 for (; c; c--, o+=2)
1998 bus_space_write_2(t, h, o, v);
1999 }
2000
2001 void
bus_space_set_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t v,bus_size_t c)2002 bus_space_set_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2003 const uint32_t v, bus_size_t c)
2004 {
2005 for (; c; c--, o+=4)
2006 bus_space_write_4(t, h, o, v);
2007 }
2008
2009 void
bus_space_set_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t v,bus_size_t c)2010 bus_space_set_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2011 const uint64_t v, bus_size_t c)
2012 {
2013 for (; c; c--, o+=8)
2014 bus_space_write_8(t, h, o, v);
2015 }
2016
2017
2018 /*
2019 * void bus_space_set_multi_N(bus_space_tag_t tag,
2020 * bus_space_handle_t bsh, bus_size_t offset, uintN_t val,
2021 * bus_size_t count);
2022 *
2023 * Write the 1, 2, 4, or 8 byte value `val' to bus space described
2024 * by tag/handle/offset `count' times.
2025 */
2026 void
bus_space_set_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t v,bus_size_t c)2027 bus_space_set_multi_1(bus_space_tag_t t,
2028 bus_space_handle_t h, bus_size_t o, uint8_t v,
2029 bus_size_t c)
2030 {
2031 while (c-- > 0)
2032 bus_space_write_1(t, h, o, v);
2033 }
2034
2035 void
bus_space_set_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t v,bus_size_t c)2036 bus_space_set_multi_2(bus_space_tag_t t,
2037 bus_space_handle_t h, bus_size_t o, uint16_t v,
2038 bus_size_t c)
2039 {
2040 while (c-- > 0)
2041 bus_space_write_2(t, h, o, v);
2042 }
2043
2044 void
bus_space_set_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t v,bus_size_t c)2045 bus_space_set_multi_4(bus_space_tag_t t,
2046 bus_space_handle_t h, bus_size_t o, uint32_t v,
2047 bus_size_t c)
2048 {
2049 while (c-- > 0)
2050 bus_space_write_4(t, h, o, v);
2051 }
2052
2053 void
bus_space_set_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t v,bus_size_t c)2054 bus_space_set_multi_8(bus_space_tag_t t,
2055 bus_space_handle_t h, bus_size_t o, uint64_t v,
2056 bus_size_t c)
2057 {
2058 while (c-- > 0)
2059 bus_space_write_8(t, h, o, v);
2060 }
2061
2062 /*
2063 * void bus_space_write_region_N(bus_space_tag_t tag,
2064 * bus_space_handle_t bsh, bus_size_t off,
2065 * uintN_t *addr, bus_size_t count);
2066 *
2067 */
2068 void
bus_space_write_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)2069 bus_space_write_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2070 const uint8_t *a, bus_size_t c)
2071 {
2072 for (; c; a++, c--, o++)
2073 bus_space_write_1(t, h, o, *a);
2074 }
2075
2076 void
bus_space_write_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)2077 bus_space_write_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2078 const uint16_t *a, bus_size_t c)
2079 {
2080 for (; c; a++, c--, o+=2)
2081 bus_space_write_2(t, h, o, *a);
2082 }
2083
2084 void
bus_space_write_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)2085 bus_space_write_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2086 const uint32_t *a, bus_size_t c)
2087 {
2088 for (; c; a++, c--, o+=4)
2089 bus_space_write_4(t, h, o, *a);
2090 }
2091
2092 void
bus_space_write_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)2093 bus_space_write_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2094 const uint64_t *a, bus_size_t c)
2095 {
2096 for (; c; a++, c--, o+=8)
2097 bus_space_write_8(t, h, o, *a);
2098 }
2099
2100
2101 /*
2102 * void bus_space_read_region_N(bus_space_tag_t tag,
2103 * bus_space_handle_t bsh, bus_size_t off,
2104 * uintN_t *addr, bus_size_t count);
2105 *
2106 */
2107 void
bus_space_read_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)2108 bus_space_read_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2109 uint8_t *a, bus_size_t c)
2110 {
2111 for (; c; a++, c--, o++)
2112 *a = bus_space_read_1(t, h, o);
2113 }
2114 void
bus_space_read_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2115 bus_space_read_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2116 uint16_t *a, bus_size_t c)
2117 {
2118 for (; c; a++, c--, o+=2)
2119 *a = bus_space_read_2(t, h, o);
2120 }
2121 void
bus_space_read_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2122 bus_space_read_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2123 uint32_t *a, bus_size_t c)
2124 {
2125 for (; c; a++, c--, o+=4)
2126 *a = bus_space_read_4(t, h, o);
2127 }
2128 void
bus_space_read_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2129 bus_space_read_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2130 uint64_t *a, bus_size_t c)
2131 {
2132 for (; c; a++, c--, o+=8)
2133 *a = bus_space_read_8(t, h, o);
2134 }
2135
2136 /*
2137 * void bus_space_write_region_stream_N(bus_space_tag_t tag,
2138 * bus_space_handle_t bsh, bus_size_t off,
2139 * uintN_t *addr, bus_size_t count);
2140 *
2141 */
2142 void
bus_space_write_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)2143 bus_space_write_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
2144 bus_size_t o, const uint8_t *a, bus_size_t c)
2145 {
2146 for (; c; a++, c--, o++)
2147 bus_space_write_stream_1(t, h, o, *a);
2148 }
2149
2150 void
bus_space_write_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)2151 bus_space_write_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
2152 bus_size_t o, const uint16_t *a, bus_size_t c)
2153 {
2154 for (; c; a++, c--, o+=2)
2155 bus_space_write_stream_2(t, h, o, *a);
2156 }
2157
2158 void
bus_space_write_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)2159 bus_space_write_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
2160 bus_size_t o, const uint32_t *a, bus_size_t c)
2161 {
2162 for (; c; a++, c--, o+=4)
2163 bus_space_write_stream_4(t, h, o, *a);
2164 }
2165
2166 void
bus_space_write_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)2167 bus_space_write_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
2168 bus_size_t o, const uint64_t *a, bus_size_t c)
2169 {
2170 for (; c; a++, c--, o+=8)
2171 bus_space_write_stream_8(t, h, o, *a);
2172 }
2173
2174 /*
2175 * Allocate a new bus tag and have it inherit the methods of the
2176 * given parent.
2177 */
2178 bus_space_tag_t
bus_space_tag_alloc(bus_space_tag_t parent,void * cookie)2179 bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
2180 {
2181 struct sparc_bus_space_tag *sbt;
2182
2183 sbt = malloc(sizeof(struct sparc_bus_space_tag),
2184 M_DEVBUF, M_NOWAIT|M_ZERO);
2185 if (sbt == NULL)
2186 return (NULL);
2187
2188 if (parent) {
2189 memcpy(sbt, parent, sizeof(*sbt));
2190 sbt->parent = parent;
2191 sbt->ranges = NULL;
2192 sbt->nranges = 0;
2193 }
2194
2195 sbt->cookie = cookie;
2196 return (sbt);
2197 }
2198
2199 /*
2200 * Generic routine to translate an address using OpenPROM `ranges'.
2201 */
2202 int
bus_space_translate_address_generic(struct openprom_range * ranges,int nranges,bus_addr_t * bap)2203 bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
2204 bus_addr_t *bap)
2205 {
2206 int i, space = BUS_ADDR_IOSPACE(*bap);
2207
2208 for (i = 0; i < nranges; i++) {
2209 struct openprom_range *rp = &ranges[i];
2210
2211 if (rp->or_child_space != space)
2212 continue;
2213
2214 /* We've found the connection to the parent bus. */
2215 *bap = BUS_ADDR(rp->or_parent_space,
2216 rp->or_parent_base + BUS_ADDR_PADDR(*bap));
2217 return (0);
2218 }
2219
2220 return (EINVAL);
2221 }
2222
/*
 * Map bus space at `addr' for `size' bytes, returning a handle in *hp.
 *
 * Three strategies, in order:
 *  - PCI config space is never mapped: the handle carries the physical
 *    address plus MMU-bypass ASIs (config space is huge, rarely used).
 *  - On LP64 kernels other than sun4v, non-LINEAR requests are also
 *    served by physical-address handles with bypass ASIs.
 *  - Otherwise KVA is carved from the IODEV extent and each page is
 *    entered into the kernel pmap (non-cacheable unless the caller
 *    asked for BUS_SPACE_MAP_CACHEABLE).
 *
 * `unused' is a placeholder to match the bus_space method signature.
 */
int
sparc_bus_map(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
	int flags, vaddr_t unused, bus_space_handle_t *hp)
{
	vaddr_t v;
	uint64_t pa;
	paddr_t pm_flags = 0;
	vm_prot_t pm_prot = VM_PROT_READ;
	int err, map_little = 0;

	/* Lazily create the I/O device VA extent on first use. */
	if (io_space == NULL)
		/*
		 * And set up IOSPACE extents.
		 */
		io_space = extent_create("IOSPACE",
		    (u_long)IODEV_BASE, (u_long)IODEV_END,
		    0, 0, EX_NOWAIT);


	size = round_page(size);
	if (size == 0) {
		printf("sparc_bus_map: zero size\n");
		return (EINVAL);
	}
	switch (t->type) {
	case PCI_CONFIG_BUS_SPACE:
		/*
		 * PCI config space is special.
		 *
		 * It's really big and seldom used. In order not to run
		 * out of IO mappings, config space will not be mapped in,
		 * rather it will be accessed through MMU bypass ASI accesses.
		 */
		if (flags & BUS_SPACE_MAP_LINEAR)
			return (-1);
		hp->_ptr = addr;
		hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
		hp->_sasi = ASI_PHYS_NON_CACHED;
		DPRINTF(BSDB_MAP, ("\n%s: config type %x flags %x "
			"addr %016llx size %016llx virt %llx\n", __func__,
			(int)t->type, (int) flags, (unsigned long long)addr,
			(unsigned long long)size,
			(unsigned long long)hp->_ptr));
		return (0);
	case PCI_IO_BUS_SPACE:
		map_little = 1;
		break;
	case PCI_MEMORY_BUS_SPACE:
		map_little = 1;
		break;
	default:
		map_little = 0;
		break;
	}

#ifdef _LP64
	if (!CPU_ISSUN4V) {
		/* If it's not LINEAR don't bother to map it. Use phys accesses. */
		if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
			hp->_ptr = addr;
			if (map_little)
				hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
			else
				hp->_asi = ASI_PHYS_NON_CACHED;
			hp->_sasi = ASI_PHYS_NON_CACHED;
			return (0);
		}
	}
#endif

	/* Device mappings default to non-cacheable. */
	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pm_flags |= PMAP_NC;

	if ((err = extent_alloc(io_space, size, PAGE_SIZE,
		0, EX_NOWAIT|EX_BOUNDZERO, (u_long *)&v)))
			panic("sparc_bus_map: cannot allocate io_space: %d", err);

	/* note: preserve page offset */
	hp->_ptr = (v | ((u_long)addr & PGOFSET));
	hp->_sasi = ASI_PRIMARY;
	if (map_little)
		hp->_asi = ASI_PRIMARY_LITTLE;
	else
		hp->_asi = ASI_PRIMARY;

	pa = trunc_page(addr);
	if (!(flags&BUS_SPACE_MAP_READONLY))
		pm_prot |= VM_PROT_WRITE;

	DPRINTF(BSDB_MAP, ("\n%s: type %x flags %x addr %016llx prot %02x "
		"pm_flags %x size %016llx virt %llx paddr %016llx\n", __func__,
		(int)t->type, (int)flags, (unsigned long long)addr, pm_prot,
		(int)pm_flags, (unsigned long long)size,
		(unsigned long long)hp->_ptr, (unsigned long long)pa));

	/* Enter each page of the (page-rounded) region into the pmap. */
	do {
		DPRINTF(BSDB_MAP, ("%s: phys %llx virt %p hp %llx\n",
			__func__,
			(unsigned long long)pa, (char *)v,
			(unsigned long long)hp->_ptr));
		pmap_kenter_pa(v, pa | pm_flags, pm_prot, 0);
		v += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);
	return (0);
}
2329
2330 int
sparc_bus_subregion(bus_space_tag_t tag,bus_space_handle_t handle,bus_size_t offset,bus_size_t size,bus_space_handle_t * nhandlep)2331 sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
2332 bus_size_t offset, bus_size_t size, bus_space_handle_t *nhandlep)
2333 {
2334 nhandlep->_ptr = handle._ptr + offset;
2335 nhandlep->_asi = handle._asi;
2336 nhandlep->_sasi = handle._sasi;
2337 return (0);
2338 }
2339
2340 int
sparc_bus_unmap(bus_space_tag_t t,bus_space_handle_t bh,bus_size_t size)2341 sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
2342 {
2343 vaddr_t va = trunc_page((vaddr_t)bh._ptr);
2344 vaddr_t endva = va + round_page(size);
2345 int error = 0;
2346
2347 if (PHYS_ASI(bh._asi)) return (0);
2348
2349 error = extent_free(io_space, va, size, EX_NOWAIT);
2350 if (error) printf("sparc_bus_unmap: extent_free returned %d\n", error);
2351
2352 pmap_remove(pmap_kernel(), va, endva);
2353 return (0);
2354 }
2355
2356 paddr_t
sparc_bus_mmap(bus_space_tag_t t,bus_addr_t paddr,off_t off,int prot,int flags)2357 sparc_bus_mmap(bus_space_tag_t t, bus_addr_t paddr, off_t off, int prot,
2358 int flags)
2359 {
2360 /* Devices are un-cached... although the driver should do that */
2361 return ((paddr+off)|PMAP_NC);
2362 }
2363
2364
2365 void *
sparc_mainbus_intr_establish(bus_space_tag_t t,int pil,int level,int (* handler)(void *),void * arg,void (* fastvec)(void))2366 sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
2367 int (*handler)(void *), void *arg, void (*fastvec)(void) /* ignored */)
2368 {
2369 struct intrhand *ih;
2370
2371 ih = intrhand_alloc();
2372 ih->ih_fun = handler;
2373 ih->ih_arg = arg;
2374 intr_establish(pil, level != IPL_VM, ih);
2375 return (ih);
2376 }
2377
2378 int
sparc_bus_alloc(bus_space_tag_t t,bus_addr_t rs,bus_addr_t re,bus_size_t s,bus_size_t a,bus_size_t b,int f,bus_addr_t * ap,bus_space_handle_t * hp)2379 sparc_bus_alloc(bus_space_tag_t t, bus_addr_t rs, bus_addr_t re, bus_size_t s,
2380 bus_size_t a, bus_size_t b, int f, bus_addr_t *ap, bus_space_handle_t *hp)
2381 {
2382 return (ENOTTY);
2383 }
2384
2385 void
sparc_bus_free(bus_space_tag_t t,bus_space_handle_t h,bus_size_t s)2386 sparc_bus_free(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
2387 {
2388 return;
2389 }
2390
/* Root bus space tag: methods for devices attached directly to mainbus. */
struct sparc_bus_space_tag mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	NULL,				/* ranges */
	0,				/* nranges */
	UPA_BUS_SPACE,			/* type */
	sparc_bus_alloc,		/* bus_space_alloc (always ENOTTY) */
	sparc_bus_free,			/* bus_space_free (no-op) */
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish	/* bus_intr_establish */
};
2405
2406
/*
 * Capture the machine context of lwp `l' into *mcp, for getcontext(2)
 * and friends.  Sets _UC_CPU|_UC_TLSBASE in *flags, plus _UC_FPU when
 * FP state was saved.  The lwp is killed with SIGILL if its register
 * windows cannot be flushed to the user stack.
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	__greg_t *gr = mcp->__gregs;
	__greg_t ras_pc;
	const struct trapframe64 *tf = l->l_md.md_tf;

	/* First ensure consistent stack state (see sendsig). */ /* XXX? */
	write_user_windows();
	if (rwindow_save(l)) {
		/* Window spill to userland faulted: terminate the lwp. */
		mutex_enter(l->l_proc->p_lock);
		sigexit(l, SIGILL);
	}

	/* For now: Erase any random indicators for optional state. */
	(void)memset(mcp, 0, sizeof (*mcp));

	/* Save general register context. */
#ifdef __arch64__
	gr[_REG_CCR] = (tf->tf_tstate & TSTATE_CCR) >> TSTATE_CCR_SHIFT;
#else
	gr[_REG_PSR] = TSTATECCR_TO_PSR(tf->tf_tstate);
#endif
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_nPC] = tf->tf_npc;
	gr[_REG_Y] = tf->tf_y;
	gr[_REG_G1] = tf->tf_global[1];
	gr[_REG_G2] = tf->tf_global[2];
	gr[_REG_G3] = tf->tf_global[3];
	gr[_REG_G4] = tf->tf_global[4];
	gr[_REG_G5] = tf->tf_global[5];
	gr[_REG_G6] = tf->tf_global[6];
	gr[_REG_G7] = tf->tf_global[7];
	gr[_REG_O0] = tf->tf_out[0];
	gr[_REG_O1] = tf->tf_out[1];
	gr[_REG_O2] = tf->tf_out[2];
	gr[_REG_O3] = tf->tf_out[3];
	gr[_REG_O4] = tf->tf_out[4];
	gr[_REG_O5] = tf->tf_out[5];
	gr[_REG_O6] = tf->tf_out[6];
	gr[_REG_O7] = tf->tf_out[7];
#ifdef __arch64__
	gr[_REG_ASI] = (tf->tf_tstate & TSTATE_ASI) >> TSTATE_ASI_SHIFT;
#if 0 /* not yet supported */
	gr[_REG_FPRS] = ;
#endif
#endif /* __arch64__ */

	/* If inside a restartable atomic sequence, report its restart PC. */
	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
	    (void *) gr[_REG_PC])) != -1) {
		gr[_REG_PC] = ras_pc;
		gr[_REG_nPC] = ras_pc + 4;
	}

	*flags |= (_UC_CPU|_UC_TLSBASE);

	/* Register windows are not captured. */
	mcp->__gwins = NULL;


	/* Save FP register context, if any. */
	if (l->l_md.md_fpstate != NULL) {
		struct fpstate64 *fsp;
		__fpregset_t *fpr = &mcp->__fpregs;

		/*
		 * If our FP context is currently held in the FPU, take a
		 * private snapshot - lazy FPU context switching can deal
		 * with it later when it becomes necessary.
		 * Otherwise, get it from the process's save area.
		 */
		fpusave_lwp(l, true);
		fsp = l->l_md.md_fpstate;
		memcpy(&fpr->__fpu_fr, fsp->fs_regs, sizeof (fpr->__fpu_fr));
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_fsr = fsp->fs_fsr;
		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
		mcp->__fpregs.__fpu_q_entrysize =
		    (unsigned char) sizeof (*mcp->__fpregs.__fpu_q);
		mcp->__fpregs.__fpu_en = 1;
		*flags |= _UC_FPU;
	} else {
		mcp->__fpregs.__fpu_en = 0;
	}

	mcp->__xrs.__xrs_id = 0;	/* Solaris extension? */
}
2493
2494 int
cpu_mcontext_validate(struct lwp * l,const mcontext_t * mc)2495 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mc)
2496 {
2497 const __greg_t *gr = mc->__gregs;
2498
2499 /*
2500 * Only the icc bits in the psr are used, so it need not be
2501 * verified. pc and npc must be multiples of 4. This is all
2502 * that is required; if it holds, just do it.
2503 */
2504 if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
2505 gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
2506 return EINVAL;
2507
2508 return 0;
2509 }
2510
/*
 * Install the machine context *mcp into lwp `l', for setcontext(2) and
 * signal return.  `flags' selects which parts of the context to load
 * (_UC_CPU, _UC_FPU, _UC_TLSBASE, _UC_SETSTACK/_UC_CLRSTACK).
 * Returns 0 or EINVAL if the general-register context is rejected by
 * cpu_mcontext_validate().
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	const __greg_t *gr = mcp->__gregs;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct proc *p = l->l_proc;
	int error;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(l)) {
		/* Window spill to userland faulted: terminate the lwp. */
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
	}

	if ((flags & _UC_CPU) != 0) {
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Restore general register context. */
		/* take only tstate CCR (and ASI) fields */
#ifdef __arch64__
		tf->tf_tstate = (tf->tf_tstate & ~(TSTATE_CCR | TSTATE_ASI)) |
		    ((gr[_REG_CCR] << TSTATE_CCR_SHIFT) & TSTATE_CCR) |
		    ((gr[_REG_ASI] << TSTATE_ASI_SHIFT) & TSTATE_ASI);
#else
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_CCR) |
		    PSRCC_TO_TSTATE(gr[_REG_PSR]);
#endif
		tf->tf_pc = (uint64_t)gr[_REG_PC];
		tf->tf_npc = (uint64_t)gr[_REG_nPC];
		tf->tf_y = (uint64_t)gr[_REG_Y];
		tf->tf_global[1] = (uint64_t)gr[_REG_G1];
		tf->tf_global[2] = (uint64_t)gr[_REG_G2];
		tf->tf_global[3] = (uint64_t)gr[_REG_G3];
		tf->tf_global[4] = (uint64_t)gr[_REG_G4];
		tf->tf_global[5] = (uint64_t)gr[_REG_G5];
		tf->tf_global[6] = (uint64_t)gr[_REG_G6];
		/* done in lwp_setprivate */
		/* tf->tf_global[7] = (uint64_t)gr[_REG_G7]; */
		tf->tf_out[0] = (uint64_t)gr[_REG_O0];
		tf->tf_out[1] = (uint64_t)gr[_REG_O1];
		tf->tf_out[2] = (uint64_t)gr[_REG_O2];
		tf->tf_out[3] = (uint64_t)gr[_REG_O3];
		tf->tf_out[4] = (uint64_t)gr[_REG_O4];
		tf->tf_out[5] = (uint64_t)gr[_REG_O5];
		tf->tf_out[6] = (uint64_t)gr[_REG_O6];
		tf->tf_out[7] = (uint64_t)gr[_REG_O7];
		/* %asi restored above; %fprs not yet supported. */

		/* XXX mcp->__gwins */

		/* %g7 is the TLS base; route it through lwp_setprivate(). */
		if (flags & _UC_TLSBASE)
			lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_G7]);
	}

	/* Restore FP register context, if any. */
	if ((flags & _UC_FPU) != 0 && mcp->__fpregs.__fpu_en != 0) {
		struct fpstate64 *fsp;
		const __fpregset_t *fpr = &mcp->__fpregs;

		/*
		 * If we're the current FPU owner, simply reload it from
		 * the supplied context.  Otherwise, store it into the
		 * process' FPU save area (which is used to restore from
		 * by lazy FPU context switching); allocate it if necessary.
		 */
		if ((fsp = l->l_md.md_fpstate) == NULL) {
			fsp = pool_cache_get(fpstate_cache, PR_WAITOK);
			l->l_md.md_fpstate = fsp;
		} else {
			/* Drop the live context on the floor. */
			fpusave_lwp(l, false);
		}
		/* Note: sizeof fpr->__fpu_fr <= sizeof fsp->fs_regs. */
		memcpy(fsp->fs_regs, &fpr->__fpu_fr, sizeof (fpr->__fpu_fr));
		fsp->fs_fsr = mcp->__fpregs.__fpu_fsr;
		fsp->fs_qsize = 0;

#if 0
		/* Need more info! */
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
#endif
	}

	/* XXX mcp->__xrs */
	/* XXX mcp->__asrs */

	/* Update the signal-stack flags under the proc lock. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return 0;
}
2610
2611 /*
2612 * Preempt the current process if in interrupt from user mode,
2613 * or after the current trap/syscall if in system mode.
2614 */
void
cpu_need_resched(struct cpu_info *ci, int flags)
{

	/* Post a reschedule request and an AST so the CPU notices it. */
	ci->ci_want_resched = 1;
	ci->ci_want_ast = 1;

#ifdef MULTIPROCESSOR
	/* The local CPU will see its AST on the way out of the trap. */
	if (ci == curcpu())
		return;
	/* Just interrupt the target CPU, so it can notice its AST */
	if ((flags & RESCHED_IMMED) != 0 &&
	    ci->ci_data.cpu_onproc != ci->ci_data.cpu_idlelwp)
		sparc64_send_ipi(ci->ci_cpuid, sparc64_ipi_nop, 0, 0);
#endif
}
2631
2632 /*
2633 * Notify an LWP that it has a signal pending, process as soon as possible.
2634 */
void
cpu_signotify(struct lwp *l)
{
	struct cpu_info *ci = l->l_cpu;

	/* Post an AST on the lwp's CPU ... */
	ci->ci_want_ast = 1;
#ifdef MULTIPROCESSOR
	/* ... and poke a remote CPU with a no-op IPI so it sees it. */
	if (ci != curcpu())
		sparc64_send_ipi(ci->ci_cpuid, sparc64_ipi_nop, 0, 0);
#endif
}
2646
2647 bool
cpu_intr_p(void)2648 cpu_intr_p(void)
2649 {
2650
2651 return curcpu()->ci_idepth >= 0;
2652 }
2653
#ifdef MODULAR
/* Machine-dependent module initialization: nothing needed on sparc64. */
void
module_init_md(void)
{
}
#endif
2660
2661 int
mm_md_physacc(paddr_t pa,vm_prot_t prot)2662 mm_md_physacc(paddr_t pa, vm_prot_t prot)
2663 {
2664
2665 return pmap_pa_exists(pa) ? 0 : EFAULT;
2666 }
2667
/*
 * Decide whether a kernel-VA access at `ptr' is handled specially:
 * any access to the message buffer, or a read-only access to the PROM
 * window, is allowed without further checks (*handled = true).
 */
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{
	/* XXX: Don't know where PROMs are on Ultras. Think it's at f000000 */
	/*
	 * NOTE(review): prom_vstart = 0xf000000 is one hex digit shorter
	 * than prom_vend = 0xf0100000; if the PROM actually starts at
	 * 0xf0000000 this window is far larger than intended -- confirm.
	 */
	const vaddr_t prom_vstart = 0xf000000, prom_vend = 0xf0100000;
	const vaddr_t msgbufpv = (vaddr_t)msgbufp, v = (vaddr_t)ptr;
	const size_t msgbufsz = msgbufp->msg_bufs +
	    offsetof(struct kern_msgbuf, msg_bufc);

	*handled = (v >= msgbufpv && v < msgbufpv + msgbufsz) ||
	    (v >= prom_vstart && v < prom_vend && (prot & VM_PROT_WRITE) == 0);
	return 0;
}
2681
2682 int
mm_md_readwrite(dev_t dev,struct uio * uio)2683 mm_md_readwrite(dev_t dev, struct uio *uio)
2684 {
2685
2686 return ENXIO;
2687 }
2688
#ifdef __arch64__
/*
 * Inspect a 64-bit binary's code-model note and enable a top-down VM
 * layout when the model can tolerate high load addresses.
 */
void
sparc64_elf_mcmodel_check(struct exec_package *epp, const char *model,
	size_t len)
{
	/* no model specific execution for 32bit processes */
	if (epp->ep_flags & EXEC_32)
		return;

#ifdef __USE_TOPDOWN_VM
	/*
	 * Only binaries compiled with the medany or medmid code model
	 * are safe under a top-down address-space layout.
	 */
	if (strncmp(model, "medany", len) != 0 &&
	    strncmp(model, "medmid", len) != 0)
		return;
	epp->ep_flags |= EXEC_TOPDOWN_VM;
#endif
}
#endif
2709