xref: /openbsd/sys/arch/sparc64/sparc64/machdep.c (revision 28d09237)
1 /*	$OpenBSD: machdep.c,v 1.218 2024/05/22 05:51:49 jsg Exp $	*/
2 /*	$NetBSD: machdep.c,v 1.108 2001/07/24 19:30:14 eeh Exp $ */
3 
4 /*-
5  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10  * NASA Ames Research Center.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 1992, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  *
38  * This software was developed by the Computer Systems Engineering group
39  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
40  * contributed to Berkeley.
41  *
42  * All advertising materials mentioning features or use of this software
43  * must display the following acknowledgement:
44  *	This product includes software developed by the University of
45  *	California, Lawrence Berkeley Laboratory.
46  *
47  * Redistribution and use in source and binary forms, with or without
48  * modification, are permitted provided that the following conditions
49  * are met:
50  * 1. Redistributions of source code must retain the above copyright
51  *    notice, this list of conditions and the following disclaimer.
52  * 2. Redistributions in binary form must reproduce the above copyright
53  *    notice, this list of conditions and the following disclaimer in the
54  *    documentation and/or other materials provided with the distribution.
55  * 3. Neither the name of the University nor the names of its contributors
56  *    may be used to endorse or promote products derived from this software
57  *    without specific prior written permission.
58  *
59  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69  * SUCH DAMAGE.
70  *
71  *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
72  */
73 
74 #include <sys/param.h>
75 #include <sys/signal.h>
76 #include <sys/signalvar.h>
77 #include <sys/proc.h>
78 #include <sys/user.h>
79 #include <sys/buf.h>
80 #include <sys/device.h>
81 #include <sys/reboot.h>
82 #include <sys/systm.h>
83 #include <sys/kernel.h>
84 #include <sys/conf.h>
85 #include <sys/malloc.h>
86 #include <sys/mbuf.h>
87 #include <sys/mount.h>
88 #include <sys/syscallargs.h>
89 #include <sys/exec.h>
90 
91 #include <net/if.h>
92 
93 #include <sys/sysctl.h>
94 #include <sys/exec_elf.h>
95 
96 #define _SPARC_BUS_DMA_PRIVATE
97 #include <machine/bus.h>
98 #include <machine/frame.h>
99 #include <machine/cpu.h>
100 #include <machine/pmap.h>
101 #include <machine/openfirm.h>
102 #include <machine/sparc64.h>
103 
104 #include "pckbc.h"
105 #include "pckbd.h"
106 #if (NPCKBC > 0) && (NPCKBD == 0)
107 #include <dev/ic/pckbcvar.h>
108 #endif
109 
110 int     _bus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, int,
111 	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
112 void    _bus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
113 int     _bus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
114             bus_size_t, struct proc *, int);
115 int     _bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
116             struct mbuf *, int);
117 int     _bus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
118             struct uio *, int);
119 int     _bus_dmamap_load_raw(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
120             bus_dma_segment_t *, int, bus_size_t, int);
121 int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
122 	    bus_size_t, struct proc *, int, bus_addr_t *, int *, int);
123 
124 void    _bus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
125 void    _bus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
126 	    bus_addr_t, bus_size_t, int);
127 
128 int     _bus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t tag, bus_size_t size,
129             bus_size_t alignment, bus_size_t boundary,
130             bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
131 
132 void    _bus_dmamem_free(bus_dma_tag_t tag, bus_dma_tag_t,
133 	    bus_dma_segment_t *segs, int nsegs);
134 int     _bus_dmamem_map(bus_dma_tag_t tag, bus_dma_tag_t,
135 	    bus_dma_segment_t *segs, int nsegs, size_t size, caddr_t *kvap,
136 	    int flags);
137 void    _bus_dmamem_unmap(bus_dma_tag_t tag, bus_dma_tag_t, caddr_t kva,
138             size_t size);
139 paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_tag_t,
140 	    bus_dma_segment_t *segs, int nsegs, off_t off, int prot, int flags);
141 
/*
 * The "bus_space_debug" flags used by macros elsewhere.
 * A good set of flags to use when first debugging something is:
 * int bus_space_debug = BSDB_ACCESS | BSDB_ASSERT | BSDB_MAP;
 */
int bus_space_debug = 0;

/* Submaps of kernel_map; created in cpu_startup() below. */
struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;

/* No machine-dependent DMA address constraints: full physical range. */
struct uvm_constraint_range  dma_constraint = { 0x0, (paddr_t)-1 };
struct uvm_constraint_range *uvm_md_constraints[] = { NULL };

/* Total amount of physical memory, in pages. */
int	physmem;

/* Non-zero enables LED blinking; toggled via CPU_LED_BLINK sysctl below. */
int sparc_led_blink = 1;

#ifdef APERTURE
/* Controls userland access to the graphics aperture (CPU_ALLOWAPERTURE). */
int allowaperture = 0;
#endif

/* Correctable ECC error statistics, defined elsewhere; exported via sysctl. */
extern int ceccerrs;
extern int64_t cecclast;

/*
 * Maximum number of DMA segments we'll allow in dmamem_load()
 * routines.  Can be overridden in config files, etc.
 */
#ifndef MAX_DMA_SEGS
#define MAX_DMA_SEGS	20
#endif

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int   safepri = 0;

void blink_led_timeout(void *);
void	dumpsys(void);
void	stackdump(void);
183 
/*
 * Machine-dependent startup code: print the version banner and memory
 * totals, wire up proc0's u-area, create the exec-argument and physio
 * submaps, and initialize the buffer cache.
 */
void
cpu_startup(void)
{
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;	/* saved so we can restore it below */
#endif
	vaddr_t minaddr, maxaddr;
	extern struct user *proc0paddr;

#ifdef DEBUG
	/* Silence pmap tracing for the duration of startup. */
	pmapdebug = 0;
#endif

	/* Attach proc0's u-area and record its physical address. */
	proc0.p_addr = proc0paddr;
	(void)pmap_extract(pmap_kernel(), (vaddr_t)proc0paddr,
	    &proc0.p_md.md_pcbpaddr);

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s", version);
	printf("real mem = %lu (%luMB)\n", ptoa((psize_t)physmem),
	    ptoa((psize_t)physmem)/1024/1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	minaddr = vm_map_min(kernel_map);
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa((psize_t)uvmexp.free),
	    ptoa((psize_t)uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
238 
/*
 * Set up registers on exec.
 */

/* Copy a single register value out to the user-space location l. */
#define CPOUTREG(l,v)	copyout(&(v), (l), sizeof(v))

/*
 * Reset the process's machine state for exec: generate a fresh
 * StackGhost window cookie, pick the memory model requested by the
 * ELF header, discard any FPU state, and rebuild the trapframe so
 * execution starts at the new entry point with a clean register set.
 */
void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    struct ps_strings *arginfo)
{
	struct trapframe *tf = p->p_md.md_tf;
	int64_t tstate;
	int pstate = PSTATE_USER;
	Elf_Ehdr *eh = pack->ep_hdr;

	/*
	 * Setup the process StackGhost cookie which will be XORed into
	 * the return pointer as register windows are over/underflowed.
	 */
	arc4random_buf(&p->p_addr->u_pcb.pcb_wcookie,
	    sizeof(p->p_addr->u_pcb.pcb_wcookie));

	/* The cookie needs to guarantee invalid alignment after the XOR. */
	switch (p->p_addr->u_pcb.pcb_wcookie % 3) {
	case 0: /* Set two lsbs. */
		p->p_addr->u_pcb.pcb_wcookie |= 0x3;
		break;
	case 1: /* Set the lsb. */
		p->p_addr->u_pcb.pcb_wcookie = 1 |
		    (p->p_addr->u_pcb.pcb_wcookie & ~0x3);
		break;
	case 2: /* Set the second most lsb. */
		p->p_addr->u_pcb.pcb_wcookie = 2 |
		    (p->p_addr->u_pcb.pcb_wcookie & ~0x3);
		break;
	}

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec())
	 *	%tstate: (retain icc and xcc and cwp bits)
	 *	%tpc,%tnpc: entry point of program
	 */
	/* Check what memory model is requested */
	switch ((eh->e_flags & EF_SPARCV9_MM)) {
	default:
		printf("Unknown memory model %d\n",
		       (eh->e_flags & EF_SPARCV9_MM));
		/* FALLTHROUGH */
	case EF_SPARCV9_TSO:
		pstate = PSTATE_MM_TSO|PSTATE_IE;
		break;
	case EF_SPARCV9_PSO:
		pstate = PSTATE_MM_PSO|PSTATE_IE;
		break;
	case EF_SPARCV9_RMO:
		pstate = PSTATE_MM_RMO|PSTATE_IE;
		break;
	}

	tstate = ((u_int64_t)ASI_PRIMARY_NO_FAULT << TSTATE_ASI_SHIFT) |
	    (pstate << TSTATE_PSTATE_SHIFT) | (tf->tf_tstate & TSTATE_CWP);
	if (p->p_md.md_fpstate != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		fpusave_proc(p, 0);
		free(p->p_md.md_fpstate, M_SUBPROC, sizeof(struct fpstate));
		p->p_md.md_fpstate = NULL;
	}
	/* Wipe the whole trapframe, then re-establish the few live fields. */
	memset(tf, 0, sizeof *tf);
	tf->tf_tstate = tstate;
	tf->tf_pc = pack->ep_entry & ~3;	/* entry must be 4-aligned */
	tf->tf_npc = tf->tf_pc + 4;
	/* Reserve a register window save area; %o6 carries the BIAS'ed sp. */
	stack -= sizeof(struct rwindow);
	tf->tf_out[6] = stack - BIAS;
#ifdef NOTDEF_DEBUG
	printf("setregs: setting tf %p sp %p pc %p\n", (long)tf,
	       (long)tf->tf_out[6], (long)tf->tf_pc);
#endif
}
322 
/*
 * Signal frame layout, built on the user stack (or the alternate
 * signal stack) by sendsig() below and later consumed by sigreturn.
 */
struct sigframe {
	int	sf_signo;		/* signal number */
	int	sf_code;		/* signal code (unused) */
	siginfo_t *sf_sip;		/* points to siginfo_t */
	struct	sigcontext sf_sc;	/* actual sigcontext */
	siginfo_t sf_si;		/* in-frame siginfo, if requested */
};
330 
/*
 * machine dependent system variables.
 *
 * Terminal handler for the machdep.* sysctl tree: LED blinking, the
 * graphics aperture knob, CPU type and correctable-ECC statistics.
 */
int
cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	int oldval, ret;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */

	switch (name[0]) {
	case CPU_LED_BLINK:
		oldval = sparc_led_blink;
		ret = sysctl_int(oldp, oldlenp, newp, newlen,
		    &sparc_led_blink);
		/*
		 * If we were false and are now true, start the timer.
		 */
		if (!oldval && sparc_led_blink > oldval)
			blink_led_timeout(NULL);
		return (ret);
	case CPU_ALLOWAPERTURE:
#ifdef APERTURE
		/* At raised securelevel the value may only be lowered. */
		if (securelevel > 0)
			return (sysctl_int_lower(oldp, oldlenp, newp, newlen,
			    &allowaperture));
		else
			return (sysctl_int(oldp, oldlenp, newp, newlen,
			    &allowaperture));
#else
		/* Aperture support not compiled in: read-only zero. */
		return (sysctl_rdint(oldp, oldlenp, newp, 0));
#endif
	case CPU_CPUTYPE:
		return (sysctl_rdint(oldp, oldlenp, newp, cputyp));
	case CPU_CECCERRORS:
		return (sysctl_rdint(oldp, oldlenp, newp, ceccerrs));
	case CPU_CECCLAST:
		return (sysctl_rdquad(oldp, oldlenp, newp, cecclast));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
377 
/*
 * Send an interrupt to process.
 *
 * Builds a struct sigframe in kernel space, copies it out to the user
 * stack (or the alternate signal stack), and redirects the trapframe so
 * the process resumes in the signal trampoline with the handler in %g1.
 * Returns non-zero if the user stack could not be written, in which
 * case the caller is expected to deliver SIGILL instead.
 */
int
sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip,
    int info, int onstack)
{
	struct proc *p = curproc;
	struct sigframe *fp;
	struct trapframe *tf;
	vaddr_t addr, oldsp, newsp;
	struct sigframe sf;

	tf = p->p_md.md_tf;
	/* %o6 holds the BIAS'ed stack pointer; undo the bias here. */
	oldsp = tf->tf_out[6] + BIAS;

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
	    !sigonstack(oldsp) && onstack)
		fp = (struct sigframe *)
		    trunc_page((vaddr_t)p->p_sigstk.ss_sp + p->p_sigstk.ss_size);
	else
		fp = (struct sigframe *)oldsp;
	/* Allocate an aligned sigframe */
	fp = (struct sigframe *)((long)(fp - 1) & ~0x0f);

	/*
	 * Now set up the signal frame.  We build it in kernel space
	 * and then copy it out.  We probably ought to just build it
	 * directly in user space....
	 */
	bzero(&sf, sizeof(sf));
	sf.sf_signo = sig;
	sf.sf_sip = NULL;

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_mask = mask;
	/* Save register context. */
	sf.sf_sc.sc_sp = (long)tf->tf_out[6];
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
	sf.sf_sc.sc_tstate = tf->tf_tstate; /* XXX */
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/* Only include siginfo when the handler asked for it (SA_SIGINFO). */
	if (info) {
		sf.sf_sip = &fp->sf_si;
		sf.sf_si = *ksip;
	}

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
	newsp = (vaddr_t)fp - sizeof(struct rwindow);
	write_user_windows();

	/* XOR the frame address with the per-process sigreturn cookie. */
	sf.sf_sc.sc_cookie = (long)&fp->sf_sc ^ p->p_p->ps_sigcookie;
	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
	    CPOUTREG(&(((struct rwindow *)newsp)->rw_in[6]), tf->tf_out[6])) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		printf("sendsig: stack was trashed trying to send sig %d, "
		    "sending SIGILL\n", sig);
#endif
		return 1;
	}

	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
	addr = p->p_p->ps_sigcode;
	tf->tf_global[1] = (vaddr_t)catcher;
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = newsp - BIAS;

	return 0;
}
471 
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above),
 * and return to the given trap frame (if there is one).
 * Check carefully to make sure that the user has not
 * modified the state to gain improper privileges or to cause
 * a machine fault.
 */
int
sys_sigreturn(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext ksc, *scp = SCARG(uap, sigcntxp);
	struct trapframe *tf;
	int error = EINVAL;

	/* sigreturn may only be entered from the signal trampoline. */
	if (PROC_PC(p) != p->p_p->ps_sigcoderet) {
		sigexit(p, SIGILL);
		return (EPERM);
	}

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();

	if (rwindow_save(p)) {
#ifdef DEBUG
		printf("sigreturn: rwindow_save(%p) failed, sending SIGILL\n",
		    p);
#endif
		sigexit(p, SIGILL);
	}

	/* The user's sigcontext pointer must be word aligned. */
	if ((vaddr_t)scp & 3)
		return (EINVAL);
	if ((error = copyin((caddr_t)scp, &ksc, sizeof ksc)))
		return (error);

	/* Verify the cookie written by sendsig; mismatch means forgery. */
	if (ksc.sc_cookie != ((long)scp ^ p->p_p->ps_sigcookie)) {
		sigexit(p, SIGILL);
		return (EFAULT);
	}

	/* Prevent reuse of the sigcontext cookie */
	ksc.sc_cookie = 0;
	(void)copyout(&ksc.sc_cookie, (caddr_t)scp +
	    offsetof(struct sigcontext, sc_cookie), sizeof (ksc.sc_cookie));

	scp = &ksc;

	tf = p->p_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
	if (((ksc.sc_pc | ksc.sc_npc) & 3) != 0 ||
	    (ksc.sc_pc == 0) || (ksc.sc_npc == 0)) {
#ifdef DEBUG
		printf("sigreturn: pc %p or npc %p invalid\n",
		   (void *)(unsigned long)ksc.sc_pc,
		   (void *)(unsigned long)ksc.sc_npc);
#endif
		return (EINVAL);
	}

	/* take only psr ICC field */
	tf->tf_tstate = (u_int64_t)(tf->tf_tstate & ~TSTATE_CCR) | (scp->sc_tstate & TSTATE_CCR);
	tf->tf_pc = (u_int64_t)scp->sc_pc;
	tf->tf_npc = (u_int64_t)scp->sc_npc;
	tf->tf_global[1] = (u_int64_t)scp->sc_g1;
	tf->tf_out[0] = (u_int64_t)scp->sc_o0;
	tf->tf_out[6] = (u_int64_t)scp->sc_sp;

	/* Restore signal mask. */
	p->p_sigmask = scp->sc_mask & ~sigcantmask;

	return (EJUSTRETURN);
}
553 
/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
void
signotify(struct proc *p)
{
	/* Post an AST so the signal is noticed on return to user mode. */
	aston(p);
	/* Kick the CPU the process is on in case it is idling. */
	cpu_unidle(p->p_cpu);
}
564 
int	waittime = -1;		/* set to 0 once vfs_shutdown() has run */
struct pcb dumppcb;		/* register snapshot taken for crash dumps */

/*
 * Machine-dependent halt/reboot/powerdown.  Syncs filesystems (unless
 * RB_NOSYNC), optionally dumps core (RB_DUMP), powers down device
 * state, then hands control to the OpenBoot PROM to power off, halt,
 * or reboot with an "-sd"-style argument string.  Never returns.
 */
__dead void
boot(int howto)
{
	int i;
	static char str[128];	/* boot argument string passed to the PROM */

	if ((howto & RB_RESET) != 0)
		goto doreset;

	if (cold) {
		/*
		 * Too early in boot to sync or shut anything down;
		 * halt unless the user explicitly asked for a reboot.
		 */
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	fb_unblank();
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown(curproc);

		if ((howto & RB_TIMEBAD) == 0) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}
	if_downall();

	uvm_shutdown();
	splhigh();
	cold = 1;

	if ((howto & RB_DUMP) != 0)
		dumpsys();

haltsys:
	config_suspend_all(DVACT_POWERDOWN);

	/* If powerdown was requested, do it. */
	if ((howto & RB_POWERDOWN) != 0) {
		/* Let the OBP do the work. */
		OF_poweroff();
		printf("WARNING: powerdown failed!\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
	}

	if ((howto & RB_HALT) != 0) {
		printf("halted\n\n");
		OF_exit();
		panic("PROM exit failed");
	}

doreset:
	printf("rebooting\n\n");
#if 0
	if (user_boot_string && *user_boot_string) {
		i = strlen(user_boot_string);
		if (i > sizeof(str))
			OF_boot(user_boot_string);	/* XXX */
		bcopy(user_boot_string, str, i);
	} else
#endif
	{
		i = 1;
		str[0] = '\0';
	}

	/* Append single-user/debugger flags as a "-sd"-style argument. */
	if ((howto & RB_SINGLE) != 0)
		str[i++] = 's';
	if ((howto & RB_KDB) != 0)
		str[i++] = 'd';
	if (i > 1) {
		if (str[0] == '\0')
			str[0] = '-';
		str[i] = 0;
	} else
		str[0] = 0;
	OF_boot(str);
	panic("cpu_reboot -- failed");
	for (;;)
		continue;
	/* NOTREACHED */
}
654 
u_long	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;		/* dump start block, set by dumpconf() */
658 
659 void
dumpconf(void)660 dumpconf(void)
661 {
662 	int nblks, dumpblks;
663 
664 	if (dumpdev == NODEV ||
665 	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
666 		return;
667 	if (nblks <= ctod(1))
668 		return;
669 
670 	dumpblks = ctod(physmem) + pmap_dumpsize();
671 	if (dumpblks > (nblks - ctod(1)))
672 		/*
673 		 * dump size is too big for the partition.
674 		 * Note, we safeguard a click at the front for a
675 		 * possible disk label.
676 		 */
677 		return;
678 
679 	/* Put the dump at the end of the partition */
680 	dumplo = nblks - dumpblks;
681 
682 	/*
683 	 * savecore(8) expects dumpsize to be the number of pages
684 	 * of actual core dumped (i.e. excluding the MMU stuff).
685 	 */
686 	dumpsize = physmem;
687 }
688 
#define	BYTES_PER_DUMP	(NBPG)	/* must be a multiple of pagesize */
static vaddr_t dumpspace;	/* scratch KVA window used by dumpsys() */

/*
 * Reserve BYTES_PER_DUMP of kernel virtual address space at p for
 * mapping physical pages during dumpsys(); returns the next free
 * address past the reservation.
 */
caddr_t
reserve_dumppages(caddr_t p)
{

	dumpspace = (vaddr_t)p;
	return (p + BYTES_PER_DUMP);
}
699 
/*
 * Write a crash dump.
 *
 * Snapshots the registers, then writes the MMU metadata followed by
 * every physical memory region to the dump device, one BYTES_PER_DUMP
 * window at a time through the dumpspace KVA reserved above.
 */
void
dumpsys(void)
{
	int psize;
	daddr_t blkno;		/* next device block to write */
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	int error = 0;
	struct mem_region *mp;
	extern struct mem_region *mem;

	/* copy registers to memory */
	snapshot(&dumppcb);
	stackdump();

	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (!dumpspace) {
		printf("\nno address space available, dump not possible\n");
		return;
	}
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	blkno = dumplo;
	dump = bdevsw[major(dumpdev)].d_dump;

	/* Write the pmap's MMU metadata first (pmap_dumpsize() blocks). */
	error = pmap_dumpmmu(dump, blkno);
	blkno += pmap_dumpsize();
printf("starting dump, blkno %lld\n", (long long)blkno);
	for (mp = mem; mp->size; mp++) {
		u_int64_t i = 0, n;
		paddr_t maddr = mp->start;

#if 0
		/* Remind me: why don't we dump page 0 ? */
		if (maddr == 0) {
			/* Skip first page at physical address 0 */
			maddr += NBPG;
			i += NBPG;
			blkno += btodb(NBPG);
		}
#endif
		/*
		 * Dump this region one BYTES_PER_DUMP window at a time.
		 * NOTE(review): a write error only breaks out of the
		 * current region; the remaining regions are still
		 * attempted and may overwrite `error` — confirm intended.
		 */
		for (; i < mp->size; i += n) {
			n = mp->size - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			/* print out how many MBs we have dumped */
			if (i && (i % (1024*1024)) == 0)
				printf("%lld ", i / (1024*1024));
			/* Map the physical window read-only, write it out,
			 * then tear the mapping down again. */
			(void) pmap_enter(pmap_kernel(), dumpspace, maddr,
			    PROT_READ, PROT_READ | PMAP_WIRED);
			pmap_update(pmap_kernel());
			error = (*dump)(dumpdev, blkno,
					(caddr_t)dumpspace, (int)n);
			pmap_remove(pmap_kernel(), dumpspace, dumpspace + n);
			pmap_update(pmap_kernel());
			if (error)
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}
812 
813 /*
814  * get the fp and dump the stack as best we can.  don't leave the
815  * current stack page
816  */
817 void
stackdump(void)818 stackdump(void)
819 {
820 	struct frame *sfp = getfp(), *fp64;
821 
822 	fp64 = sfp = v9next_frame(sfp);
823 	printf("Frame pointer is at %p\n", fp64);
824 	printf("Call traceback:\n");
825 	while (fp64 && ((u_long)fp64 >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
826 		printf("%llx(%llx, %llx, %llx, %llx, %llx, %llx) "
827 		    "fp = %llx\n",
828 		       (unsigned long long)fp64->fr_pc,
829 		       (unsigned long long)fp64->fr_arg[0],
830 		       (unsigned long long)fp64->fr_arg[1],
831 		       (unsigned long long)fp64->fr_arg[2],
832 		       (unsigned long long)fp64->fr_arg[3],
833 		       (unsigned long long)fp64->fr_arg[4],
834 		       (unsigned long long)fp64->fr_arg[5],
835 		       (unsigned long long)fp64->fr_fp);
836 		fp64 = v9next_frame(fp64);
837 	}
838 }
839 
840 
841 /*
842  * Common function for DMA map creation.  May be called by bus-specific
843  * DMA map creation functions.
844  */
845 int
_bus_dmamap_create(bus_dma_tag_t t,bus_dma_tag_t t0,bus_size_t size,int nsegments,bus_size_t maxsegsz,bus_size_t boundary,int flags,bus_dmamap_t * dmamp)846 _bus_dmamap_create(bus_dma_tag_t t, bus_dma_tag_t t0, bus_size_t size,
847     int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags,
848     bus_dmamap_t *dmamp)
849 {
850 	struct sparc_bus_dmamap *map;
851 	void *mapstore;
852 	size_t mapsize;
853 
854 	/*
855 	 * Allocate and initialize the DMA map.  The end of the map
856 	 * is a variable-sized array of segments, so we allocate enough
857 	 * room for them in one shot.
858 	 *
859 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
860 	 * of ALLOCNOW notifies others that we've reserved these resources,
861 	 * and they are not to be freed.
862 	 *
863 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
864 	 * the (nsegments - 1).
865 	 */
866 	mapsize = sizeof(struct sparc_bus_dmamap) +
867 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
868 	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
869 	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
870 		return (ENOMEM);
871 
872 	map = (struct sparc_bus_dmamap *)mapstore;
873 	map->_dm_size = size;
874 	map->_dm_segcnt = nsegments;
875 	map->_dm_maxsegsz = maxsegsz;
876 	map->_dm_boundary = boundary;
877 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK | BUS_DMA_NOWAIT |
878 	    BUS_DMA_COHERENT | BUS_DMA_NOWRITE | BUS_DMA_NOCACHE);
879 	map->dm_mapsize = 0;		/* no valid mappings */
880 	map->dm_nsegs = 0;
881 
882 	*dmamp = map;
883 	return (0);
884 }
885 
886 /*
887  * Common function for DMA map destruction.  May be called by bus-specific
888  * DMA map destruction functions.
889  */
890 void
_bus_dmamap_destroy(bus_dma_tag_t t,bus_dma_tag_t t0,bus_dmamap_t map)891 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
892 {
893 	/*
894 	 * Unload the map if it is still loaded.  This is required
895 	 * by the specification (well, the manpage).  Higher level
896 	 * drivers, if any, should do this too.  By the time the
897 	 * system gets here, the higher level "destroy" functions
898 	 * would probably already have clobbered the data needed
899 	 * to do a proper unload.
900 	 */
901 	if (map->dm_nsegs)
902 		bus_dmamap_unload(t0, map);
903 
904 	free(map, M_DEVBUF, 0);
905 }
906 
907 /*
908  * Common function for loading a DMA map with a linear buffer.  May
909  * be called by bus-specific DMA map load functions.
910  *
911  * Most SPARCs have IOMMUs in the bus controllers.  In those cases
912  * they only need one segment and will use virtual addresses for DVMA.
913  * Those bus controllers should intercept these vectors and should
914  * *NEVER* call _bus_dmamap_load() which is used only by devices that
915  * bypass DVMA.
916  */
917 int
_bus_dmamap_load(bus_dma_tag_t t,bus_dma_tag_t t0,bus_dmamap_t map,void * buf,bus_size_t buflen,struct proc * p,int flags)918 _bus_dmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, void *buf,
919     bus_size_t buflen, struct proc *p, int flags)
920 {
921 	bus_addr_t lastaddr;
922 	int seg, error;
923 
924 	/*
925 	 * Make sure that on error condition we return "no valid mappings".
926 	 */
927 	map->dm_mapsize = 0;
928 	map->dm_nsegs = 0;
929 
930 	if (buflen > map->_dm_size)
931 		return (EINVAL);
932 
933 	seg = 0;
934 	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
935 	    &lastaddr, &seg, 1);
936 	if (error == 0) {
937 		map->dm_mapsize = buflen;
938 		map->dm_nsegs = seg + 1;
939 	}
940 	return (error);
941 }
942 
943 /*
944  * Like _bus_dmamap_load(), but for mbufs.
945  */
946 int
_bus_dmamap_load_mbuf(bus_dma_tag_t t,bus_dma_tag_t t0,bus_dmamap_t map,struct mbuf * m,int flags)947 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
948     struct mbuf *m, int flags)
949 {
950 	bus_dma_segment_t segs[MAX_DMA_SEGS];
951 	int i;
952 	size_t len;
953 
954 	/*
955 	 * Make sure that on error condition we return "no valid mappings".
956 	 */
957  	map->dm_mapsize = 0;
958  	map->dm_nsegs = 0;
959 
960 	if (m->m_pkthdr.len > map->_dm_size)
961 		return (EINVAL);
962 
963 	/* Record mbuf for *_unload */
964 	map->_dm_type = _DM_TYPE_MBUF;
965 	map->_dm_source = m;
966 
967 	i = 0;
968 	len = 0;
969 	while (m) {
970 		vaddr_t vaddr = mtod(m, vaddr_t);
971 		long buflen = (long)m->m_len;
972 
973 		len += buflen;
974 		while (buflen > 0 && i < MAX_DMA_SEGS) {
975 			paddr_t pa;
976 			long incr;
977 
978 			incr = min(buflen,
979 			    PAGE_SIZE - ((u_long)vaddr & PGOFSET));
980 
981 			if (pmap_extract(pmap_kernel(), vaddr, &pa) == FALSE) {
982 #ifdef DIAGNOSTIC
983 				printf("_bus_dmamap_load_mbuf: pmap_extract failed %lx\n",
984 					vaddr);
985 				map->_dm_type = 0;
986 				map->_dm_source = NULL;
987 #endif
988 				return EINVAL;
989 			}
990 
991 			buflen -= incr;
992 			vaddr += incr;
993 
994 			if (i > 0 && pa == (segs[i - 1].ds_addr +
995 			    segs[i - 1].ds_len) && ((segs[i - 1].ds_len + incr)
996 			    < map->_dm_maxsegsz)) {
997 				/* Hey, waddyaknow, they're contiguous */
998 				segs[i - 1].ds_len += incr;
999 				continue;
1000 			}
1001 			segs[i].ds_addr = pa;
1002 			segs[i].ds_len = incr;
1003 			segs[i]._ds_boundary = 0;
1004 			segs[i]._ds_align = 0;
1005 			segs[i]._ds_mlist = NULL;
1006 			i++;
1007 		}
1008 		m = m->m_next;
1009 		if (m && i >= MAX_DMA_SEGS) {
1010 			/* Exceeded the size of our dmamap */
1011 			map->_dm_type = 0;
1012 			map->_dm_source = NULL;
1013 			return (EFBIG);
1014 		}
1015 	}
1016 
1017 	return (bus_dmamap_load_raw(t0, map, segs, i,
1018 			    (bus_size_t)len, flags));
1019 }
1020 
1021 /*
1022  * Like _bus_dmamap_load(), but for uios.
1023  */
1024 int
_bus_dmamap_load_uio(bus_dma_tag_t t,bus_dma_tag_t t0,bus_dmamap_t map,struct uio * uio,int flags)1025 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
1026     struct uio *uio, int flags)
1027 {
1028 	/*
1029 	 * XXXXXXX The problem with this routine is that it needs to
1030 	 * lock the user address space that is being loaded, but there
1031 	 * is no real way for us to unlock it during the unload process.
1032 	 * As a result, only UIO_SYSSPACE uio's are allowed for now.
1033 	 */
1034 	bus_dma_segment_t segs[MAX_DMA_SEGS];
1035 	int i, j;
1036 	size_t len;
1037 
1038 	/*
1039 	 * Make sure that on error condition we return "no valid mappings".
1040 	 */
1041  	map->dm_mapsize = 0;
1042  	map->dm_nsegs = 0;
1043 
1044 	if (uio->uio_resid > map->_dm_size)
1045 		return (EINVAL);
1046 
1047 	if (uio->uio_segflg != UIO_SYSSPACE)
1048 		return (EOPNOTSUPP);
1049 
1050 	/* Record for *_unload */
1051 	map->_dm_type = _DM_TYPE_UIO;
1052 	map->_dm_source = (void *)uio;
1053 
1054 	i = j = 0;
1055 	len = 0;
1056 	while (j < uio->uio_iovcnt) {
1057 		vaddr_t vaddr = (vaddr_t)uio->uio_iov[j].iov_base;
1058 		long buflen = (long)uio->uio_iov[j].iov_len;
1059 
1060 		len += buflen;
1061 		while (buflen > 0 && i < MAX_DMA_SEGS) {
1062 			paddr_t pa;
1063 			long incr;
1064 
1065 			incr = min(buflen,
1066 			    PAGE_SIZE - ((u_long)vaddr & PGOFSET));
1067 
1068 			(void) pmap_extract(pmap_kernel(), vaddr, &pa);
1069 			buflen -= incr;
1070 			vaddr += incr;
1071 
1072 			if (i > 0 && pa == (segs[i - 1].ds_addr +
1073 			    segs[i - 1].ds_len) && ((segs[i - 1].ds_len + incr)
1074 			    < map->_dm_maxsegsz)) {
1075 				/* Hey, waddyaknow, they're contiguous */
1076 				segs[i - 1].ds_len += incr;
1077 				continue;
1078 			}
1079 			segs[i].ds_addr = pa;
1080 			segs[i].ds_len = incr;
1081 			segs[i]._ds_boundary = 0;
1082 			segs[i]._ds_align = 0;
1083 			segs[i]._ds_mlist = NULL;
1084 			i++;
1085 		}
1086 		j++;
1087 		if ((uio->uio_iovcnt - j) && i >= MAX_DMA_SEGS) {
1088 			/* Exceeded the size of our dmamap */
1089 			map->_dm_type = 0;
1090 			map->_dm_source = NULL;
1091 			return (EFBIG);
1092 		}
1093 	}
1094 
1095 	return (bus_dmamap_load_raw(t0, map, segs, i, (bus_size_t)len, flags));
1096 }
1097 
/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 *
 * The generic (mainbus) tag has no implementation; bus front-ends are
 * expected to supply their own method, so reaching this function is a
 * fatal error indicating a missing override.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}
1109 
/*
 * Utility to load a linear buffer into `map', starting at segment
 * *segp with *lastaddrp holding the end of the previous segment.
 * Splits the buffer at page boundaries, honours map->_dm_boundary and
 * map->_dm_maxsegsz, and coalesces physically contiguous chunks into
 * a single segment.  Returns EFBIG if the buffer does not fit in
 * map->_dm_segcnt segments, 0 on success.  *segp/*lastaddrp are
 * updated so the caller can chain multiple buffers into one map.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, bus_addr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	/* User buffers are translated through the process pmap. */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 * NOTE(review): the return value is not checked; an
		 * unmapped vaddr would leave curaddr stale — confirm
		 * callers only pass mapped buffers.
		 */
		pmap_extract(pmap, vaddr, (paddr_t *)&curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * A chunk never crosses a page boundary.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				/* Out of segments: report partial fit below. */
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
1190 
1191 /*
1192  * Common function for unloading a DMA map.  May be called by
1193  * bus-specific DMA map unload functions.
1194  */
1195 void
_bus_dmamap_unload(bus_dma_tag_t t,bus_dma_tag_t t0,bus_dmamap_t map)1196 _bus_dmamap_unload(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
1197 {
1198 	/* Mark the mappings as invalid. */
1199 	map->dm_mapsize = 0;
1200 	map->dm_nsegs = 0;
1201 
1202 }
1203 
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * No cache flushing is done on this path; only a memory barrier is
 * issued so that CPU stores are visible before a device read begins
 * (PREWRITE) and device writes are visible before the CPU reads
 * (POSTREAD).
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
    bus_addr_t offset, bus_size_t len, int ops)
{
	if (ops & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD))
		__membar("#MemIssue");
}
1215 
1216 /*
1217  * Common function for DMA-safe memory allocation.  May be called
1218  * by bus-specific DMA memory allocation functions.
1219  */
1220 int
_bus_dmamem_alloc(bus_dma_tag_t t,bus_dma_tag_t t0,bus_size_t size,bus_size_t alignment,bus_size_t boundary,bus_dma_segment_t * segs,int nsegs,int * rsegs,int flags)1221 _bus_dmamem_alloc(bus_dma_tag_t t, bus_dma_tag_t t0, bus_size_t size,
1222     bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
1223     int nsegs, int *rsegs, int flags)
1224 {
1225 	struct pglist *mlist;
1226 	int error, plaflag;
1227 
1228 	/* Always round the size. */
1229 	size = round_page(size);
1230 
1231 	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
1232 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1233 		return (ENOMEM);
1234 
1235 	/*
1236 	 * If the bus uses DVMA then ignore boundary and alignment.
1237 	 */
1238 	segs[0]._ds_boundary = boundary;
1239 	segs[0]._ds_align = alignment;
1240 	if (flags & BUS_DMA_DVMA) {
1241 		boundary = 0;
1242 		alignment = 0;
1243 	}
1244 
1245 	/*
1246 	 * Allocate pages from the VM system.
1247 	 */
1248 	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
1249 	if (flags & BUS_DMA_ZERO)
1250 		plaflag |= UVM_PLA_ZERO;
1251 
1252 	TAILQ_INIT(mlist);
1253 	error = uvm_pglistalloc(size, (paddr_t)0, (paddr_t)-1,
1254 	    alignment, boundary, mlist, nsegs, plaflag);
1255 	if (error)
1256 		return (error);
1257 
1258 	/*
1259 	 * Compute the location, size, and number of segments actually
1260 	 * returned by the VM code.
1261 	 */
1262 	segs[0].ds_addr = 0UL; /* UPA does not map things */
1263 	segs[0].ds_len = size;
1264 	*rsegs = 1;
1265 
1266 	/*
1267 	 * Simply keep a pointer around to the linked list, so
1268 	 * bus_dmamap_free() can return it.
1269 	 *
1270 	 * NOBODY SHOULD TOUCH THE pageq FIELDS WHILE THESE PAGES
1271 	 * ARE IN OUR CUSTODY.
1272 	 */
1273 	segs[0]._ds_mlist = mlist;
1274 
1275 	/* The bus driver should do the actual mapping */
1276 	return (0);
1277 }
1278 
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 *
 * Returns the pages recorded in segs[0]._ds_mlist by
 * _bus_dmamem_alloc() to UVM and frees the page-list header.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
    int nsegs)
{

#ifdef DIAGNOSTIC
	/* _bus_dmamem_alloc() only ever produces a single segment. */
	if (nsegs != 1)
		panic("bus_dmamem_free: nsegs = %d", nsegs);
#endif

	/*
	 * Return the list of pages back to the VM system.
	 */
	uvm_pglistfree(segs[0]._ds_mlist);
	free(segs[0]._ds_mlist, M_DEVBUF, 0);
}
1299 
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 *
 * Maps the pages recorded in segs[0]._ds_mlist into a fresh chunk of
 * kernel virtual address space and returns the KVA via *kvap.
 * BUS_DMA_NOCACHE maps the pages uncached; BUS_DMA_NOWAIT makes the
 * KVA allocation non-sleeping.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
    int nsegs, size_t size, caddr_t *kvap, int flags)
{
	const struct kmem_dyn_mode *kd;
	struct vm_page *m;
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr, cbit;
	struct pglist *mlist;
	int error;

#ifdef DIAGNOSTIC
	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);
#endif

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	cbit = 0;
	if (flags & BUS_DMA_NOCACHE)
		cbit |= PMAP_NC;

	/* Remember the full range for cleanup on a partial failure. */
	sva = va;
	ssize = size;
	mlist = segs[0]._ds_mlist;
	TAILQ_FOREACH(m, mlist, pageq) {
#ifdef DIAGNOSTIC
		/* More pages on the list than `size' accounts for. */
		if (size == 0)
			panic("_bus_dmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(m);
		error = pmap_enter(pmap_kernel(), va, addr | cbit,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
		if (error) {
			/*
			 * NOTE(review): relies on km_free() tearing down
			 * the mappings already entered — confirm against
			 * km_alloc(9).  *kvap is left stale, but callers
			 * must not use it on error.
			 */
			pmap_update(pmap_kernel());
			km_free((void *)sva, ssize, &kv_any, &kp_none);
			return (error);
		}
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
1357 
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 *
 * Releases the KVA chunk obtained by _bus_dmamem_map(); `kva' must be
 * the page-aligned address that function returned.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, bus_dma_tag_t t0, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	km_free(kva, round_page(size), &kv_any, &kp_none);
}
1373 
1374 /*
1375  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
1376  * bus-specific DMA mmap(2)'ing functions.
1377  */
1378 paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t,bus_dma_tag_t t0,bus_dma_segment_t * segs,int nsegs,off_t off,int prot,int flags)1379 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
1380     int nsegs, off_t off, int prot, int flags)
1381 {
1382 	int i;
1383 
1384 	for (i = 0; i < nsegs; i++) {
1385 #ifdef DIAGNOSTIC
1386 		if (off & PGOFSET)
1387 			panic("_bus_dmamem_mmap: offset unaligned");
1388 		if (segs[i].ds_addr & PGOFSET)
1389 			panic("_bus_dmamem_mmap: segment unaligned");
1390 		if (segs[i].ds_len & PGOFSET)
1391 			panic("_bus_dmamem_mmap: segment size not multiple"
1392 					" of page size");
1393 #endif
1394 		if (off >= segs[i].ds_len) {
1395 			off -= segs[i].ds_len;
1396 			continue;
1397 		}
1398 
1399 		return (segs[i].ds_addr + off);
1400 	}
1401 
1402 	/* Page not found. */
1403 	return (-1);
1404 }
1405 
/*
 * DMA tag for the root (mainbus) bus: plugs the generic
 * _bus_dmamap_*() / _bus_dmamem_*() implementations above into the
 * sparc_bus_dma_tag method table.  The two leading NULLs are
 * presumably the tag's cookie and parent pointers — confirm against
 * <machine/bus.h>.
 */
struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
1424 
1425 
/*
 * Base bus space handlers: prototypes for the mainbus implementations
 * installed in _mainbus_space_tag below.  Child busses override the
 * methods they need; the bus_space_*() front ends walk the parent
 * chain to find the nearest implementation.
 */
int sparc_bus_map(bus_space_tag_t, bus_space_tag_t, bus_addr_t, bus_size_t,
    int, bus_space_handle_t *);
int sparc_bus_protect(bus_space_tag_t, bus_space_tag_t, bus_space_handle_t,
    bus_size_t, int);
int sparc_bus_unmap(bus_space_tag_t, bus_space_tag_t, bus_space_handle_t,
    bus_size_t);
bus_addr_t sparc_bus_addr(bus_space_tag_t, bus_space_tag_t,
    bus_space_handle_t);
int sparc_bus_subregion(bus_space_tag_t, bus_space_tag_t,  bus_space_handle_t,
    bus_size_t, bus_size_t, bus_space_handle_t *);
paddr_t sparc_bus_mmap(bus_space_tag_t, bus_space_tag_t, bus_addr_t, off_t,
    int, int);
void *sparc_mainbus_intr_establish(bus_space_tag_t, bus_space_tag_t, int, int,
    int, int (*)(void *), void *, const char *);
int sparc_bus_alloc(bus_space_tag_t, bus_space_tag_t, bus_addr_t, bus_addr_t,
    bus_size_t, bus_size_t, bus_size_t, int, bus_addr_t *,
    bus_space_handle_t *);
void sparc_bus_free(bus_space_tag_t, bus_space_tag_t, bus_space_handle_t,
    bus_size_t);
1448 
1449 int
sparc_bus_map(bus_space_tag_t t,bus_space_tag_t t0,bus_addr_t addr,bus_size_t size,int flags,bus_space_handle_t * hp)1450 sparc_bus_map(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t	addr,
1451     bus_size_t size, int flags, bus_space_handle_t *hp)
1452 {
1453 	vaddr_t va;
1454 	u_int64_t pa;
1455 	paddr_t	pm_flags = 0;
1456 	vm_prot_t pm_prot = PROT_READ;
1457 
1458 	if (flags & BUS_SPACE_MAP_PROMADDRESS) {
1459 		hp->bh_ptr = addr;
1460 		return (0);
1461 	}
1462 
1463 	if (size == 0) {
1464 		char buf[80];
1465 		bus_space_render_tag(t0, buf, sizeof buf);
1466 		printf("\nsparc_bus_map: zero size on %s", buf);
1467 		return (EINVAL);
1468 	}
1469 
1470 	if ( (LITTLE_ASI(t0->asi) && LITTLE_ASI(t0->sasi)) ||
1471 	    (PHYS_ASI(t0->asi) != PHYS_ASI(t0->sasi)) ) {
1472 		char buf[80];
1473 		bus_space_render_tag(t0, buf, sizeof buf);
1474 		printf("\nsparc_bus_map: mismatched ASIs on %s: asi=%x sasi=%x",
1475 		    buf, t0->asi, t0->sasi);
1476 	}
1477 
1478 	if (PHYS_ASI(t0->asi)) {
1479 #ifdef BUS_SPACE_DEBUG
1480 		char buf[80];
1481 		bus_space_render_tag(t0, buf, sizeof buf);
1482 		BUS_SPACE_PRINTF(BSDB_MAP,
1483 		    ("\nsparc_bus_map: physical tag %s asi %x sasi %x flags %x "
1484 		    "paddr %016llx size %016llx",
1485 		    buf,
1486 		    (int)t0->asi, (int)t0->sasi, (int)flags,
1487 		    (unsigned long long)addr, (unsigned long long)size));
1488 #endif /* BUS_SPACE_DEBUG */
1489 		if (flags & BUS_SPACE_MAP_LINEAR) {
1490 			char buf[80];
1491 			bus_space_render_tag(t0, buf, sizeof buf);
1492 			printf("\nsparc_bus_map: linear mapping requested on physical bus %s", buf);
1493 			return (EINVAL);
1494 		}
1495 
1496 		hp->bh_ptr = addr;
1497 		return (0);
1498 	}
1499 
1500 	size = round_page(size);
1501 
1502 	if (LITTLE_ASI(t0->sasi) && !LITTLE_ASI(t0->asi))
1503 		pm_flags |= PMAP_LITTLE;
1504 
1505 	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
1506 		pm_flags |= PMAP_NC;
1507 
1508 	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, &kd_nowait);
1509 	if (va == 0)
1510 		return (ENOMEM);
1511 
1512 	/* note: preserve page offset */
1513 	hp->bh_ptr = va | (addr & PGOFSET);
1514 
1515 	pa = trunc_page(addr);
1516 	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
1517 		pm_prot |= PROT_WRITE;
1518 
1519 #ifdef BUS_SPACE_DEBUG
1520 	{ /* scope */
1521 		char buf[80];
1522 		bus_space_render_tag(t0, buf, sizeof buf);
1523 		BUS_SPACE_PRINTF(BSDB_MAP, ("\nsparc_bus_map: tag %s type %x "
1524 		    "flags %x addr %016llx size %016llx virt %llx paddr "
1525 		    "%016llx", buf, (int)t->default_type, (int) flags,
1526 		    (unsigned long long)addr, (unsigned long long)size,
1527 		    (unsigned long long)hp->bh_ptr, (unsigned long long)pa));
1528 	}
1529 #endif /* BUS_SPACE_DEBUG */
1530 
1531 	do {
1532 		BUS_SPACE_PRINTF(BSDB_MAPDETAIL, ("\nsparc_bus_map: phys %llx "
1533 		    "virt %p hp->bh_ptr %llx", (unsigned long long)pa,
1534 		    (char *)v, (unsigned long long)hp->bh_ptr));
1535 		pmap_enter(pmap_kernel(), va, pa | pm_flags, pm_prot,
1536 			pm_prot|PMAP_WIRED);
1537 		va += PAGE_SIZE;
1538 		pa += PAGE_SIZE;
1539 	} while ((size -= PAGE_SIZE) > 0);
1540 	pmap_update(pmap_kernel());
1541 	return (0);
1542 }
1543 
1544 int
sparc_bus_subregion(bus_space_tag_t tag,bus_space_tag_t tag0,bus_space_handle_t handle,bus_size_t offset,bus_size_t size,bus_space_handle_t * nhandlep)1545 sparc_bus_subregion(bus_space_tag_t tag, bus_space_tag_t tag0,
1546     bus_space_handle_t handle, bus_size_t offset, bus_size_t size,
1547     bus_space_handle_t *nhandlep)
1548 {
1549 	*nhandlep = handle;
1550 	nhandlep->bh_ptr += offset;
1551 	return (0);
1552 }
1553 
1554 /* stolen from uvm_chgkprot() */
1555 /*
1556  * Change protections on kernel pages from addr to addr+len
1557  * (presumably so debugger can plant a breakpoint).
1558  *
1559  * We force the protection change at the pmap level.  If we were
1560  * to use vm_map_protect a change to allow writing would be lazily-
1561  * applied meaning we would still take a protection fault, something
1562  * we really don't want to do.  It would also fragment the kernel
1563  * map unnecessarily.  We cannot use pmap_protect since it also won't
1564  * enforce a write-enable request.  Using pmap_enter is the only way
1565  * we can ensure the change takes place properly.
1566  */
1567 int
sparc_bus_protect(bus_space_tag_t t,bus_space_tag_t t0,bus_space_handle_t h,bus_size_t size,int flags)1568 sparc_bus_protect(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t h,
1569     bus_size_t size, int flags)
1570 {
1571         vm_prot_t prot;
1572 	paddr_t	pm_flags = 0;
1573         paddr_t pa;
1574         vaddr_t sva, eva;
1575 	void* addr = bus_space_vaddr(t0, h);
1576 
1577 	if (addr == 0) {
1578 		printf("\nsparc_bus_protect: null address");
1579 		return (EINVAL);
1580 	}
1581 
1582 	if (PHYS_ASI(t0->asi)) {
1583 		printf("\nsparc_bus_protect: physical ASI");
1584 		return (EINVAL);
1585 	}
1586 
1587         prot = (flags & BUS_SPACE_MAP_READONLY) ?
1588 	    PROT_READ : PROT_READ | PROT_WRITE;
1589 	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
1590 	    pm_flags |= PMAP_NC;
1591 
1592         eva = round_page((vaddr_t)addr + size);
1593         for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
1594                 /*
1595                  * Extract physical address for the page.
1596                  */
1597                 if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
1598                         panic("bus_space_protect(): invalid page");
1599                 pmap_enter(pmap_kernel(), sva, pa | pm_flags, prot, prot | PMAP_WIRED);
1600         }
1601 	pmap_update(pmap_kernel());
1602 
1603 	return (0);
1604 }
1605 
1606 int
sparc_bus_unmap(bus_space_tag_t t,bus_space_tag_t t0,bus_space_handle_t bh,bus_size_t size)1607 sparc_bus_unmap(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t bh,
1608     bus_size_t size)
1609 {
1610 	vaddr_t va = trunc_page((vaddr_t)bh.bh_ptr);
1611 	vaddr_t endva = va + round_page(size);
1612 
1613 	if (PHYS_ASI(t0->asi))
1614 		return (0);
1615 
1616 	pmap_remove(pmap_kernel(), va, endva);
1617 	pmap_update(pmap_kernel());
1618 	km_free((void *)va, endva - va, &kv_any, &kp_none);
1619 
1620 	return (0);
1621 }
1622 
1623 paddr_t
sparc_bus_mmap(bus_space_tag_t t,bus_space_tag_t t0,bus_addr_t paddr,off_t off,int prot,int flags)1624 sparc_bus_mmap(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t paddr,
1625     off_t off, int prot, int flags)
1626 {
1627 	if (PHYS_ASI(t0->asi)) {
1628 		printf("\nsparc_bus_mmap: physical ASI");
1629 		return (0);
1630 	}
1631 
1632 	/* Devices are un-cached... although the driver should do that */
1633 	return ((paddr + off) | PMAP_NC);
1634 }
1635 
1636 bus_addr_t
sparc_bus_addr(bus_space_tag_t t,bus_space_tag_t t0,bus_space_handle_t h)1637 sparc_bus_addr(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t h)
1638 {
1639 	paddr_t addr;
1640 
1641 	if (PHYS_ASI(t0->asi))
1642 		return h.bh_ptr;
1643 
1644 	if (!pmap_extract(pmap_kernel(), h.bh_ptr, &addr))
1645 		return (-1);
1646 	return addr;
1647 }
1648 
1649 void *
bus_intr_allocate(bus_space_tag_t t,int (* handler)(void *),void * arg,int number,int pil,volatile u_int64_t * mapper,volatile u_int64_t * clearer,const char * what)1650 bus_intr_allocate(bus_space_tag_t t, int (*handler)(void *), void *arg,
1651     int number, int pil,
1652     volatile u_int64_t *mapper, volatile u_int64_t *clearer,
1653     const char *what)
1654 {
1655 	struct intrhand *ih;
1656 
1657 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT | M_ZERO);
1658 	if (ih == NULL)
1659 		return (NULL);
1660 
1661 	ih->ih_fun = handler;
1662 	ih->ih_arg = arg;
1663 	ih->ih_number = number;
1664 	ih->ih_pil = pil;
1665 	ih->ih_map = mapper;
1666 	ih->ih_clr = clearer;
1667 	ih->ih_bus = t;
1668 	strlcpy(ih->ih_name, what, sizeof(ih->ih_name));
1669 
1670 	return (ih);
1671 }
1672 
#ifdef notyet
/*
 * Release an intrhand obtained from bus_intr_allocate().  Compiled
 * out until interrupt disestablishment is supported.
 */
void
bus_intr_free(void *arg)
{
	free(arg, M_DEVBUF, 0);
}
#endif
1680 
1681 void *
sparc_mainbus_intr_establish(bus_space_tag_t t,bus_space_tag_t t0,int number,int pil,int flags,int (* handler)(void *),void * arg,const char * what)1682 sparc_mainbus_intr_establish(bus_space_tag_t t, bus_space_tag_t t0, int number,
1683     int pil, int flags, int (*handler)(void *), void *arg, const char *what)
1684 {
1685 	struct intrhand *ih;
1686 
1687 	ih = bus_intr_allocate(t0, handler, arg, number, pil, NULL, NULL, what);
1688 	if (ih == NULL)
1689 		return (NULL);
1690 
1691 	intr_establish(ih);
1692 
1693 	return (ih);
1694 }
1695 
/*
 * Bus-space resource allocation is not supported on the mainbus;
 * busses that can allocate space override this method.
 */
int
sparc_bus_alloc(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t rs,
    bus_addr_t re, bus_size_t s, bus_size_t a, bus_size_t b, int f,
    bus_addr_t *ap, bus_space_handle_t *hp)
{
	return (ENOTTY);
}
1703 
1704 void
sparc_bus_free(bus_space_tag_t t,bus_space_tag_t t0,bus_space_handle_t h,bus_size_t s)1705 sparc_bus_free(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t h,
1706     bus_size_t s)
1707 {
1708 	return;
1709 }
1710 
/*
 * Root bus space tag: no parent, UPA space, big-endian (primary) ASIs.
 * The sparc_bus_*() functions above are the method implementations;
 * child bus tags inherit any method they leave NULL via the
 * _BS_PRECALL() parent walk.
 */
static const struct sparc_bus_space_tag _mainbus_space_tag = {
	.cookie =			NULL,
	.parent =			NULL,
	.default_type =			UPA_BUS_SPACE,
	.asi =				ASI_PRIMARY,
	.sasi =				ASI_PRIMARY,
	.name =				"mainbus",
	.sparc_bus_alloc =		sparc_bus_alloc,
	.sparc_bus_free =		sparc_bus_free,
	.sparc_bus_map =		sparc_bus_map,
	.sparc_bus_protect =		sparc_bus_protect,
	.sparc_bus_unmap =		sparc_bus_unmap,
	.sparc_bus_subregion =		sparc_bus_subregion,
	.sparc_bus_mmap =		sparc_bus_mmap,
	.sparc_intr_establish =		sparc_mainbus_intr_establish,
	/*.sparc_intr_establish_cpu*/
	.sparc_bus_addr =		sparc_bus_addr
};
const bus_space_tag_t mainbus_space_tag = &_mainbus_space_tag;
1730 
/* Autoconf driver glue for the mainbus device. */
struct cfdriver mainbus_cd = {
	NULL, "mainbus", DV_DULL
};
1734 
/*
 * Helpers for the bus_space_*() front ends: _BS_PRECALL walks up the
 * tag's parent chain to the nearest tag implementing method `f';
 * _BS_CALL then invokes it through the resolved tag.  _BS_POSTCALL is
 * a placeholder for symmetry.
 */
#define _BS_PRECALL(t,f)		\
        while (t->f == NULL)		\
                t = t->parent;
#define _BS_POSTCALL

#define _BS_CALL(t,f)			\
        (*(t)->f)
1742 
1743 int
bus_space_alloc(bus_space_tag_t t,bus_addr_t rs,bus_addr_t re,bus_size_t s,bus_size_t a,bus_size_t b,int f,bus_addr_t * ap,bus_space_handle_t * hp)1744 bus_space_alloc(bus_space_tag_t t, bus_addr_t rs, bus_addr_t re, bus_size_t s,
1745     bus_size_t a, bus_size_t b, int f, bus_addr_t *ap, bus_space_handle_t *hp)
1746 {
1747         const bus_space_tag_t t0 = t;
1748         int ret;
1749 
1750         _BS_PRECALL(t, sparc_bus_alloc);
1751         ret = _BS_CALL(t, sparc_bus_alloc)(t, t0, rs, re, s, a, b, f, ap, hp);
1752         _BS_POSTCALL;
1753         return ret;
1754 }
1755 
1756 void
bus_space_free(bus_space_tag_t t,bus_space_handle_t h,bus_size_t s)1757 bus_space_free(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
1758 {
1759 	const bus_space_tag_t t0 = t;
1760 
1761 	_BS_PRECALL(t, sparc_bus_free);
1762 	_BS_CALL(t, sparc_bus_free)(t, t0, h, s);
1763 	_BS_POSTCALL;
1764 }
1765 
/*
 * Front end for the sparc_bus_map method.  Under BUS_SPACE_DEBUG the
 * handle is additionally stamped with its size and owning tag so
 * bus_space_assert() can validate later accesses.
 */
int
bus_space_map(bus_space_tag_t t, bus_addr_t a, bus_size_t s, int f,
    bus_space_handle_t *hp)
{
	const bus_space_tag_t t0 = t;
	int ret;

	_BS_PRECALL(t, sparc_bus_map);
	ret = _BS_CALL(t, sparc_bus_map)(t, t0, a, s, f, hp);
	_BS_POSTCALL;
#ifdef BUS_SPACE_DEBUG
	if(s == 0) {
		char buf[128];
		bus_space_render_tag(t, buf, sizeof buf);
		printf("\n********** bus_space_map: requesting "
		    "zero-length mapping on bus %p:%s",
		    t, buf);
	}
	hp->bh_flags = 0;
	if (ret == 0) {
		hp->bh_size = s;
		hp->bh_tag = t0;
	} else {
		hp->bh_size = 0;
		hp->bh_tag = NULL;
	}
#endif /* BUS_SPACE_DEBUG */
	return (ret);
}
1795 
1796 int
bus_space_protect(bus_space_tag_t t,bus_space_handle_t h,bus_size_t s,int f)1797 bus_space_protect(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s, int f)
1798 {
1799 	const bus_space_tag_t t0 = t;
1800 	int ret;
1801 
1802 	_BS_PRECALL(t, sparc_bus_protect);
1803 	ret = _BS_CALL(t, sparc_bus_protect)(t, t0, h, s, f);
1804 	_BS_POSTCALL;
1805 
1806 	return (ret);
1807 }
1808 
/*
 * Front end for the sparc_bus_unmap method.  Under BUS_SPACE_DEBUG
 * the handle is checked against the tag and the unmap size is
 * compared with the recorded map size.
 */
int
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
{
	const bus_space_tag_t t0 = t;
	int ret;

	_BS_PRECALL(t, sparc_bus_unmap);
	BUS_SPACE_ASSERT(t0, h, 0, 1);
#ifdef BUS_SPACE_DEBUG
	if(h.bh_size != s) {
		char buf[128];
		bus_space_render_tag(t0, buf, sizeof buf);
		printf("\n********* bus_space_unmap: %p:%s, map/unmap "
		    "size mismatch (%llx != %llx)",
		    t, buf, h.bh_size, s);
	}
#endif /* BUS_SPACE_DEBUG */
	ret = _BS_CALL(t, sparc_bus_unmap)(t, t0, h, s);
	_BS_POSTCALL;
	return (ret);
}
1830 
/*
 * Front end for the sparc_bus_subregion method.  Under
 * BUS_SPACE_DEBUG the requested subrange is bounds-checked against
 * the parent handle before dispatch, and the new handle is stamped
 * with its size and tag afterwards.
 */
int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    bus_size_t s, bus_space_handle_t *hp)
{
	const bus_space_tag_t t0 = t;
	int ret;

	_BS_PRECALL(t, sparc_bus_subregion);
	BUS_SPACE_ASSERT(t0, h, o, 1);
#ifdef BUS_SPACE_DEBUG
	if(h.bh_size < o + s) {
		char buf[128];
		bus_space_render_tag(t0, buf, sizeof buf);
		printf("\n********** bus_space_subregion: "
		    "%p:%s, %llx < %llx + %llx",
		    t0, buf, h.bh_size, o, s);
		hp->bh_size = 0;
		hp->bh_tag = NULL;
		return (EINVAL);
	}
#endif /* BUS_SPACE_DEBUG */
	ret = _BS_CALL(t, sparc_bus_subregion)(t, t0, h, o, s, hp);
	_BS_POSTCALL;
#ifdef BUS_SPACE_DEBUG
	if (ret == 0) {
		hp->bh_size = s;
		hp->bh_tag = t0;
	} else {
		hp->bh_size = 0;
		hp->bh_tag = NULL;
	}
#endif /* BUS_SPACE_DEBUG */
	return (ret);
}
1865 
1866 paddr_t
bus_space_mmap(bus_space_tag_t t,bus_addr_t a,off_t o,int p,int f)1867 bus_space_mmap(bus_space_tag_t t, bus_addr_t a, off_t o, int p, int f)
1868 {
1869 	const bus_space_tag_t t0 = t;
1870 	paddr_t ret;
1871 
1872 	_BS_PRECALL(t, sparc_bus_mmap);
1873 	ret = _BS_CALL(t, sparc_bus_mmap)(t, t0, a, o, p, f);
1874 	_BS_POSTCALL;
1875 	return (ret);
1876 }
1877 
1878 void *
bus_intr_establish(bus_space_tag_t t,int p,int l,int f,int (* h)(void *),void * a,const char * w)1879 bus_intr_establish(bus_space_tag_t t, int p, int l, int f, int (*h)(void *),
1880     void *a, const char *w)
1881 {
1882 	const bus_space_tag_t t0 = t;
1883 	void *ret;
1884 
1885 	_BS_PRECALL(t, sparc_intr_establish);
1886 	ret = _BS_CALL(t, sparc_intr_establish)(t, t0, p, l, f, h, a, w);
1887 	_BS_POSTCALL;
1888 	return (ret);
1889 }
1890 
/*
 * Like bus_intr_establish(), but binds the interrupt to a specific
 * CPU.  Falls back to the plain method (ignoring `ci') when the tag
 * hierarchy provides no sparc_intr_establish_cpu implementation.
 */
void *
bus_intr_establish_cpu(bus_space_tag_t t, int p, int l, int f,
    struct cpu_info *ci, int (*h)(void *), void *a, const char *w)
{
	const bus_space_tag_t t0 = t;
	void *ret;

	if (t->sparc_intr_establish_cpu == NULL)
		return (bus_intr_establish(t, p, l, f, h, a, w));

	_BS_PRECALL(t, sparc_intr_establish_cpu);
	ret = _BS_CALL(t, sparc_intr_establish_cpu)(t, t0, p, l, f, ci,
	    h, a, w);
	_BS_POSTCALL;
	return (ret);
}
1907 
/* XXXX Things get complicated if we use unmapped register accesses. */
/*
 * Return the kernel virtual address backing a handle, or NULL for
 * tags whose ASI does not correspond to mapped (primary address
 * space) accesses.
 */
void *
bus_space_vaddr(bus_space_tag_t t, bus_space_handle_t h)
{
	BUS_SPACE_ASSERT(t, h, 0, 1);
        if(t->asi == ASI_PRIMARY || t->asi == ASI_PRIMARY_LITTLE)
		return 	((void *)(vaddr_t)(h.bh_ptr));

#ifdef BUS_SPACE_DEBUG
	{ /* Scope */
		char buf[64];
		bus_space_render_tag(t, buf, sizeof buf);
		printf("\nbus_space_vaddr: no vaddr for %p:%s (asi=%x)",
			t, buf, t->asi);
	}
#endif

	return (NULL);
}
1927 
1928 void
bus_space_render_tag(bus_space_tag_t t,char * buf,size_t len)1929 bus_space_render_tag(bus_space_tag_t t, char* buf, size_t len)
1930 {
1931 	if (t == NULL) {
1932 		strlcat(buf, "<NULL>", len);
1933 		return;
1934 	}
1935 	buf[0] = '\0';
1936 	if (t->parent)
1937 		bus_space_render_tag(t->parent, buf, len);
1938 
1939 	strlcat(buf, "/", len);
1940 	strlcat(buf, t->name, len);
1941 }
1942 
#ifdef BUS_SPACE_DEBUG

/*
 * Debug-only sanity checks for a bus space access: verify the handle
 * belongs to tag `t', that offset `o' lies within the recorded
 * mapping, and that `o' is aligned to the access width `n'.
 * Violations are reported but not fatal.
 */
void
bus_space_assert(bus_space_tag_t t, const bus_space_handle_t *h, bus_size_t o,
    int n)
{
        if (h->bh_tag != t) {
		char buf1[128];
		char buf2[128];
		bus_space_render_tag(t, buf1, sizeof buf1);
		bus_space_render_tag(h->bh_tag, buf2, sizeof buf2);
                printf("\n********** bus_space_assert: wrong tag (%p:%s, "
		    "expecting %p:%s) ", t, buf1, h->bh_tag, buf2);
	}

        if (o >= h->bh_size) {
		char buf[128];
		bus_space_render_tag(t, buf, sizeof buf);
                printf("\n********** bus_space_assert: bus %p:%s, offset "
		    "(%llx) out of mapping range (%llx) ", t, buf, o,
		    h->bh_size);
	}

	if (o & (n - 1)) {
		char buf[128];
		bus_space_render_tag(t, buf, sizeof buf);
                printf("\n********** bus_space_assert: bus %p:%s, offset "
		    "(%llx) incorrect alignment (%d) ", t, buf, o, n);
	}
}

#endif /* BUS_SPACE_DEBUG */
1975 
/*
 * State shared by all registered blinking LEDs: the list of LEDs,
 * the current blink phase, and the timeout driving the blinking.
 */
struct blink_led_softc {
	SLIST_HEAD(, blink_led) bls_head;	/* registered LEDs */
	int bls_on;				/* phase passed to bl_func */
	struct timeout bls_to;			/* periodic blink timeout */
} blink_sc = { SLIST_HEAD_INITIALIZER(blink_sc.bls_head), 0 };
1981 
1982 void
blink_led_register(struct blink_led * l)1983 blink_led_register(struct blink_led *l)
1984 {
1985 	if (SLIST_EMPTY(&blink_sc.bls_head)) {
1986 		timeout_set(&blink_sc.bls_to, blink_led_timeout, &blink_sc);
1987 		blink_sc.bls_on = 0;
1988 		if (sparc_led_blink)
1989 			timeout_add(&blink_sc.bls_to, 1);
1990 	}
1991 	SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next);
1992 }
1993 
/*
 * Timeout callback driving the LED blink: toggle every registered LED
 * to the current phase, flip the phase, and reschedule at a rate that
 * slows as the load average rises.  Stops rescheduling when blinking
 * is disabled or no LEDs remain.
 */
void
blink_led_timeout(void *vsc)
{
	struct blink_led_softc *sc = &blink_sc;
	struct blink_led *l;
	int t;

	if (SLIST_EMPTY(&sc->bls_head))
		return;

	/* Drive every LED to the current phase. */
	SLIST_FOREACH(l, &sc->bls_head, bl_next) {
		(*l->bl_func)(l->bl_arg, sc->bls_on);
	}
	sc->bls_on = !sc->bls_on;

	if (!sparc_led_blink)
		return;

	/*
	 * Blink rate is:
	 *      full cycle every second if completely idle (loadav = 0)
	 *      full cycle every 2 seconds if loadav = 1
	 *      full cycle every 3 seconds if loadav = 2
	 * etc.
	 */
	t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1));
	timeout_add(&sc->bls_to, t);
}
2022