1 /*	$NetBSD: machdep.c,v 1.119 2002/03/20 18:54:49 eeh Exp $ */
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1992, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  *
44  * This software was developed by the Computer Systems Engineering group
45  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
46  * contributed to Berkeley.
47  *
48  * All advertising materials mentioning features or use of this software
49  * must display the following acknowledgement:
50  *	This product includes software developed by the University of
51  *	California, Lawrence Berkeley Laboratory.
52  *
53  * Redistribution and use in source and binary forms, with or without
54  * modification, are permitted provided that the following conditions
55  * are met:
56  * 1. Redistributions of source code must retain the above copyright
57  *    notice, this list of conditions and the following disclaimer.
58  * 2. Redistributions in binary form must reproduce the above copyright
59  *    notice, this list of conditions and the following disclaimer in the
60  *    documentation and/or other materials provided with the distribution.
61  * 3. All advertising materials mentioning features or use of this software
62  *    must display the following acknowledgement:
63  *	This product includes software developed by the University of
64  *	California, Berkeley and its contributors.
65  * 4. Neither the name of the University nor the names of its contributors
66  *    may be used to endorse or promote products derived from this software
67  *    without specific prior written permission.
68  *
69  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
70  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
71  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
72  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
73  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
74  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
75  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
76  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
77  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
78  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
79  * SUCH DAMAGE.
80  *
81  *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
82  */
83 
84 #include "opt_compat_netbsd.h"	/* for COMPAT_13, used below */
85 #include "opt_compat_sunos.h"
86 #include "opt_ddb.h"
87 
88 #include <sys/param.h>
89 #include <sys/extent.h>
90 #include <sys/signal.h>
91 #include <sys/signalvar.h>
92 #include <sys/proc.h>
93 #include <sys/user.h>
94 #include <sys/map.h>
95 #include <sys/buf.h>
96 #include <sys/device.h>
97 #include <sys/reboot.h>
98 #include <sys/systm.h>
99 #include <sys/kernel.h>
100 #include <sys/conf.h>
101 #include <sys/file.h>
102 #include <sys/clist.h>
103 #include <sys/malloc.h>
104 #include <sys/mbuf.h>
105 #include <sys/mount.h>
106 #include <sys/msgbuf.h>
107 #include <sys/syscallargs.h>
108 #include <sys/exec.h>
109 
110 #include <uvm/uvm.h>
111 
112 #include <sys/sysctl.h>
113 #ifndef	ELFSIZE
114 #ifdef __arch64__
115 #define	ELFSIZE	64
116 #else
117 #define	ELFSIZE	32
118 #endif
119 #endif
120 #include <sys/exec_elf.h>
121 
122 #define _SPARC_BUS_DMA_PRIVATE
123 #include <machine/autoconf.h>
124 #include <machine/bus.h>
125 #include <machine/frame.h>
126 #include <machine/cpu.h>
127 #include <machine/pmap.h>
128 #include <machine/openfirm.h>
129 #include <machine/sparc64.h>
130 
131 #include <sparc64/sparc64/cache.h>
132 
133 /* #include "fb.h" */
134 
135 int bus_space_debug = 0; /* This may be used by macros elsewhere. */
136 #ifdef DEBUG
137 #define DPRINTF(l, s)   do { if (bus_space_debug & l) printf s; } while (0)
138 #else
139 #define DPRINTF(l, s)
140 #endif
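
/*
 * Illustrative use (assumes a DEBUG kernel with the BSDB_MAP class
 * enabled in bus_space_debug):
 *
 *	DPRINTF(BSDB_MAP, ("mapping %lx\n", (u_long)addr));
 *
 * expands to a printf only when that class bit is set, and compiles
 * away entirely on non-DEBUG kernels.
 */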
141 
142 struct vm_map *exec_map = NULL;
143 struct vm_map *mb_map = NULL;
144 extern vaddr_t avail_end;
145 
146 int	physmem;
147 
148 extern	caddr_t msgbufaddr;
149 
150 /*
151  * Maximum number of DMA segments we'll allow in dmamem_load()
152  * routines.  Can be overridden in config files, etc.
153  */
154 #ifndef MAX_DMA_SEGS
155 #define MAX_DMA_SEGS	20
156 #endif
157 
158 /*
159  * safepri is a safe priority for sleep to set for a spin-wait
160  * during autoconfiguration or after a panic.
161  */
162 int   safepri = 0;
163 
164 void	dumpsys __P((void));
165 void	stackdump __P((void));
166 
167 
168 /*
169  * Machine-dependent startup code
170  */
171 void
172 cpu_startup()
173 {
174 	unsigned i;
175 	caddr_t v;
176 	long sz;
177 	int base, residual;
178 #ifdef DEBUG
179 	extern int pmapdebug;
180 	int opmapdebug = pmapdebug;
181 #endif
182 	vaddr_t minaddr, maxaddr;
183 	vsize_t size;
184 	extern struct user *proc0paddr;
185 	char pbuf[9];
186 
187 #ifdef DEBUG
188 	pmapdebug = 0;
189 #endif
190 
191 	proc0.p_addr = proc0paddr;
192 
193 	/*
194 	 * Good {morning,afternoon,evening,night}.
195 	 */
196 	printf(version);
197 	/*identifycpu();*/
198 	format_bytes(pbuf, sizeof(pbuf), ctob((u_int64_t)physmem));
199 	printf("total memory = %s\n", pbuf);
200 
201 	/*
202 	 * Find out how much space we need, allocate it,
203 	 * and then give everything true virtual addresses.
204 	 */
205 	sz = (long)allocsys(NULL, NULL);
206 	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
207 		panic("startup: no room for %lx bytes of tables", sz);
208 	if (allocsys(v, NULL) - v != sz)
209 		panic("startup: table size inconsistency");
210 
211         /*
212          * allocate virtual and physical memory for the buffers.
213          */
214         size = MAXBSIZE * nbuf;         /* # bytes for buffers */
215 
216         /* allocate VM for buffers... area is not managed by VM system */
217         if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
218                     NULL, UVM_UNKNOWN_OFFSET, 0,
219                     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
220                                 UVM_ADV_NORMAL, 0)) != 0)
221         	panic("cpu_startup: cannot allocate VM for buffers");
222 
223         minaddr = (vaddr_t) buffers;
224         if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
225         	bufpages = btoc(MAXBSIZE) * nbuf; /* do not overallocate RAM */
226         }
227         base = bufpages / nbuf;
228         residual = bufpages % nbuf;
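        /*
         * Worked example: bufpages = 100 and nbuf = 16 give base = 6
         * and residual = 4, so buffers 0-3 map 7 pages each and
         * buffers 4-15 map 6 (4*7 + 12*6 = 100 pages total).
         */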
229 
230         /* now allocate RAM for buffers */
231 	for (i = 0 ; i < nbuf ; i++) {
232 		vaddr_t curbuf;
233 		vsize_t curbufsize;
234 		struct vm_page *pg;
235 
236 		/*
237 		 * each buffer has MAXBSIZE bytes of VM space allocated.  of
238 		 * that MAXBSIZE space we allocate and map (base+1) pages
239 		 * for the first "residual" buffers, and then we allocate
240 		 * "base" pages for the rest.
241 		 */
242 		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
243 		curbufsize = NBPG * ((i < residual) ? (base+1) : base);
244 
245 		while (curbufsize) {
246 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
247 			if (pg == NULL)
248 				panic("cpu_startup: "
249 				    "not enough RAM for buffer cache");
250 			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
251 			    VM_PROT_READ | VM_PROT_WRITE);
252 			curbuf += PAGE_SIZE;
253 			curbufsize -= PAGE_SIZE;
254 		}
255 	}
256 	pmap_update(kernel_map->pmap);
257 
258 	/*
259 	 * Allocate a submap for exec arguments.  This map effectively
260 	 * limits the number of processes exec'ing at any time.
261 	 */
262         exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
263                                  16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
264 
265 	/*
266 	 * Finally, allocate mbuf cluster submap.
267 	 */
268         mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
269 	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);
270 
271 #ifdef DEBUG
272 	pmapdebug = opmapdebug;
273 #endif
274 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
275 	printf("avail memory = %s\n", pbuf);
276 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
277 	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);
278 
279 	/*
280 	 * Set up buffers, so they can be used to read disk labels.
281 	 */
282 	bufinit();
283 
284 #if 0
285 	pmap_redzone();
286 #endif
287 }
288 
289 /*
290  * Set up registers on exec.
291  */
292 
293 #ifdef __arch64__
294 #define STACK_OFFSET	BIAS
295 #define CPOUTREG(l,v)	copyout(&(v), (l), sizeof(v))
296 #undef CCFSZ
297 #define CCFSZ	CC64FSZ
298 #else
299 #define STACK_OFFSET	0
300 #define CPOUTREG(l,v)	copyout(&(v), (l), sizeof(v))
301 #endif
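
/*
 * The SPARC V9 64-bit ABI keeps %sp biased by 2047 bytes (BIAS): the
 * register holds the true stack address minus that bias, so the real
 * frame always sits at %sp + BIAS.  STACK_OFFSET above is subtracted
 * whenever the kernel computes a user %sp value below.
 */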
302 
303 /* ARGSUSED */
304 void
305 setregs(p, pack, stack)
306 	struct proc *p;
307 	struct exec_package *pack;
308 	vaddr_t stack;
309 {
310 	register struct trapframe64 *tf = p->p_md.md_tf;
311 	register struct fpstate64 *fs;
312 	register int64_t tstate;
313 	int pstate = PSTATE_USER;
314 #ifdef __arch64__
315 	Elf_Ehdr *eh = pack->ep_hdr;
316 #endif
317 
318 	/* Don't allow misaligned code by default */
319 	p->p_md.md_flags &= ~MDP_FIXALIGN;
320 
321 	/*
322 	 * Set the registers to 0 except for:
323 	 *	%o6: stack pointer, built in exec()
324 	 *	%tstate: (retain icc and xcc and cwp bits)
325 	 *	%g1: address of p->p_psstr (used by crt0)
326 	 *	%tpc,%tnpc: entry point of program
327 	 */
328 #ifdef __arch64__
329 	/* Check what memory model is requested */
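	/*
	 * TSO (total store order) is the strongest model and the safe
	 * default; PSO (partial store order) and RMO (relaxed memory
	 * order) progressively let the CPU reorder memory accesses,
	 * which the executable declares it tolerates via ELF e_flags.
	 */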
330 	switch ((eh->e_flags & EF_SPARCV9_MM)) {
331 	default:
332 		printf("Unknown memory model %d\n",
333 		       (eh->e_flags & EF_SPARCV9_MM));
334 		/* FALLTHROUGH */
335 	case EF_SPARCV9_TSO:
336 		pstate = PSTATE_MM_TSO|PSTATE_IE;
337 		break;
338 	case EF_SPARCV9_PSO:
339 		pstate = PSTATE_MM_PSO|PSTATE_IE;
340 		break;
341 	case EF_SPARCV9_RMO:
342 		pstate = PSTATE_MM_RMO|PSTATE_IE;
343 		break;
344 	}
345 #endif
346 	tstate = (ASI_PRIMARY_NO_FAULT<<TSTATE_ASI_SHIFT) |
347 		((pstate)<<TSTATE_PSTATE_SHIFT) |
348 		(tf->tf_tstate & TSTATE_CWP);
349 	if ((fs = p->p_md.md_fpstate) != NULL) {
350 		/*
351 		 * We hold an FPU state.  If we own *the* FPU chip state
352 		 * we must get rid of it, and the only way to do that is
353 		 * to save it.  In any case, get rid of our FPU state.
354 		 */
355 		if (p == fpproc) {
356 			savefpstate(fs);
357 			fpproc = NULL;
358 		}
359 		free((void *)fs, M_SUBPROC);
360 		p->p_md.md_fpstate = NULL;
361 	}
362 	bzero((caddr_t)tf, sizeof *tf);
363 	tf->tf_tstate = tstate;
364 	tf->tf_global[1] = (vaddr_t)p->p_psstr;
365 	/* %g4 needs to point to the start of the data segment */
366 	tf->tf_global[4] = 0;
367 	tf->tf_pc = pack->ep_entry & ~3;
368 	tf->tf_npc = tf->tf_pc + 4;
369 	stack -= sizeof(struct rwindow);
370 	tf->tf_out[6] = stack - STACK_OFFSET;
371 	tf->tf_out[7] = NULL;
372 #ifdef NOTDEF_DEBUG
373 	printf("setregs: setting tf %p sp %p pc %p\n", (long)tf,
374 	       (long)tf->tf_out[6], (long)tf->tf_pc);
375 #ifdef DDB
376 	Debugger();
377 #endif
378 #endif
379 }
380 
381 #ifdef DEBUG
382 /* See sigdebug.h */
383 #include <sparc64/sparc64/sigdebug.h>
384 int sigdebug = 0x0;
385 int sigpid = 0;
386 #endif
387 
388 struct sigframe {
389 	int	sf_signo;		/* signal number */
390 	int	sf_code;		/* code */
391 #ifndef __arch64__
392 	struct	sigcontext *sf_scp;	/* SunOS user addr of sigcontext */
393 	int	sf_addr;		/* SunOS compat, always 0 for now */
394 #endif
395 	struct	sigcontext sf_sc;	/* actual sigcontext */
396 };
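
/*
 * User stack at handler entry, as built by sendsig() below
 * (higher addresses first):
 *
 *	interrupted frame		<- oldsp
 *	struct sigframe			<- fp, 16-byte aligned
 *	struct rwindow			<- newsp; rw_in[6] links to old %sp
 *					<- handler %sp = newsp - STACK_OFFSET
 */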
397 
398 /*
399  * machine dependent system variables.
400  */
401 int
402 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
403 	int *name;
404 	u_int namelen;
405 	void *oldp;
406 	size_t *oldlenp;
407 	void *newp;
408 	size_t newlen;
409 	struct proc *p;
410 {
411 	u_int chosen;
412 	char bootargs[256];
413 	char *cp = NULL;
414 
415 	/* all sysctl names at this level are terminal */
416 	if (namelen != 1)
417 		return (ENOTDIR);	/* overloaded */
418 
419 	switch (name[0]) {
420 	case CPU_BOOTED_KERNEL:
421 		if (((chosen = OF_finddevice("/chosen")) != -1) &&
422 		    ((OF_getprop(chosen, "bootargs", bootargs, sizeof bootargs))
423 		      >= 0)) {
424 			/*
425 			 * bootargs is of the form: [kernelname] [args...]
426 			 * It can be the empty string if we booted from the default
427 			 * kernel name.
428 			 */
429 			for (cp = bootargs;
430 			     *cp && *cp != ' ' && *cp != '\t' && *cp != '\n';
431 			     cp++);
432 			*cp = 0;
433 			/* Now we've separated out the kernel name from the args */
434 			cp = bootargs;
435 			if (*cp == 0 || *cp == '-')
436 				/*
437 				 * We can leave it NULL and let userland handle
438 				 * the failure or set it to the default name,
439 				 * `netbsd'
440 				 */
441 				cp = "netbsd";
442 		}
443 		if (cp == NULL || cp[0] == '\0')
444 			return (ENOENT);
445 		return (sysctl_rdstring(oldp, oldlenp, newp, cp));
446 	default:
447 		return (EOPNOTSUPP);
448 	}
449 	/* NOTREACHED */
450 }
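
/*
 * From userland this is machdep.booted_kernel; illustrative session:
 *
 *	$ sysctl machdep.booted_kernel
 *	machdep.booted_kernel = netbsd
 */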
451 
452 /*
453  * Send an interrupt to process.
454  */
455 void
456 sendsig(catcher, sig, mask, code)
457 	sig_t catcher;
458 	int sig;
459 	sigset_t *mask;
460 	u_long code;
461 {
462 	struct proc *p = curproc;
463 	struct sigframe *fp;
464 	struct trapframe64 *tf;
465 	vaddr_t addr;
466 	struct rwindow *oldsp, *newsp;
467 #ifdef NOT_DEBUG
468 	struct rwindow tmpwin;
469 #endif
470 	struct sigframe sf;
471 	int onstack;
472 
473 	tf = p->p_md.md_tf;
474 	oldsp = (struct rwindow *)(u_long)(tf->tf_out[6] + STACK_OFFSET);
475 
476 	/*
477 	 * Compute new user stack addresses, subtract off
478 	 * one signal frame, and align.
479 	 */
480 	onstack =
481 	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
482 	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
483 
484 	if (onstack)
485 		fp = (struct sigframe *)((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
486 						p->p_sigctx.ps_sigstk.ss_size);
487 	else
488 		fp = (struct sigframe *)oldsp;
489 	/* Allocate an aligned sigframe */
490 	fp = (struct sigframe *)((long)(fp - 1) & ~0x0f);
491 
492 #ifdef DEBUG
493 	sigpid = p->p_pid;
494 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) {
495 		printf("sendsig: %s[%d] sig %d newusp %p scp %p oldsp %p\n",
496 		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc, oldsp);
497 #ifdef DDB
498 		if (sigdebug & SDB_DDB) Debugger();
499 #endif
500 	}
501 #endif
502 
503 	/*
504 	 * Now set up the signal frame.  We build it in kernel space
505 	 * and then copy it out.  We probably ought to just build it
506 	 * directly in user space....
507 	 */
508 	sf.sf_signo = sig;
509 	sf.sf_code = code;
510 #ifndef __arch64__
511 	sf.sf_scp = 0;
512 	sf.sf_addr = 0;			/* XXX */
513 #endif
514 
515 	/*
516 	 * Build the signal context to be used by sigreturn.
517 	 */
518 	sf.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;
519 	sf.sf_sc.sc_mask = *mask;
520 #ifdef COMPAT_13
521 	/*
522 	 * XXX We always have to save an old style signal mask because
523 	 * XXX we might be delivering a signal to a process which will
524 	 * XXX escape from the signal in a non-standard way and invoke
525 	 * XXX sigreturn() directly.
526 	 */
527 	native_sigset_to_sigset13(mask, &sf.sf_sc.__sc_mask13);
528 #endif
529 	/* Save register context. */
530 	sf.sf_sc.sc_sp = (long)tf->tf_out[6];
531 	sf.sf_sc.sc_pc = tf->tf_pc;
532 	sf.sf_sc.sc_npc = tf->tf_npc;
533 #ifdef __arch64__
534 	sf.sf_sc.sc_tstate = tf->tf_tstate; /* XXX */
535 #else
536 	sf.sf_sc.sc_psr = TSTATECCR_TO_PSR(tf->tf_tstate); /* XXX */
537 #endif
538 	sf.sf_sc.sc_g1 = tf->tf_global[1];
539 	sf.sf_sc.sc_o0 = tf->tf_out[0];
540 
541 	/*
542 	 * Put the stack in a consistent state before we whack away
543 	 * at it.  Note that write_user_windows may just dump the
544 	 * registers into the pcb; we need them in the process's memory.
545 	 * We also need to make sure that when we start the signal handler,
546 	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
547 	 * joins seamlessly with the frame it was in when the signal occurred,
548 	 * so that the debugger and _longjmp code can back up through it.
549 	 */
550 	newsp = (struct rwindow *)((vaddr_t)fp - sizeof(struct rwindow));
551 	write_user_windows();
552 #ifdef DEBUG
553 	if ((sigdebug & SDB_KSTACK))
554 	    printf("sendsig: saving sf to %p, setting stack pointer %p to %p\n",
555 		   fp, &(((struct rwindow *)newsp)->rw_in[6]),
556 		   (void *)(unsigned long)tf->tf_out[6]);
557 #endif
558 	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
559 #ifdef NOT_DEBUG
560 	    copyin(oldsp, &tmpwin, sizeof(tmpwin)) || copyout(&tmpwin, newsp, sizeof(tmpwin)) ||
561 #endif
562 	    CPOUTREG(&(((struct rwindow *)newsp)->rw_in[6]), tf->tf_out[6])) {
563 		/*
564 		 * Process has trashed its stack; give it an illegal
565 		 * instruction to halt it in its tracks.
566 		 */
567 #ifdef DEBUG
568 		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
569 			printf("sendsig: window save or copyout error\n");
570 		printf("sendsig: stack was trashed trying to send sig %d, sending SIGILL\n", sig);
571 #ifdef DDB
572 		if (sigdebug & SDB_DDB) Debugger();
573 #endif
574 #endif
575 		sigexit(p, SIGILL);
576 		/* NOTREACHED */
577 	}
578 
579 #ifdef DEBUG
580 	if (sigdebug & SDB_FOLLOW) {
581 		printf("sendsig: %s[%d] sig %d scp %p\n",
582 		       p->p_comm, p->p_pid, sig, &fp->sf_sc);
583 	}
584 #endif
585 
586 	/*
587 	 * Arrange to continue execution at the code copied out in exec().
588 	 * It needs the function to call in %g1, and a new stack pointer.
589 	 */
590 	addr = (vaddr_t)p->p_sigctx.ps_sigcode;
591 	tf->tf_global[1] = (vaddr_t)catcher;
592 	tf->tf_pc = addr;
593 	tf->tf_npc = addr + 4;
594 	tf->tf_out[6] = (vaddr_t)newsp - STACK_OFFSET;
595 
596 	/* Remember that we're now on the signal stack. */
597 	if (onstack)
598 		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
599 
600 #ifdef DEBUG
601 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) {
602 		printf("sendsig: about to return to catcher %p thru %p\n",
603 		       catcher, (void *)(unsigned long)addr);
604 #ifdef DDB
605 		if (sigdebug & SDB_DDB) Debugger();
606 #endif
607 	}
608 #endif
609 }
610 
611 /*
612  * System call to cleanup state after a signal
613  * has been taken.  Reset signal mask and
614  * stack state from context left by sendsig (above),
615  * and return to the given trap frame (if there is one).
616  * Check carefully to make sure that the user has not
617  * modified the state to gain improper privileges or to cause
618  * a machine fault.
619  */
620 /* ARGSUSED */
621 int
622 sys___sigreturn14(p, v, retval)
623 	register struct proc *p;
624 	void *v;
625 	register_t *retval;
626 {
627 	struct sys___sigreturn14_args /* {
628 		syscallarg(struct sigcontext *) sigcntxp;
629 	} */ *uap = v;
630 	struct sigcontext sc, *scp;
631 	register struct trapframe64 *tf;
632 	int error = EINVAL;
633 
634 	/* First ensure consistent stack state (see sendsig). */
635 	write_user_windows();
636 	if (rwindow_save(p)) {
637 #ifdef DEBUG
638 		printf("sigreturn14: rwindow_save(%p) failed, sending SIGILL\n", p);
639 #ifdef DDB
640 		if (sigdebug & SDB_DDB) Debugger();
641 #endif
642 #endif
643 		sigexit(p, SIGILL);
644 	}
645 #ifdef DEBUG
646 	if (sigdebug & SDB_FOLLOW) {
647 		printf("sigreturn14: %s[%d], sigcntxp %p\n",
648 		    p->p_comm, p->p_pid, SCARG(uap, sigcntxp));
649 #ifdef DDB
650 		if (sigdebug & SDB_DDB) Debugger();
651 #endif
652 	}
653 #endif
654 	scp = SCARG(uap, sigcntxp);
655 	if ((vaddr_t)scp & 3 || (error = copyin((caddr_t)scp, &sc, sizeof sc)) != 0)
656 #ifdef DEBUG
657 	{
658 		printf("sigreturn14: copyin failed: scp=%p\n", scp);
659 #ifdef DDB
660 		if (sigdebug & SDB_DDB) Debugger();
661 #endif
662 		return (error);
663 	}
664 #else
665 		return (error);
666 #endif
667 	scp = &sc;
668 
669 	tf = p->p_md.md_tf;
670 	/*
671 	 * Only the icc bits in the psr are used, so it need not be
672 	 * verified.  pc and npc must be multiples of 4.  This is all
673 	 * that is required; if it holds, just do it.
674 	 */
675 	if (((sc.sc_pc | sc.sc_npc) & 3) != 0 || (sc.sc_pc == 0) || (sc.sc_npc == 0))
676 #ifdef DEBUG
677 	{
678 		printf("sigreturn14: pc %p or npc %p invalid\n",
679 		   (void *)(unsigned long)sc.sc_pc,
680 		   (void *)(unsigned long)sc.sc_npc);
681 #ifdef DDB
682 		if (sigdebug & SDB_DDB) Debugger();
683 #endif
684 		return (EINVAL);
685 	}
686 #else
687 		return (EINVAL);
688 #endif
689 	/* take only psr ICC field */
690 #ifdef __arch64__
691 	tf->tf_tstate = (u_int64_t)(tf->tf_tstate & ~TSTATE_CCR) | (scp->sc_tstate & TSTATE_CCR);
692 #else
693 	tf->tf_tstate = (u_int64_t)(tf->tf_tstate & ~TSTATE_CCR) | PSRCC_TO_TSTATE(scp->sc_psr);
694 #endif
695 	tf->tf_pc = (u_int64_t)scp->sc_pc;
696 	tf->tf_npc = (u_int64_t)scp->sc_npc;
697 	tf->tf_global[1] = (u_int64_t)scp->sc_g1;
698 	tf->tf_out[0] = (u_int64_t)scp->sc_o0;
699 	tf->tf_out[6] = (u_int64_t)scp->sc_sp;
700 #ifdef DEBUG
701 	if (sigdebug & SDB_FOLLOW) {
702 		printf("sigreturn14: return trapframe pc=%p sp=%p tstate=%llx\n",
703 		       (void *)(unsigned long)tf->tf_pc,
704 		       (void *)(unsigned long)tf->tf_out[6],
705 		       (unsigned long long)tf->tf_tstate);
706 #ifdef DDB
707 		if (sigdebug & SDB_DDB) Debugger();
708 #endif
709 	}
710 #endif
711 
712 	/* Restore signal stack. */
713 	if (sc.sc_onstack & SS_ONSTACK)
714 		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
715 	else
716 		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
717 
718 	/* Restore signal mask. */
719 	(void) sigprocmask1(p, SIG_SETMASK, &sc.sc_mask, 0);
720 
721 	return (EJUSTRETURN);
722 }
723 
724 int	waittime = -1;
725 
726 void
727 cpu_reboot(howto, user_boot_string)
728 	register int howto;
729 	char *user_boot_string;
730 {
731 	int i;
732 	static char str[128];
733 
734 	/* If system is cold, just halt. */
735 	if (cold) {
736 		howto |= RB_HALT;
737 		goto haltsys;
738 	}
739 
740 #if NFB > 0
741 	fb_unblank();
742 #endif
743 	boothowto = howto;
744 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
745 		extern struct proc proc0;
746 		extern int sparc_clock_time_is_ok;
747 
748 		/* XXX protect against curproc->p_stats.foo refs in sync() */
749 		if (curproc == NULL)
750 			curproc = &proc0;
751 		waittime = 0;
752 		vfs_shutdown();
753 
754 		/*
755 		 * If we've been adjusting the clock, the todr
756 		 * will be out of synch; adjust it now.
757 		 * Do this only if the TOD clock has already been read out
758 		 * successfully by inittodr() or set by an explicit call
759 		 * to resettodr() (e.g. from settimeofday()).
760 		 */
761 		if (sparc_clock_time_is_ok)
762 			resettodr();
763 	}
764 	(void) splhigh();		/* ??? */
765 
766 	/* If rebooting and a dump is requested, do it. */
767 	if (howto & RB_DUMP)
768 		dumpsys();
769 
770 haltsys:
771 	/* Run any shutdown hooks. */
772 	doshutdownhooks();
773 
774 	/* If powerdown was requested, do it. */
775 	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
776 		/* Let the OBP do the work. */
777 		OF_poweroff();
778 		printf("WARNING: powerdown failed!\n");
779 		/*
780 		 * RB_POWERDOWN implies RB_HALT... fall into it...
781 		 */
782 	}
783 
784 	if (howto & RB_HALT) {
785 		printf("halted\n\n");
786 		OF_exit();
787 		panic("PROM exit failed");
788 	}
789 
790 	printf("rebooting\n\n");
791 	if (user_boot_string && *user_boot_string) {
792 		i = strlen(user_boot_string);
793 		if (i > sizeof(str) - 3)	/* leave room for "-sd" and the NUL */
794 			OF_boot(user_boot_string);	/* XXX */
795 		bcopy(user_boot_string, str, i);
796 	} else {
797 		i = 1;
798 		str[0] = '\0';
799 	}
800 
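	/*
	 * Append the single-letter boot flags the PROM boot command
	 * understands: 's' (single user) and 'd' (enter the kernel
	 * debugger), with a leading '-' if no kernel name was given.
	 */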
801 	if (howto & RB_SINGLE)
802 		str[i++] = 's';
803 	if (howto & RB_KDB)
804 		str[i++] = 'd';
805 	if (i > 1) {
806 		if (str[0] == '\0')
807 			str[0] = '-';
808 		str[i] = 0;
809 	} else
810 		str[0] = 0;
811 	OF_boot(str);
812 	panic("cpu_reboot -- failed");
813 	/*NOTREACHED*/
814 }
815 
816 u_int32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
817 int	dumpsize = 0;		/* also for savecore */
818 long	dumplo = 0;
819 
820 void
821 cpu_dumpconf()
822 {
823 	register int nblks, dumpblks;
824 
825 	if (dumpdev == NODEV || bdevsw[major(dumpdev)].d_psize == 0)
826 		/* No usable dump device */
827 		return;
828 
829 	nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
830 
831 	dumpblks = ctod(physmem) + pmap_dumpsize();
832 	if (dumpblks > (nblks - ctod(1)))
833 		/*
834 		 * dump size is too big for the partition.
835 		 * Note, we safeguard a click at the front for a
836 		 * possible disk label.
837 		 */
838 		return;
839 
840 	/* Put the dump at the end of the partition */
841 	dumplo = nblks - dumpblks;
842 
843 	/*
844 	 * savecore(8) expects dumpsize to be the number of pages
845 	 * of actual core dumped (i.e. excluding the MMU stuff).
846 	 */
847 	dumpsize = physmem;
848 }
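
/*
 * Resulting layout of the dump partition, in disk blocks:
 *
 *	0 .. dumplo-1				untouched (label and slack)
 *	dumplo .. dumplo+pmap_dumpsize()-1	MMU state (pmap_dumpmmu())
 *	.. nblks-1				ctod(physmem) blocks of memory
 *
 * where dumplo = nblks - (ctod(physmem) + pmap_dumpsize()).
 */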
849 
850 #define	BYTES_PER_DUMP	(NBPG)	/* must be a multiple of pagesize */
851 static vaddr_t dumpspace;
852 
853 caddr_t
854 reserve_dumppages(p)
855 	caddr_t p;
856 {
857 
858 	dumpspace = (vaddr_t)p;
859 	return (p + BYTES_PER_DUMP);
860 }
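
/*
 * dumpsys() below treats this single reserved page as a moving
 * window: each BYTES_PER_DUMP chunk of physical memory is mapped at
 * dumpspace, written to the dump device, then unmapped again.
 */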
861 
862 /*
863  * Write a crash dump.
864  */
865 void
866 dumpsys()
867 {
868 	register int psize;
869 	daddr_t blkno;
870 	register int (*dump)	__P((dev_t, daddr_t, caddr_t, size_t));
871 	int error = 0;
872 	register struct mem_region *mp;
873 	extern struct mem_region *mem;
874 
875 	/* copy registers to memory */
876 	snapshot(cpcb);
877 	stackdump();
878 
879 	if (dumpdev == NODEV)
880 		return;
881 
882 	/*
883 	 * For dumps during autoconfiguration,
884 	 * if dump device has already configured...
885 	 */
886 	if (dumpsize == 0)
887 		cpu_dumpconf();
888 	if (!dumpspace) {
889 		printf("\nno address space available, dump not possible\n");
890 		return;
891 	}
892 	if (dumplo <= 0) {
893 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
894 		    minor(dumpdev));
895 		return;
896 	}
897 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
898 	    minor(dumpdev), dumplo);
899 
900 	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
901 	printf("dump ");
902 	if (psize == -1) {
903 		printf("area unavailable\n");
904 		return;
905 	}
906 	blkno = dumplo;
907 	dump = bdevsw[major(dumpdev)].d_dump;
908 
909 	error = pmap_dumpmmu(dump, blkno);
910 	blkno += pmap_dumpsize();
911 	printf("starting dump, blkno %d\n", blkno);
912 	for (mp = mem; mp->size; mp++) {
913 		unsigned i = 0, n;
914 		paddr_t maddr = mp->start;
915 
916 #if 0
917 		/* Remind me: why don't we dump page 0 ? */
918 		if (maddr == 0) {
919 			/* Skip first page at physical address 0 */
920 			maddr += NBPG;
921 			i += NBPG;
922 			blkno += btodb(NBPG);
923 		}
924 #endif
925 		for (; i < mp->size; i += n) {
926 			n = mp->size - i;
927 			if (n > BYTES_PER_DUMP)
928 				 n = BYTES_PER_DUMP;
929 
930 			/* print out how many MBs we have dumped */
931 			if (i && (i % (1024*1024)) == 0)
932 				printf("%d ", i / (1024*1024));
933 			(void) pmap_enter(pmap_kernel(), dumpspace, maddr,
934 					VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
935 			pmap_update(pmap_kernel());
936 			error = (*dump)(dumpdev, blkno,
937 					(caddr_t)dumpspace, (int)n);
938 			pmap_remove(pmap_kernel(), dumpspace, dumpspace + n);
939 			pmap_update(pmap_kernel());
940 			if (error)
941 				break;
942 			maddr += n;
943 			blkno += btodb(n);
944 		}
945 	}
946 
947 	switch (error) {
948 
949 	case ENXIO:
950 		printf("device bad\n");
951 		break;
952 
953 	case EFAULT:
954 		printf("device not ready\n");
955 		break;
956 
957 	case EINVAL:
958 		printf("area improper\n");
959 		break;
960 
961 	case EIO:
962 		printf("i/o error\n");
963 		break;
964 
965 	case 0:
966 		printf("succeeded\n");
967 		break;
968 
969 	default:
970 		printf("error %d\n", error);
971 		break;
972 	}
973 }
974 
975 void trapdump __P((struct trapframe64*));
976 /*
977  * dump out a trapframe.
978  */
979 void
980 trapdump(tf)
981 	struct trapframe64* tf;
982 {
983 	printf("TRAPFRAME: tstate=%llx pc=%llx npc=%llx y=%x\n",
984 	       (unsigned long long)tf->tf_tstate, (unsigned long long)tf->tf_pc,
985 	       (unsigned long long)tf->tf_npc, (unsigned)tf->tf_y);
986 	printf("%%g1-7: %llx %llx %llx %llx %llx %llx %llx\n",
987 	       (unsigned long long)tf->tf_global[1],
988 	       (unsigned long long)tf->tf_global[2],
989 	       (unsigned long long)tf->tf_global[3],
990 	       (unsigned long long)tf->tf_global[4],
991 	       (unsigned long long)tf->tf_global[5],
992 	       (unsigned long long)tf->tf_global[6],
993 	       (unsigned long long)tf->tf_global[7]);
994 	printf("%%o0-7: %llx %llx %llx %llx\n %llx %llx %llx %llx\n",
995 	       (unsigned long long)tf->tf_out[0],
996 	       (unsigned long long)tf->tf_out[1],
997 	       (unsigned long long)tf->tf_out[2],
998 	       (unsigned long long)tf->tf_out[3],
999 	       (unsigned long long)tf->tf_out[4],
1000 	       (unsigned long long)tf->tf_out[5],
1001 	       (unsigned long long)tf->tf_out[6],
1002 	       (unsigned long long)tf->tf_out[7]);
1003 }
1004 /*
1005  * get the fp and dump the stack as best we can.  don't leave the
1006  * current stack page
1007  */
1008 void
1009 stackdump()
1010 {
1011 	struct frame32 *fp = (struct frame32 *)getfp(), *sfp;
1012 	struct frame64 *fp64;
1013 
1014 	sfp = fp;
1015 	printf("Frame pointer is at %p\n", fp);
1016 	printf("Call traceback:\n");
1017 	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
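		/*
		 * V9 convention: an odd frame pointer marks a 64-bit
		 * register save area, and the true frame sits at
		 * fp + BIAS (2047); even values are 32-bit frames.
		 */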
1018 		if( ((long)fp) & 1 ) {
1019 			fp64 = (struct frame64*)(((char*)fp)+BIAS);
1020 			/* 64-bit frame */
1021 			printf("%llx(%llx, %llx, %llx, %llx, %llx, %llx, %llx) fp = %llx\n",
1022 			       (unsigned long long)fp64->fr_pc,
1023 			       (unsigned long long)fp64->fr_arg[0],
1024 			       (unsigned long long)fp64->fr_arg[1],
1025 			       (unsigned long long)fp64->fr_arg[2],
1026 			       (unsigned long long)fp64->fr_arg[3],
1027 			       (unsigned long long)fp64->fr_arg[4],
1028 			       (unsigned long long)fp64->fr_arg[5],
1029 			       (unsigned long long)fp64->fr_arg[6],
1030 			       (unsigned long long)fp64->fr_fp);
1031 			fp = (struct frame32 *)(u_long)fp64->fr_fp;
1032 		} else {
1033 			/* 32-bit frame */
1034 			printf("  pc = %x  args = (%x, %x, %x, %x, %x, %x, %x) fp = %x\n",
1035 			       fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
1036 			       fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6],
1037 			       fp->fr_fp);
1038 			fp = (struct frame32 *)(u_long)(u_int)fp->fr_fp;
1039 		}
1040 	}
1041 }
1042 
1043 
1044 int
1045 cpu_exec_aout_makecmds(p, epp)
1046 	struct proc *p;
1047 	struct exec_package *epp;
1048 {
1049 	return (ENOEXEC);
1050 }
1051 
1052 /*
1053  * Common function for DMA map creation.  May be called by bus-specific
1054  * DMA map creation functions.
1055  */
1056 int
1057 _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
1058 	bus_dma_tag_t t;
1059 	bus_size_t size;
1060 	int nsegments;
1061 	bus_size_t maxsegsz;
1062 	bus_size_t boundary;
1063 	int flags;
1064 	bus_dmamap_t *dmamp;
1065 {
1066 	struct sparc_bus_dmamap *map;
1067 	void *mapstore;
1068 	size_t mapsize;
1069 
1070 	/*
1071 	 * Allocate and initialize the DMA map.  The end of the map
1072 	 * is a variable-sized array of segments, so we allocate enough
1073 	 * room for them in one shot.
1074 	 *
1075 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
1076 	 * of ALLOCNOW notifies others that we've reserved these resources,
1077 	 * and they are not to be freed.
1078 	 *
1079 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
1080 	 * the (nsegments - 1).
1081 	 */
1082 	mapsize = sizeof(struct sparc_bus_dmamap) +
1083 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
1084 	if ((mapstore = malloc(mapsize, M_DMAMAP,
1085 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1086 		return (ENOMEM);
1087 
1088 	bzero(mapstore, mapsize);
1089 	map = (struct sparc_bus_dmamap *)mapstore;
1090 	map->_dm_size = size;
1091 	map->_dm_segcnt = nsegments;
1092 	map->_dm_maxsegsz = maxsegsz;
1093 	map->_dm_boundary = boundary;
1094 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT|BUS_DMA_COHERENT|
1095 				   BUS_DMA_NOWRITE|BUS_DMA_NOCACHE);
1096 	map->dm_mapsize = 0;		/* no valid mappings */
1097 	map->dm_nsegs = 0;
1098 
1099 	*dmamp = map;
1100 	return (0);
1101 }
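
/*
 * Typical driver sequence through the map functions below (sketch
 * only; buffer names and error handling are placeholders):
 *
 *	bus_dmamap_create(t, MAXPHYS, 1, MAXPHYS, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... run the DMA transfer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */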
1102 
1103 /*
1104  * Common function for DMA map destruction.  May be called by bus-specific
1105  * DMA map destruction functions.
1106  */
1107 void
1108 _bus_dmamap_destroy(t, map)
1109 	bus_dma_tag_t t;
1110 	bus_dmamap_t map;
1111 {
1112 
1113 	free(map, M_DMAMAP);
1114 }
1115 
1116 /*
1117  * Common function for loading a DMA map with a linear buffer.  May
1118  * be called by bus-specific DMA map load functions.
1119  *
1120  * Most SPARCs have IOMMUs in the bus controllers.  In those cases
1121  * they only need one segment and will use virtual addresses for DVMA.
1122  * Those bus controllers should intercept these vectors and should
1123  * *NEVER* call _bus_dmamap_load() which is used only by devices that
1124  * bypass DVMA.
1125  */
1126 int
1127 _bus_dmamap_load(t, map, buf, buflen, p, flags)
1128 	bus_dma_tag_t t;
1129 	bus_dmamap_t map;
1130 	void *buf;
1131 	bus_size_t buflen;
1132 	struct proc *p;
1133 	int flags;
1134 {
1135 	bus_size_t sgsize;
1136 	vaddr_t vaddr = (vaddr_t)buf;
1137 	int i;
1138 
1139 	/*
1140 	 * Make sure that on error condition we return "no valid mappings".
1141 	 */
1142 	map->dm_nsegs = 0;
1143 
1144 	if (buflen > map->_dm_size)
1145 	{
1146 #ifdef DEBUG
1147 		printf("_bus_dmamap_load(): error %lu > %lu -- map size exceeded!\n",
1148 		    (unsigned long)buflen, (unsigned long)map->_dm_size);
1149 #ifdef DDB
1150 		Debugger();
1151 #endif
1152 #endif
1153 		return (EINVAL);
1154 	}
1155 
1156 	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
1157 
1158 	/*
1159 	 * We always use just one segment.
1160 	 */
1161 	map->dm_mapsize = buflen;
1162 	i = 0;
1163 	map->dm_segs[i].ds_addr = NULL;
1164 	map->dm_segs[i].ds_len = 0;
1165 	while (sgsize > 0 && i < map->_dm_segcnt) {
1166 		paddr_t pa;
1167 
1168 		(void) pmap_extract(pmap_kernel(), vaddr, &pa);
1169 		sgsize -= NBPG;
1170 		vaddr += NBPG;
1171 		if (map->dm_segs[i].ds_len == 0)
1172 			map->dm_segs[i].ds_addr = pa;
1173 		if (pa == (map->dm_segs[i].ds_addr + map->dm_segs[i].ds_len)
1174 		    && ((map->dm_segs[i].ds_len + NBPG) < map->_dm_maxsegsz)) {
1175 			/* Hey, waddyaknow, they're contiguous */
1176 			map->dm_segs[i].ds_len += NBPG;
1177 			continue;
1178 		}
		if (++i >= map->_dm_segcnt) {
			/* Out of segments; report no valid mappings. */
			map->dm_nsegs = 0;
			return (EFBIG);
		}
1179 		map->dm_segs[i].ds_addr = pa;
1180 		map->dm_segs[i].ds_len = NBPG;
1181 	}
1182 	map->dm_nsegs = i + 1;	/* segments 0..i inclusive are valid */
1183 	/* Mapping is bus dependent */
1184 	return (0);
1185 }
1186 
1187 /*
1188  * Like _bus_dmamap_load(), but for mbufs.
1189  */
1190 int
1191 _bus_dmamap_load_mbuf(t, map, m, flags)
1192 	bus_dma_tag_t t;
1193 	bus_dmamap_t map;
1194 	struct mbuf *m;
1195 	int flags;
1196 {
1197 #if 1
1198 	bus_dma_segment_t segs[MAX_DMA_SEGS];
1199 	int i;
1200 	size_t len;
1201 
1202 	/* Record mbuf for *_unload */
1203 	map->_dm_type = _DM_TYPE_MBUF;
1204 	map->_dm_source = (void *)m;
1205 
1206 	i = 0;
1207 	len = 0;
1208 	while (m) {
1209 		vaddr_t vaddr = mtod(m, vaddr_t);
1210 		long buflen = (long)m->m_len;
1211 
1212 		len += buflen;
1213 		while (buflen > 0 && i < MAX_DMA_SEGS) {
1214 			paddr_t pa;
1215 			long incr;
1216 
1217 			incr = NBPG - (vaddr&PGOFSET);
1218 			incr = min(buflen, incr);
1219 
1220 			(void) pmap_extract(pmap_kernel(), vaddr, &pa);
1221 			buflen -= incr;
1222 			vaddr += incr;
1223 
1224 			if (i > 0 && pa == (segs[i-1].ds_addr + segs[i-1].ds_len)
1225 			    && ((segs[i-1].ds_len + incr) < map->_dm_maxsegsz)) {
1226 				/* Hey, waddyaknow, they're contiguous */
1227 				segs[i-1].ds_len += incr;
1228 				continue;
1229 			}
1230 			segs[i].ds_addr = pa;
1231 			segs[i].ds_len = incr;
1232 			segs[i]._ds_boundary = 0;
1233 			segs[i]._ds_align = 0;
1234 			segs[i]._ds_mlist = NULL;
1235 			i++;
1236 		}
1237 		m = m->m_next;
1238 		if ((m || buflen > 0) && i >= MAX_DMA_SEGS) {
1239 			/* Exceeded the size of our dmamap */
1240 			map->_dm_type = 0;
1241 			map->_dm_source = NULL;
1242 			return E2BIG;
1243 		}
1244 	}
1245 
1246 #ifdef DEBUG
1247 	{
1248 		size_t mbuflen, sglen;
1249 		int j;
1250 		int retval;
1251 
1252 		mbuflen = 0;
1253 		for (m = (struct mbuf *)map->_dm_source; m; m = m->m_next)
1254 			mbuflen += (long)m->m_len;
1255 		sglen = 0;
1256 		for (j = 0; j < i; j++)
1257 			sglen += segs[j].ds_len;
1258 		if (sglen != mbuflen) {
1259 			printf("load_mbuf: sglen %ld != mbuflen %lx\n",
1260 				sglen, mbuflen);
1261 			Debugger();
1262 		}
1263 		if (sglen != len) {
1264 			printf("load_mbuf: sglen %ld != len %lx\n",
1265 				sglen, len);
1266 			Debugger();
1267 		}
1268 		retval = bus_dmamap_load_raw(t, map, segs, i,
1269 			(bus_size_t)len, flags);
1270 		if (map->dm_mapsize != len) {
1271 			printf("load_mbuf: mapsize %ld != len %lx\n",
1272 				map->dm_mapsize, len);
1273 			Debugger();
1274 		}
1275 		sglen = 0;
1276 		for (j = 0; j < map->dm_nsegs; j++)
1277 			sglen += map->dm_segs[j].ds_len;
1278 		if (sglen != len) {
1279 			printf("load_mbuf: dmamap sglen %ld != len %lx\n",
1280 				sglen, len);
1281 			Debugger();
1282 		}
1283 		return (retval);
1284 	}
1285 #endif
1286 	return (bus_dmamap_load_raw(t, map, segs, i,
1287 			    (bus_size_t)len, flags));
1288 #else
1289 	panic("_bus_dmamap_load_mbuf: not implemented");
1290 	return 0;
1291 #endif
1292 }
1293 
1294 /*
1295  * Like _bus_dmamap_load(), but for uios.
1296  */
1297 int
1298 _bus_dmamap_load_uio(t, map, uio, flags)
1299 	bus_dma_tag_t t;
1300 	bus_dmamap_t map;
1301 	struct uio *uio;
1302 	int flags;
1303 {
1304 /*
1305  * XXXXXXX The problem with this routine is that it needs to
1306  * lock the user address space that is being loaded, but there
1307  * is no real way for us to unlock it during the unload process.
1308  */
1309 #if 0
1310 	bus_dma_segment_t segs[MAX_DMA_SEGS];
1311 	int i, j;
1312 	size_t len;
1313 	struct proc *p = uio->uio_procp;
1314 	struct pmap *pm;
1315 
1316 	/*
1317 	 * Check user read/write access to the data buffer.
1318 	 */
1319 	if (uio->uio_segflg == UIO_USERSPACE) {
1320 		pm = p->p_vmspace->vm_map.pmap;
1321 		for (i = 0; i < uio->uio_iovcnt; i++) {
1322 			/* XXXCDC: map not locked, rethink */
1323 			if (__predict_false(!uvm_useracc(uio->uio_iov[i].iov_base,
1324 				     uio->uio_iov[i].iov_len,
1325 /* XXX is UIO_WRITE correct? */
1326 				     (uio->uio_rw == UIO_WRITE) ? B_WRITE : B_READ)))
1327 				return (EFAULT);
1328 		}
1329 	} else
1330 		pm = pmap_kernel();
1331 
1332 	i = 0;
1333 	len = 0;
1334 	for (j=0; j<uio->uio_iovcnt; j++) {
1335 		struct iovec *iov = &uio->uio_iov[j];
1336 		vaddr_t vaddr = (vaddr_t)iov->iov_base;
1337 		bus_size_t buflen = iov->iov_len;
1338 
1339 		/*
1340 		 * Lock the part of the user address space involved
1341 		 *    in the transfer.
1342 		 */
1343 		PHOLD(p);
1344 		if (__predict_false(uvm_vslock(p, vaddr, buflen,
1345 			    (uio->uio_rw == UIO_WRITE) ?
1346 			    VM_PROT_WRITE : VM_PROT_READ)
1347 			    != 0)) {
1348 				PRELE(p);
				return (EFAULT);
1349 			}
1350 
1351 		len += buflen;
1352 		while (buflen > 0 && i < MAX_DMA_SEGS) {
1353 			paddr_t pa;
1354 			long incr;
1355 
1356 			incr = min(buflen, NBPG);
1357 			(void) pmap_extract(pm, vaddr, &pa);
1358 			buflen -= incr;
1359 			vaddr += incr;
1360 			if (segs[i].ds_len == 0)
1361 				segs[i].ds_addr = pa;
1362 
1363 
1364 			if (i > 0 && pa == (segs[i-1].ds_addr + segs[i-1].ds_len)
1365 			    && ((segs[i-1].ds_len + incr) < map->_dm_maxsegsz)) {
1366 				/* Hey, waddyaknow, they're contiguous */
1367 				segs[i-1].ds_len += incr;
1368 				continue;
1369 			}
1370 			segs[i].ds_addr = pa;
1371 			segs[i].ds_len = incr;
1372 			segs[i]._ds_boundary = 0;
1373 			segs[i]._ds_align = 0;
1374 			segs[i]._ds_mlist = NULL;
1375 			i++;
1376 		}
1377 		uvm_vsunlock(p, (caddr_t)iov->iov_base, iov->iov_len);
1378 		PRELE(p);
1379  		if (buflen > 0 && i >= MAX_DMA_SEGS)
1380 			/* Exceeded the size of our dmamap */
1381 			return E2BIG;
1382 	}
1383 	map->_dm_type = _DM_TYPE_UIO;
1384 	map->_dm_source = (void *)uio;
1385 	return (bus_dmamap_load_raw(t, map, segs, i,
1386 				    (bus_size_t)len, flags));
1387 #endif
1388 	return (EOPNOTSUPP);	/* no working implementation; see XXX above */
1389 }
1390 
1391 /*
1392  * Like _bus_dmamap_load(), but for raw memory allocated with
1393  * bus_dmamem_alloc().
1394  */
1395 int
1396 _bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
1397 	bus_dma_tag_t t;
1398 	bus_dmamap_t map;
1399 	bus_dma_segment_t *segs;
1400 	int nsegs;
1401 	bus_size_t size;
1402 	int flags;
1403 {
1404 
1405 	panic("_bus_dmamap_load_raw: not implemented");
1406 }
1407 
1408 /*
1409  * Common function for unloading a DMA map.  May be called by
1410  * bus-specific DMA map unload functions.
1411  */
1412 void
1413 _bus_dmamap_unload(t, map)
1414 	bus_dma_tag_t t;
1415 	bus_dmamap_t map;
1416 {
1417 	int i;
1418 	struct vm_page *m;
1419 	struct pglist *mlist;
1420 	paddr_t pa;
1421 
1422 	for (i=0; i<map->dm_nsegs; i++) {
1423 		if ((mlist = map->dm_segs[i]._ds_mlist) == NULL) {
1424 			/*
1425 			 * We were asked to load random VAs and lost the
1426 			 * PA info so just blow the entire cache away.
1427 			 */
1428 			blast_vcache();
1429 			break;
1430 		}
1431 		for (m = TAILQ_FIRST(mlist); m != NULL;
1432 		     m = TAILQ_NEXT(m,pageq)) {
1433 			pa = VM_PAGE_TO_PHYS(m);
1434 			/*
1435 			 * We should be flushing a subrange, but we
1436 			 * don't know where the segment starts.
1437 			 */
1438 			dcache_flush_page(pa);
1439 		}
1440 	}
1441 	/* Mark the mappings as invalid. */
1442 	map->dm_mapsize = 0;
1443 	map->dm_nsegs = 0;
1444 
1445 }
1446 
1447 /*
1448  * Common function for DMA map synchronization.  May be called
1449  * by bus-specific DMA map synchronization functions.
1450  */
1451 void
1452 _bus_dmamap_sync(t, map, offset, len, ops)
1453 	bus_dma_tag_t t;
1454 	bus_dmamap_t map;
1455 	bus_addr_t offset;
1456 	bus_size_t len;
1457 	int ops;
1458 {
1459 	int i;
1460 	struct vm_page *m;
1461 	struct pglist *mlist;
1462 
1463 	/*
1464 	 * We sync out our caches, but the bus must do the same.
1465 	 *
1466 	 * Actually a #Sync is expensive.  We should optimize.
1467 	 */
1468 	if ((ops & BUS_DMASYNC_PREREAD) || (ops & BUS_DMASYNC_PREWRITE)) {
1469 		/*
1470 		 * Don't really need to do anything, but flush any pending
1471 		 * writes anyway.
1472 		 */
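		/*
		 * "membar #Sync" is the heaviest V9 barrier: every
		 * prior load and store completes before any later
		 * instruction issues, hence the cost note above.
		 */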
1473 		__asm("membar #Sync" : );
1474 	}
1475 	if (ops & BUS_DMASYNC_POSTREAD) {
1476 		/* Invalidate the vcache */
1477 		for (i=0; i<map->dm_nsegs; i++) {
1478 			if ((mlist = map->dm_segs[i]._ds_mlist) == NULL)
1479 				/* Should not really happen. */
1480 				continue;
1481 			for (m = TAILQ_FIRST(mlist);
1482 			     m != NULL; m = TAILQ_NEXT(m,pageq)) {
1483 				paddr_t start;
1484 				psize_t size = NBPG;
1485 
1486 				if (offset < NBPG) {
1487 					start = VM_PAGE_TO_PHYS(m) + offset;
1488 					size = NBPG;
1489 					if (size > len)
1490 						size = len;
1491 					cache_flush_phys(start, size, 0);
1492 					len -= size;
1493 					continue;
1494 				}
1495 				offset -= size;
1496 			}
1497 		}
1498 	}
1499 	if (ops & BUS_DMASYNC_POSTWRITE) {
1500 		/* Nothing to do.  Handled by the bus controller. */
1501 	}
1502 }
1503 
1504 extern paddr_t   vm_first_phys, vm_num_phys;
1505 /*
1506  * Common function for DMA-safe memory allocation.  May be called
1507  * by bus-specific DMA memory allocation functions.
1508  */
1509 int
1510 _bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
1511 	bus_dma_tag_t t;
1512 	bus_size_t size, alignment, boundary;
1513 	bus_dma_segment_t *segs;
1514 	int nsegs;
1515 	int *rsegs;
1516 	int flags;
1517 {
1518 	vaddr_t low, high;
1519 	struct pglist *mlist;
1520 	int error;
1521 
1522 	/* Always round the size. */
1523 	size = round_page(size);
1524 	low = vm_first_phys;
1525 	high = vm_first_phys + vm_num_phys - PAGE_SIZE;
1526 
1527 	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
1528 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1529 		return (ENOMEM);
1530 
1531 	/*
1532 	 * If the bus uses DVMA then ignore boundary and alignment.
1533 	 */
1534 	segs[0]._ds_boundary = boundary;
1535 	segs[0]._ds_align = alignment;
1536 	if (flags & BUS_DMA_DVMA) {
1537 		boundary = 0;
1538 		alignment = 0;
1539 	}
1540 
1541 	/*
1542 	 * Allocate pages from the VM system.
1543 	 */
1544 	TAILQ_INIT(mlist);
1545 	error = uvm_pglistalloc(size, low, high,
1546 	    alignment, boundary, mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1547 	if (error) {
		free(mlist, M_DEVBUF);	/* don't leak the page-list header */
1548 		return (error);
	}
1549 
1550 	/*
1551 	 * Compute the location, size, and number of segments actually
1552 	 * returned by the VM code.
1553 	 */
1554 	segs[0].ds_addr = NULL; /* UPA does not map things */
1555 	segs[0].ds_len = size;
1556 	*rsegs = 1;
1557 
1558 	/*
1559 	 * Simply keep a pointer around to the linked list, so
1560 	 * bus_dmamap_free() can return it.
1561 	 *
1562 	 * NOBODY SHOULD TOUCH THE pageq FIELDS WHILE THESE PAGES
1563 	 * ARE IN OUR CUSTODY.
1564 	 */
1565 	segs[0]._ds_mlist = mlist;
1566 
1567 	/* The bus driver should do the actual mapping */
1568 	return (0);
1569 }
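
/*
 * The dmamem functions pair up as follows (sketch; "t", "seg" and
 * "kva" are placeholders):
 *
 *	bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_NOWAIT);
 *	... DMA to/from kva ...
 *	bus_dmamem_unmap(t, kva, size);
 *	bus_dmamem_free(t, &seg, rseg);
 */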
1570 
1571 /*
1572  * Common function for freeing DMA-safe memory.  May be called by
1573  * bus-specific DMA memory free functions.
1574  */
1575 void
1576 _bus_dmamem_free(t, segs, nsegs)
1577 	bus_dma_tag_t t;
1578 	bus_dma_segment_t *segs;
1579 	int nsegs;
1580 {
1581 
1582 	if (nsegs != 1)
1583 		panic("bus_dmamem_free: nsegs = %d", nsegs);
1584 
1585 	/*
1586 	 * Return the list of pages back to the VM system.
1587 	 */
1588 	uvm_pglistfree(segs[0]._ds_mlist);
1589 	free(segs[0]._ds_mlist, M_DEVBUF);
1590 }
1591 
1592 /*
1593  * Common function for mapping DMA-safe memory.  May be called by
1594  * bus-specific DMA memory map functions.
1595  */
1596 int
1597 _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
1598 	bus_dma_tag_t t;
1599 	bus_dma_segment_t *segs;
1600 	int nsegs;
1601 	size_t size;
1602 	caddr_t *kvap;
1603 	int flags;
1604 {
1605 	vaddr_t va, sva;
1606 	struct pglist *mlist;
	struct vm_page *m;
1607 	int r, cbit;
1608 	size_t oversize;
1609 	u_long align;
1610 
1611 	if (nsegs != 1)
1612 		panic("_bus_dmamem_map: nsegs = %d", nsegs);
1613 
1614 	cbit = PMAP_NC;
1615 	align = PAGE_SIZE;
1616 
1617 	size = round_page(size);
1618 
1619 	/*
1620 	 * Find a region of kernel virtual addresses that can accommodate
1621 	 * our alignment requirements.
1622 	 */
1623 	oversize = size + align - PAGE_SIZE;
1624 	r = uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET, 0,
1625 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
1626 	    UVM_ADV_NORMAL, 0));
1627 	if (r != 0)
1628 		return (ENOMEM);
1629 
1630 	/* Compute start of aligned region */
1631 	va = sva;
1632 	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);
1633 
1634 	/* Return excess virtual addresses */
1635 	if (va != sva)
1636 		(void)uvm_unmap(kernel_map, sva, va);
1637 	if (va + size != sva + oversize)
1638 		(void)uvm_unmap(kernel_map, va + size, sva + oversize);
1639 
1640 
1641 	*kvap = (caddr_t)va;
1642 	mlist = segs[0]._ds_mlist;

	/*
	 * Enter each allocated page (uncacheable, wired) into the
	 * aligned region; cbit carries PMAP_NC as set above.
	 */
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		paddr_t pa;

		if (size == 0)
			panic("_bus_dmamem_map: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa | cbit,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

1644 	return (0);
1645 }
1646 
1647 /*
1648  * Common function for unmapping DMA-safe memory.  May be called by
1649  * bus-specific DMA memory unmapping functions.
1650  */
1651 void
1652 _bus_dmamem_unmap(t, kva, size)
1653 	bus_dma_tag_t t;
1654 	caddr_t kva;
1655 	size_t size;
1656 {
1657 
1658 #ifdef DIAGNOSTIC
1659 	if ((u_long)kva & PAGE_MASK)
1660 		panic("_bus_dmamem_unmap");
1661 #endif
1662 
1663 	size = round_page(size);
1664 	uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
1665 }
1666 
1667 /*
1668  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
1669  * bus-specific DMA mmap(2)'ing functions.
1670  */
1671 paddr_t
1672 _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
1673 	bus_dma_tag_t t;
1674 	bus_dma_segment_t *segs;
1675 	int nsegs;
1676 	off_t off;
1677 	int prot, flags;
1678 {
1679 
1680 	panic("_bus_dmamem_mmap: not implemented");
1681 }
1682 
1683 
1684 struct sparc_bus_dma_tag mainbus_dma_tag = {
1685 	NULL,
1686 	NULL,
1687 	_bus_dmamap_create,
1688 	_bus_dmamap_destroy,
1689 	_bus_dmamap_load,
1690 	_bus_dmamap_load_mbuf,
1691 	_bus_dmamap_load_uio,
1692 	_bus_dmamap_load_raw,
1693 	_bus_dmamap_unload,
1694 	_bus_dmamap_sync,
1695 
1696 	_bus_dmamem_alloc,
1697 	_bus_dmamem_free,
1698 	_bus_dmamem_map,
1699 	_bus_dmamem_unmap,
1700 	_bus_dmamem_mmap
1701 };
1702 
1703 
1704 /*
1705  * Base bus space handlers.
1706  */
1707 static int	sparc_bus_map __P(( bus_space_tag_t, bus_addr_t,
1708 				    bus_size_t, int, vaddr_t, bus_space_handle_t *));
1709 static int	sparc_bus_unmap __P((bus_space_tag_t, bus_space_handle_t,
1710 				     bus_size_t));
1711 static int	sparc_bus_subregion __P((bus_space_tag_t, bus_space_handle_t,
1712 					 bus_size_t, bus_size_t,
1713 					 bus_space_handle_t *));
1714 static paddr_t	sparc_bus_mmap __P((bus_space_tag_t, bus_addr_t, off_t, int, int));
1715 static void	*sparc_mainbus_intr_establish __P((bus_space_tag_t, int, int,
1716 						   int, int (*) __P((void *)),
1717 						   void *));
1718 static int	sparc_bus_alloc __P((bus_space_tag_t, bus_addr_t, bus_addr_t,
1719 				     bus_size_t, bus_size_t, bus_size_t, int,
1720 				     bus_addr_t *, bus_space_handle_t *));
1721 static void	sparc_bus_free __P((bus_space_tag_t, bus_space_handle_t,
1722 				    bus_size_t));
1723 
1724 vaddr_t iobase = IODEV_BASE;
1725 struct extent *io_space = NULL;
1726 
1727 int
1728 sparc_bus_map(t, addr, size, flags, unused, hp)
1729 	bus_space_tag_t t;
1730 	bus_addr_t	addr;
1731 	bus_size_t	size;
	int		flags;
1732 	vaddr_t unused;
1733 	bus_space_handle_t *hp;
1734 {
1735 	vaddr_t v;
1736 	u_int64_t pa;
1737 	paddr_t	pm_flags = 0;
1738 	vm_prot_t pm_prot = VM_PROT_READ;
1739 	int err;
1740 
1741 	if (iobase == NULL)
1742 		iobase = IODEV_BASE;
1743 	if (io_space == NULL)
1744 		/*
1745 		 * And set up IOSPACE extents.
1746 		 */
1747 		io_space = extent_create("IOSPACE",
1748 					 (u_long)IODEV_BASE, (u_long)IODEV_END,
1749 					 M_DEVBUF, 0, 0, EX_NOWAIT);
1750 
1751 
1752 	size = round_page(size);
1753 	if (size == 0) {
1754 		printf("sparc_bus_map: zero size\n");
1755 		return (EINVAL);
1756 	}
1757 	switch (t->type) {
1758 	case PCI_CONFIG_BUS_SPACE:
1759 		/*
1760 		 * PCI config space is special.
1761 		 *
1762 		 * It's really big and seldom used.  In order not to run
1763 		 * out of IO mappings, config space will not be mapped in,
1764 		 * rather it will be accessed through MMU bypass ASI accesses.
1765 		 */
1766 		if (flags & BUS_SPACE_MAP_LINEAR) return (-1);
1767 		hp->_ptr = addr;
1768 		hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
1769 		hp->_sasi = ASI_PHYS_NON_CACHED;
1770 		DPRINTF(BSDB_MAP, ("\nsparc_bus_map: type %x flags %x "
1771 			"addr %016llx size %016llx virt %llx paddr %016llx\n",
1772 			(int)t->type, (int) flags, (unsigned long long)addr,
1773 			(unsigned long long)size, (unsigned long long)hp->_ptr,
1774 			(unsigned long long)addr));
1775 		return (0);
1776 		/* FALLTHROUGH */
1777 	case PCI_IO_BUS_SPACE:
1778 		pm_flags = PMAP_LITTLE;
1779 		break;
1780 	case PCI_MEMORY_BUS_SPACE:
1781 		pm_flags = PMAP_LITTLE;
1782 		break;
1783 	default:
1784 		pm_flags = 0;
1785 		break;
1786 	}
1787 
1788 #ifdef _LP64
1789 	/* If it's not LINEAR don't bother to map it.  Use phys accesses. */
1790 	if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
1791 		hp->_ptr = addr;
1792 		if (pm_flags & PMAP_LITTLE)
1793 			hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
1794 		else
1795 			hp->_asi = ASI_PHYS_NON_CACHED;
1796 		hp->_sasi = ASI_PHYS_NON_CACHED;
1797 		return (0);
1798 	}
1799 #endif
1800 
1801 	if (!(flags & BUS_SPACE_MAP_CACHEABLE)) pm_flags |= PMAP_NC;
1802 
1803 	if ((err = extent_alloc(io_space, size, NBPG,
1804 		0, EX_NOWAIT|EX_BOUNDZERO, (u_long *)&v)))
1805 			panic("sparc_bus_map: cannot allocate io_space: %d\n", err);
1806 
1807 	/* note: preserve page offset */
1808 	hp->_ptr = (v | ((u_long)addr & PGOFSET));
1809 	hp->_asi = ASI_PRIMARY;
1810 	if (pm_flags & PMAP_LITTLE)
1811 		hp->_sasi = ASI_PRIMARY_LITTLE;
1812 	else
1813 		hp->_sasi = ASI_PRIMARY;
1814 
1815 	pa = addr & ~PAGE_MASK; /* = trunc_page(addr); Will drop high bits */
1816 	if (!(flags&BUS_SPACE_MAP_READONLY)) pm_prot |= VM_PROT_WRITE;
1817 
1818 	DPRINTF(BSDB_MAP, ("\nsparc_bus_map: type %x flags %x "
1819 		"addr %016llx size %016llx virt %llx paddr %016llx\n",
1820 		(int)t->type, (int) flags, (unsigned long long)addr,
1821 		(unsigned long long)size, (unsigned long long)hp->_ptr,
1822 		(unsigned long long)pa));
1823 
1824 	do {
1825 		DPRINTF(BSDB_MAP, ("sparc_bus_map: phys %llx virt %p hp %llx\n",
1826 			(unsigned long long)pa, (char *)v,
1827 			(unsigned long long)hp->_ptr));
1828 		pmap_enter(pmap_kernel(), v, pa | pm_flags, pm_prot,
1829 			pm_prot|PMAP_WIRED);
1830 		v += PAGE_SIZE;
1831 		pa += PAGE_SIZE;
1832 	} while ((size -= PAGE_SIZE) > 0);
1833 	pmap_update(pmap_kernel());
1834 	return (0);
1835 }
1836 
1837 int
1838 sparc_bus_subregion(tag, handle, offset, size, nhandlep)
1839 	bus_space_tag_t		tag;
1840 	bus_space_handle_t	handle;
1841 	bus_size_t		offset;
1842 	bus_size_t		size;
1843 	bus_space_handle_t	*nhandlep;
1844 {
1845 	nhandlep->_ptr = handle._ptr + offset;
1846 	nhandlep->_asi = handle._asi;
1847 	nhandlep->_sasi = handle._sasi;
1848 	return (0);
1849 }
1850 
1851 int
1852 sparc_bus_unmap(t, bh, size)
1853 	bus_space_tag_t t;
1854 	bus_space_handle_t bh;
1855 	bus_size_t	size;
1856 {
1857 	vaddr_t va = trunc_page((vaddr_t)bh._ptr);
1858 	vaddr_t endva = va + round_page(size);
1859 	int error = 0;
1860 
1861 	if (PHYS_ASI(bh._asi)) return (0);
1862 
1863 	error = extent_free(io_space, va, round_page(size), EX_NOWAIT);
1864 	if (error) printf("sparc_bus_unmap: extent free sez %d\n", error);
1865 
1866 	pmap_remove(pmap_kernel(), va, endva);
1867 	return (0);
1868 }
1869 
1870 paddr_t
1871 sparc_bus_mmap(t, paddr, off, prot, flags)
1872 	bus_space_tag_t t;
1873 	bus_addr_t	paddr;
1874 	off_t		off;
1875 	int		prot;
1876 	int		flags;
1877 {
1878 	/* Devices are un-cached... although the driver should do that */
1879 	return ((paddr+off)|PMAP_NC);
1880 }
1881 
1882 
1883 void *
1884 sparc_mainbus_intr_establish(t, pil, level, flags, handler, arg)
1885 	bus_space_tag_t t;
1886 	int	pil;
1887 	int	level;
1888 	int	flags;
1889 	int	(*handler)__P((void *));
1890 	void	*arg;
1891 {
1892 	struct intrhand *ih;
1893 
1894 	ih = (struct intrhand *)
1895 		malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
1896 	if (ih == NULL)
1897 		return (NULL);
1898 
1899 	ih->ih_fun = handler;
1900 	ih->ih_arg = arg;
1901 	intr_establish(pil, ih);
1902 	return (ih);
1903 }
1904 
1905 int
1906 sparc_bus_alloc(t, rs, re, s, a, b, f, ap, hp)
1907 	bus_space_tag_t t;
1908 	bus_addr_t	rs;
1909 	bus_addr_t	re;
1910 	bus_size_t	s;
1911 	bus_size_t	a;
1912 	bus_size_t	b;
1913 	int		f;
1914 	bus_addr_t	*ap;
1915 	bus_space_handle_t *hp;
1916 {
1917 	return (ENOTTY);
1918 }
1919 
1920 void
1921 sparc_bus_free(t, h, s)
1922 	bus_space_tag_t	t;
1923 	bus_space_handle_t	h;
1924 	bus_size_t	s;
1925 {
1926 	return;
1927 }
1928 
1929 struct sparc_bus_space_tag mainbus_space_tag = {
1930 	NULL,				/* cookie */
1931 	NULL,				/* parent bus tag */
1932 	UPA_BUS_SPACE,			/* type */
1933 	sparc_bus_alloc,
1934 	sparc_bus_free,
1935 	sparc_bus_map,			/* bus_space_map */
1936 	sparc_bus_unmap,		/* bus_space_unmap */
1937 	sparc_bus_subregion,		/* bus_space_subregion */
1938 	sparc_bus_mmap,			/* bus_space_mmap */
1939 	sparc_mainbus_intr_establish	/* bus_intr_establish */
1940 };
1941