/*	$NetBSD: machdep.c,v 1.134 2002/11/27 18:00:27 pk Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
 */

#include "opt_compat_sunos.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/syscallargs.h>
#include <sys/exec.h>

#include <uvm/uvm.h>

#include <sys/sysctl.h>
#ifndef	ELFSIZE
#ifdef __arch64__
#define	ELFSIZE	64
#else
#define	ELFSIZE	32
#endif
#endif
#include <sys/exec_elf.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/openfirm.h>
#include <machine/sparc64.h>

#include <sparc64/sparc64/cache.h>

/* #include "fb.h" */

int bus_space_debug = 0; /* This may be used by macros elsewhere. */
#ifdef DEBUG
#define DPRINTF(l, s)   do { if (bus_space_debug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif

struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
extern vaddr_t avail_end;

int	physmem;

extern	caddr_t msgbufaddr;

/*
 * Maximum number of DMA segments we'll allow in dmamem_load()
 * routines.  Can be overridden in config files, etc.
 */
#ifndef MAX_DMA_SEGS
#define MAX_DMA_SEGS	20
#endif
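
/*
 * For example, a kernel config file could (hypothetically) raise the
 * limit at build time with an option line such as:
 *
 *	options 	MAX_DMA_SEGS=32
 *
 * since the #ifndef guard above only supplies the default.
 */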

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int   safepri = 0;

void	dumpsys __P((void));
void	stackdump __P((void));


/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
	caddr_t v;
	long sz;
	u_int i, base, residual;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	extern struct user *proc0paddr;
	char pbuf[9];

#ifdef DEBUG
	pmapdebug = 0;
#endif

	proc0.p_addr = proc0paddr;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	format_bytes(pbuf, sizeof(pbuf), ctob((u_int64_t)physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (long)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for %lx bytes of tables", sz);
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * allocate virtual and physical memory for the buffers.
	 */
	size = MAXBSIZE * nbuf;		/* # bytes for buffers */

	/* allocate VM for buffers... area is not managed by VM system */
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("cpu_startup: cannot allocate VM for buffers");

	minaddr = (vaddr_t) buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		bufpages = btoc(MAXBSIZE) * nbuf; /* do not overallocate RAM */
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
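
	/*
	 * A worked example with hypothetical numbers: if bufpages = 10 and
	 * nbuf = 4, then base = 2 and residual = 2, so the loop below maps
	 * three pages each for buffers 0 and 1 and two pages each for
	 * buffers 2 and 3 (3 + 3 + 2 + 2 = 10 = bufpages).
	 */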

	/* now allocate RAM for buffers */
	for (i = 0; i < nbuf; i++) {
		vaddr_t curbuf;
		vsize_t curbufsize;
		struct vm_page *pg;

		/*
		 * each buffer has MAXBSIZE bytes of VM space allocated.  of
		 * that MAXBSIZE space we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: "
				    "not enough RAM for buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(kernel_map->pmap);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

#if 0
	pmap_redzone();
#endif
}

/*
 * Set up registers on exec.
 */

#ifdef __arch64__
#define STACK_OFFSET	BIAS
#define CPOUTREG(l,v)	copyout(&(v), (l), sizeof(v))
#undef CCFSZ
#define CCFSZ	CC64FSZ
#else
#define STACK_OFFSET	0
#define CPOUTREG(l,v)	copyout(&(v), (l), sizeof(v))
#endif
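
/*
 * Note: BIAS is the SPARC V9 stack bias (2047).  64-bit code keeps %sp
 * offset by -BIAS from the frame it names, so a frame at (hypothetical)
 * address 0x10000 is addressed with %sp = 0xf801; the odd low bit is
 * also what stackdump() below uses to recognize 64-bit frames.
 */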

/* ARGSUSED */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	vaddr_t stack;
{
	register struct trapframe64 *tf = p->p_md.md_tf;
	register struct fpstate64 *fs;
	register int64_t tstate;
	int pstate = PSTATE_USER;
#ifdef __arch64__
	Elf_Ehdr *eh = pack->ep_hdr;
#endif

	/* Clear the P_32 flag. */
	p->p_flag &= ~P_32;

	/* Don't allow misaligned code by default */
	p->p_md.md_flags &= ~MDP_FIXALIGN;

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec()
	 *	%tstate: (retain icc and xcc and cwp bits)
	 *	%g1: address of p->p_psstr (used by crt0)
	 *	%tpc,%tnpc: entry point of program
	 */
#ifdef __arch64__
	/* Check what memory model is requested */
	switch ((eh->e_flags & EF_SPARCV9_MM)) {
	default:
		printf("Unknown memory model %d\n",
		       (eh->e_flags & EF_SPARCV9_MM));
		/* FALLTHROUGH */
	case EF_SPARCV9_TSO:
		pstate = PSTATE_MM_TSO|PSTATE_IE;
		break;
	case EF_SPARCV9_PSO:
		pstate = PSTATE_MM_PSO|PSTATE_IE;
		break;
	case EF_SPARCV9_RMO:
		pstate = PSTATE_MM_RMO|PSTATE_IE;
		break;
	}
#endif
	tstate = (ASI_PRIMARY_NO_FAULT<<TSTATE_ASI_SHIFT) |
		((pstate)<<TSTATE_PSTATE_SHIFT) |
		(tf->tf_tstate & TSTATE_CWP);
	if ((fs = p->p_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		if (p == fpproc) {
			savefpstate(fs);
			fpproc = NULL;
		}
		free((void *)fs, M_SUBPROC);
		p->p_md.md_fpstate = NULL;
	}
	bzero((caddr_t)tf, sizeof *tf);
	tf->tf_tstate = tstate;
	tf->tf_global[1] = (vaddr_t)p->p_psstr;
	/* %g4 needs to point to the start of the data segment */
	tf->tf_global[4] = 0;
	tf->tf_pc = pack->ep_entry & ~3;
	tf->tf_npc = tf->tf_pc + 4;
	stack -= sizeof(struct rwindow);
	tf->tf_out[6] = stack - STACK_OFFSET;
	tf->tf_out[7] = NULL;
#ifdef NOTDEF_DEBUG
	printf("setregs: setting tf %p sp %p pc %p\n", (long)tf,
	       (long)tf->tf_out[6], (long)tf->tf_pc);
#ifdef DDB
	Debugger();
#endif
#endif
}

#ifdef DEBUG
/* See sigdebug.h */
#include <sparc64/sparc64/sigdebug.h>
int sigdebug = 0x0;
int sigpid = 0;
#endif

struct sigframe {
	int	sf_signo;		/* signal number */
	int	sf_code;		/* code */
#ifndef __arch64__
	struct	sigcontext *sf_scp;	/* SunOS user addr of sigcontext */
	int	sf_addr;		/* SunOS compat, always 0 for now */
#endif
	struct	sigcontext sf_sc;	/* actual sigcontext */
};


static char *parse_bootfile(char *);
static char *parse_bootargs(char *);

static char *
parse_bootfile(args)
	char *args;
{
	char *cp;

	/*
	 * bootargs is of the form: [kernelname] [args...]
	 * It can be the empty string if we booted from the default
	 * kernel name.
	 */
	for (cp = args; *cp != 0 && *cp != ' ' && *cp != '\t'; cp++) {
		if (*cp == '-') {
			int c;
			/*
			 * If this `-' is most likely the start of boot
			 * options, we're done.
			 */
			if (cp == args)
				break;
			if ((c = *(cp-1)) == ' ' || c == '\t')
				break;
		}
	}
	/* Now we've separated out the kernel name from the args */
	*cp = '\0';
	return (args);
}
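
/*
 * Example (hypothetical boot strings): for "netbsd.test -s" the scan
 * stops at the blank and returns "netbsd.test"; for "-s" alone the
 * leading `-' terminates the scan immediately and the empty string is
 * returned, which cpu_sysctl() below maps to the default kernel name.
 */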

static char *
parse_bootargs(args)
	char *args;
{
	char *cp;

	for (cp = args; *cp != '\0'; cp++) {
		if (*cp == '-') {
			int c;
			/*
			 * Looks like options start here, but check this
			 * `-' is not part of the kernel name.
			 */
			if (cp == args)
				break;
			if ((c = *(cp-1)) == ' ' || c == '\t')
				break;
		}
	}
	return (cp);
}
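
/*
 * Example (hypothetical): for "netbsd.test -s -d" the scan returns a
 * pointer to "-s -d"; for a bare "netbsd.test" it runs off the end and
 * returns a pointer to the terminating NUL, i.e. the empty string.
 */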

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	u_int chosen;
	char bootargs[256];
	char *cp = NULL;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */

	switch (name[0]) {
	case CPU_BOOTED_KERNEL:
		if (((chosen = OF_finddevice("/chosen")) == -1) ||
		    ((OF_getprop(chosen, "bootargs", bootargs, sizeof bootargs))
		      < 0))
			return (ENOENT);

		cp = parse_bootfile(bootargs);
		if (cp == NULL)
			return (ENOENT);
		if (*cp == '\0')
			/* Unknown to firmware, return default name */
			cp = "netbsd";
		return (sysctl_rdstring(oldp, oldlenp, newp, cp));

	case CPU_BOOT_ARGS:
		if (((chosen = OF_finddevice("/chosen")) == -1) ||
		    ((OF_getprop(chosen, "bootargs", bootargs, sizeof bootargs))
		      < 0))
			return (ENOENT);

		cp = parse_bootargs(bootargs);
		if (cp == NULL || cp[0] == '\0')
			return (ENOENT);
		return (sysctl_rdstring(oldp, oldlenp, newp, cp));

	case CPU_BOOTED_DEVICE:
		if (((chosen = OF_finddevice("/chosen")) == -1) ||
		    ((OF_getprop(chosen, "bootpath", bootargs, sizeof bootargs))
		      < 0))
			return (ENOENT);

		return (sysctl_rdstring(oldp, oldlenp, newp, bootargs));

	case CPU_ARCH:
		/* CPU architecture version */
		return (sysctl_rdint(oldp, oldlenp, newp, 9));

	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
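
/*
 * From userland these nodes appear under the "machdep" sysctl tree;
 * an illustrative invocation (output hypothetical):
 *
 *	$ sysctl machdep.booted_kernel
 *	machdep.booted_kernel = netbsd
 *
 * or, programmatically, via sysctl(3) with the name vector
 * { CTL_MACHDEP, CPU_BOOTED_KERNEL }.
 */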

/*
 * Send an interrupt to process.
 */
void
sendsig(sig, mask, code)
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct sigacts *ps = p->p_sigacts;
	struct sigframe *fp;
	struct trapframe64 *tf;
	vaddr_t addr;
	struct rwindow *oldsp, *newsp;
#ifdef NOT_DEBUG
	struct rwindow tmpwin;
#endif
	struct sigframe sf;
	int onstack;
	sig_t catcher = SIGACTION(p, sig).sa_handler;

	tf = p->p_md.md_tf;
	oldsp = (struct rwindow *)(u_long)(tf->tf_out[6] + STACK_OFFSET);

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	onstack =
	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (onstack)
		fp = (struct sigframe *)((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
						p->p_sigctx.ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)oldsp;
	/* Allocate an aligned sigframe */
	fp = (struct sigframe *)((long)(fp - 1) & ~0x0f);
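	/*
	 * E.g. (hypothetical address): if fp - 1 above lands at 0xfffff9f8,
	 * the & ~0x0f rounds it down to 0xfffff9f0, placing the frame on a
	 * 16-byte boundary below the old stack data.
	 */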

#ifdef DEBUG
	sigpid = p->p_pid;
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) {
		printf("sendsig: %s[%d] sig %d newusp %p scp %p oldsp %p\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc, oldsp);
#ifdef DDB
		if (sigdebug & SDB_DDB) Debugger();
#endif
	}
#endif

	/*
	 * Now set up the signal frame.  We build it in kernel space
	 * and then copy it out.  We probably ought to just build it
	 * directly in user space....
	 */
	sf.sf_signo = sig;
	sf.sf_code = code;
#ifndef __arch64__
	sf.sf_scp = 0;
	sf.sf_addr = 0;			/* XXX */
#endif

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;
	sf.sf_sc.sc_mask = *mask;
#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &sf.sf_sc.__sc_mask13);
#endif
	/* Save register context. */
	sf.sf_sc.sc_sp = (long)tf->tf_out[6];
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
#ifdef __arch64__
	sf.sf_sc.sc_tstate = tf->tf_tstate; /* XXX */
#else
	sf.sf_sc.sc_psr = TSTATECCR_TO_PSR(tf->tf_tstate); /* XXX */
#endif
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
	newsp = (struct rwindow *)((vaddr_t)fp - sizeof(struct rwindow));
	write_user_windows();
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK))
	    printf("sendsig: saving sf to %p, setting stack pointer %p to %p\n",
		   fp, &(((struct rwindow *)newsp)->rw_in[6]),
		   (void *)(unsigned long)tf->tf_out[6]);
#endif
	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
#ifdef NOT_DEBUG
	    copyin(oldsp, &tmpwin, sizeof(tmpwin)) || copyout(&tmpwin, newsp, sizeof(tmpwin)) ||
#endif
	    CPOUTREG(&(((struct rwindow *)newsp)->rw_in[6]), tf->tf_out[6])) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
		printf("sendsig: stack was trashed trying to send sig %d, sending SIGILL\n", sig);
#ifdef DDB
		if (sigdebug & SDB_DDB) Debugger();
#endif
#endif
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("sendsig: %s[%d] sig %d scp %p\n",
		       p->p_comm, p->p_pid, sig, &fp->sf_sc);
	}
#endif

	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
	switch (ps->sa_sigdesc[sig].sd_vers) {
#if 1 /* COMPAT_16 */
	case 0:		/* legacy on-stack sigtramp */
		addr = (vaddr_t)p->p_sigctx.ps_sigcode;
		break;
#endif /* COMPAT_16 */

	case 1:
		addr = (vaddr_t)ps->sa_sigdesc[sig].sd_tramp;
		break;

	default:
		/* Don't know what trampoline version; kill it. */
		sigexit(p, SIGILL);
	}

	tf->tf_global[1] = (vaddr_t)catcher;
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = (vaddr_t)newsp - STACK_OFFSET;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) {
		printf("sendsig: about to return to catcher %p thru %p\n",
		       catcher, (void *)(unsigned long)addr);
#ifdef DDB
		if (sigdebug & SDB_DDB) Debugger();
#endif
	}
#endif
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above),
 * and return to the given trap frame (if there is one).
 * Check carefully to make sure that the user has not
 * modified the state to gain improper privileges or to cause
 * a machine fault.
 */
/* ARGSUSED */
int
sys___sigreturn14(p, v, retval)
	register struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext sc, *scp;
	register struct trapframe64 *tf;
	int error = EINVAL;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(p)) {
#ifdef DEBUG
		printf("sigreturn14: rwindow_save(%p) failed, sending SIGILL\n", p);
#ifdef DDB
		if (sigdebug & SDB_DDB) Debugger();
#endif
#endif
		sigexit(p, SIGILL);
	}
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("sigreturn14: %s[%d], sigcntxp %p\n",
		    p->p_comm, p->p_pid, SCARG(uap, sigcntxp));
#ifdef DDB
		if (sigdebug & SDB_DDB) Debugger();
#endif
	}
#endif
	scp = SCARG(uap, sigcntxp);
	if ((vaddr_t)scp & 3 || (error = copyin((caddr_t)scp, &sc, sizeof sc)) != 0)
#ifdef DEBUG
	{
		printf("sigreturn14: copyin failed: scp=%p\n", scp);
#ifdef DDB
		if (sigdebug & SDB_DDB) Debugger();
#endif
		return (error);
	}
#else
		return (error);
#endif
	scp = &sc;

	tf = p->p_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
	if (((sc.sc_pc | sc.sc_npc) & 3) != 0 || (sc.sc_pc == 0) || (sc.sc_npc == 0))
#ifdef DEBUG
	{
		printf("sigreturn14: pc %p or npc %p invalid\n",
		   (void *)(unsigned long)sc.sc_pc,
		   (void *)(unsigned long)sc.sc_npc);
#ifdef DDB
		if (sigdebug & SDB_DDB) Debugger();
#endif
		return (EINVAL);
	}
#else
		return (EINVAL);
#endif
	/* take only psr ICC field */
#ifdef __arch64__
	tf->tf_tstate = (u_int64_t)(tf->tf_tstate & ~TSTATE_CCR) | (scp->sc_tstate & TSTATE_CCR);
#else
	tf->tf_tstate = (u_int64_t)(tf->tf_tstate & ~TSTATE_CCR) | PSRCC_TO_TSTATE(scp->sc_psr);
#endif
	tf->tf_pc = (u_int64_t)scp->sc_pc;
	tf->tf_npc = (u_int64_t)scp->sc_npc;
	tf->tf_global[1] = (u_int64_t)scp->sc_g1;
	tf->tf_out[0] = (u_int64_t)scp->sc_o0;
	tf->tf_out[6] = (u_int64_t)scp->sc_sp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("sigreturn14: return trapframe pc=%p sp=%p tstate=%llx\n",
		       (void *)(unsigned long)tf->tf_pc,
		       (void *)(unsigned long)tf->tf_out[6],
		       (unsigned long long)tf->tf_tstate);
#ifdef DDB
		if (sigdebug & SDB_DDB) Debugger();
#endif
	}
#endif

	/* Restore signal stack. */
	if (sc.sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &sc.sc_mask, 0);

	return (EJUSTRETURN);
}

int	waittime = -1;

void
cpu_reboot(howto, user_boot_string)
	register int howto;
	char *user_boot_string;
{
	int i;
	static char str[128];

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

#if NFB > 0
	fb_unblank();
#endif
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		extern struct proc proc0;
		extern int sparc_clock_time_is_ok;

		/* XXX protect against curproc->p_stats.foo refs in sync() */
		if (curproc == NULL)
			curproc = &proc0;
		waittime = 0;
		vfs_shutdown();

		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 * Do this only if the TOD clock has already been read out
		 * successfully by inittodr() or set by an explicit call
		 * to resettodr() (e.g. from settimeofday()).
		 */
		if (sparc_clock_time_is_ok)
			resettodr();
	}
	(void) splhigh();		/* ??? */

	/* If rebooting and a dump is requested, do it. */
	if (howto & RB_DUMP)
		dumpsys();

haltsys:
	/* Run any shutdown hooks. */
	doshutdownhooks();

	/* If powerdown was requested, do it. */
	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		/* Let the OBP do the work. */
		OF_poweroff();
		printf("WARNING: powerdown failed!\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
	}

	if (howto & RB_HALT) {
		printf("halted\n\n");
		OF_exit();
		panic("PROM exit failed");
	}

	printf("rebooting\n\n");
	if (user_boot_string && *user_boot_string) {
		i = strlen(user_boot_string);
		if (i > sizeof(str))
			OF_boot(user_boot_string);	/* XXX */
		bcopy(user_boot_string, str, i);
	} else {
		i = 1;
		str[0] = '\0';
	}

	if (howto & RB_SINGLE)
		str[i++] = 's';
	if (howto & RB_KDB)
		str[i++] = 'd';
	if (i > 1) {
		if (str[0] == '\0')
			str[0] = '-';
		str[i] = 0;
	} else
		str[0] = 0;
	OF_boot(str);
	panic("cpu_reboot -- failed");
	/*NOTREACHED*/
}

u_int32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;

void
cpu_dumpconf()
{
	const struct bdevsw *bdev;
	register int nblks, dumpblks;

	if (dumpdev == NODEV)
		/* No usable dump device */
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		/* No usable dump device */
		return;

	nblks = (*bdev->d_psize)(dumpdev);

	dumpblks = ctod(physmem) + pmap_dumpsize();
	if (dumpblks > (nblks - ctod(1)))
		/*
		 * dump size is too big for the partition.
		 * Note, we safeguard a click at the front for a
		 * possible disk label.
		 */
		return;

	/* Put the dump at the end of the partition */
	dumplo = nblks - dumpblks;

	/*
	 * savecore(8) expects dumpsize to be the number of pages
	 * of actual core dumped (i.e. excluding the MMU stuff).
	 */
	dumpsize = physmem;
}
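
/*
 * Worked example with hypothetical numbers: on a 128MB machine with 8KB
 * pages, physmem is 16384 pages, so dumpblks is the disk blocks for
 * 128MB of core plus pmap_dumpsize() worth of MMU metadata, and dumplo
 * is chosen so the dump ends exactly at the last block of the partition.
 */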

#define	BYTES_PER_DUMP	(NBPG)	/* must be a multiple of pagesize */
static vaddr_t dumpspace;

caddr_t
reserve_dumppages(p)
	caddr_t p;
{

	dumpspace = (vaddr_t)p;
	return (p + BYTES_PER_DUMP);
}

/*
 * Write a crash dump.
 */
void
dumpsys()
{
	const struct bdevsw *bdev;
	register int psize;
	daddr_t blkno;
	register int (*dump)	__P((dev_t, daddr_t, caddr_t, size_t));
	int error = 0;
	register struct mem_region *mp;
	extern struct mem_region *mem;

	/* copy registers to memory */
	snapshot(cpcb);
	stackdump();

	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (!dumpspace) {
		printf("\nno address space available, dump not possible\n");
		return;
	}
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	psize = (*bdev->d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	blkno = dumplo;
	dump = bdev->d_dump;

	error = pmap_dumpmmu(dump, blkno);
	blkno += pmap_dumpsize();
	printf("starting dump, blkno %d\n", blkno);
	for (mp = mem; mp->size; mp++) {
		unsigned i = 0, n;
		paddr_t maddr = mp->start;

#if 0
		/* Remind me: why don't we dump page 0 ? */
		if (maddr == 0) {
			/* Skip first page at physical address 0 */
			maddr += NBPG;
			i += NBPG;
			blkno += btodb(NBPG);
		}
#endif
		for (; i < mp->size; i += n) {
			n = mp->size - i;
			if (n > BYTES_PER_DUMP)
				 n = BYTES_PER_DUMP;

			/* print out how many MBs we have dumped */
			if (i && (i % (1024*1024)) == 0)
				printf("%d ", i / (1024*1024));
			pmap_kenter_pa(dumpspace, maddr, VM_PROT_READ);
			pmap_update(pmap_kernel());
			error = (*dump)(dumpdev, blkno,
					(caddr_t)dumpspace, (int)n);
			pmap_kremove(dumpspace, n);
			pmap_update(pmap_kernel());
			if (error)
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}

void trapdump __P((struct trapframe64*));
/*
 * dump out a trapframe.
 */
void
trapdump(tf)
	struct trapframe64* tf;
{
	printf("TRAPFRAME: tstate=%llx pc=%llx npc=%llx y=%x\n",
	       (unsigned long long)tf->tf_tstate, (unsigned long long)tf->tf_pc,
	       (unsigned long long)tf->tf_npc, (unsigned)tf->tf_y);
	printf("%%g1-7: %llx %llx %llx %llx %llx %llx %llx\n",
	       (unsigned long long)tf->tf_global[1],
	       (unsigned long long)tf->tf_global[2],
	       (unsigned long long)tf->tf_global[3],
	       (unsigned long long)tf->tf_global[4],
	       (unsigned long long)tf->tf_global[5],
	       (unsigned long long)tf->tf_global[6],
	       (unsigned long long)tf->tf_global[7]);
	printf("%%o0-7: %llx %llx %llx %llx\n %llx %llx %llx %llx\n",
	       (unsigned long long)tf->tf_out[0],
	       (unsigned long long)tf->tf_out[1],
	       (unsigned long long)tf->tf_out[2],
	       (unsigned long long)tf->tf_out[3],
	       (unsigned long long)tf->tf_out[4],
	       (unsigned long long)tf->tf_out[5],
	       (unsigned long long)tf->tf_out[6],
	       (unsigned long long)tf->tf_out[7]);
}
/*
 * get the fp and dump the stack as best we can.  don't leave the
 * current stack page
 */
void
stackdump()
{
	struct frame32 *fp = (struct frame32 *)getfp(), *sfp;
	struct frame64 *fp64;

	sfp = fp;
	printf("Frame pointer is at %p\n", fp);
	printf("Call traceback:\n");
	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
		if (((long)fp) & 1) {
			fp64 = (struct frame64*)(((char*)fp)+BIAS);
			/* 64-bit frame */
			printf("%llx(%llx, %llx, %llx, %llx, %llx, %llx, %llx) fp = %llx\n",
			       (unsigned long long)fp64->fr_pc,
			       (unsigned long long)fp64->fr_arg[0],
			       (unsigned long long)fp64->fr_arg[1],
			       (unsigned long long)fp64->fr_arg[2],
			       (unsigned long long)fp64->fr_arg[3],
			       (unsigned long long)fp64->fr_arg[4],
			       (unsigned long long)fp64->fr_arg[5],
			       (unsigned long long)fp64->fr_arg[6],
			       (unsigned long long)fp64->fr_fp);
			fp = (struct frame32 *)(u_long)fp64->fr_fp;
		} else {
			/* 32-bit frame */
			printf("  pc = %x  args = (%x, %x, %x, %x, %x, %x, %x) fp = %x\n",
			       fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
			       fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6],
			       fp->fr_fp);
			fp = (struct frame32 *)(u_long)fp->fr_fp;
		}
	}
}


int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	return (ENOEXEC);
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparc_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct sparc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct sparc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT|BUS_DMA_COHERENT|
				   BUS_DMA_NOWRITE|BUS_DMA_NOCACHE);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
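
/*
 * Illustrative driver usage (hypothetical softc member and sizes):
 *
 *	bus_dmamap_t map;
 *	error = bus_dmamap_create(sc->sc_dmatag, MAXPHYS, 1, MAXPHYS,
 *	    0, BUS_DMA_NOWAIT, &map);
 *
 * which reaches this function through the tag's map-creation hook for
 * mainbus-attached devices (see mainbus_dma_tag below).
 */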

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	if (map->dm_nsegs)
		bus_dmamap_unload(t, map);
	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 *
 * Most SPARCs have IOMMUs in the bus controllers.  In those cases
 * they only need one segment and will use virtual addresses for DVMA.
 * Those bus controllers should intercept these vectors and should
 * *NEVER* call _bus_dmamap_load() which is used only by devices that
 * bypass DVMA.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	vaddr_t vaddr = (vaddr_t)buf;
	long incr;
	int i;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
	{
#ifdef DEBUG
		printf("_bus_dmamap_load(): error %lu > %lu -- map size exceeded!\n",
		    (unsigned long)buflen, (unsigned long)map->_dm_size);
#ifdef DDB
		Debugger();
#endif
#endif
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	i = 0;
	map->dm_segs[i].ds_addr = NULL;
	map->dm_segs[i].ds_len = 0;

	incr = NBPG - (vaddr & PGOFSET);
	while (sgsize > 0) {
		paddr_t pa;

		incr = min(sgsize, incr);

		(void) pmap_extract(pmap_kernel(), vaddr, &pa);
		sgsize -= incr;
		vaddr += incr;
		if (map->dm_segs[i].ds_len == 0)
			map->dm_segs[i].ds_addr = pa;
		if (pa == (map->dm_segs[i].ds_addr + map->dm_segs[i].ds_len)
		    && ((map->dm_segs[i].ds_len + incr) <= map->_dm_maxsegsz)) {
			/* Hey, waddyaknow, they're contiguous */
			map->dm_segs[i].ds_len += incr;
			incr = NBPG;
			continue;
		}
		if (++i >= map->_dm_segcnt)
			return (E2BIG);
		map->dm_segs[i].ds_addr = pa;
		map->dm_segs[i].ds_len = incr = NBPG;
	}
	map->dm_nsegs = i + 1;
	/* Mapping is bus dependent */
	return (0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{
	bus_dma_segment_t segs[MAX_DMA_SEGS];
	int i;
	size_t len;

	/* Record mbuf for *_unload */
	map->_dm_type = _DM_TYPE_MBUF;
	map->_dm_source = (void *)m;

	i = 0;
	len = 0;
	while (m) {
		vaddr_t vaddr = mtod(m, vaddr_t);
		long buflen = (long)m->m_len;

		len += buflen;
		while (buflen > 0 && i < MAX_DMA_SEGS) {
			paddr_t pa;
			long incr;

			incr = NBPG - (vaddr & PGOFSET);
			incr = min(buflen, incr);

			(void) pmap_extract(pmap_kernel(), vaddr, &pa);
			buflen -= incr;
			vaddr += incr;

			if (i > 0 &&
				pa == (segs[i-1].ds_addr + segs[i-1].ds_len) &&
				((segs[i-1].ds_len + incr) <=
					map->_dm_maxsegsz)) {
				/* Hey, waddyaknow, they're contiguous */
				segs[i-1].ds_len += incr;
				continue;
			}
			segs[i].ds_addr = pa;
			segs[i].ds_len = incr;
			segs[i]._ds_boundary = 0;
			segs[i]._ds_align = 0;
			segs[i]._ds_mlist = NULL;
			i++;
		}
		m = m->m_next;
		if (m && i >= MAX_DMA_SEGS) {
			/* Exceeded the size of our dmamap */
			map->_dm_type = 0;
			map->_dm_source = NULL;
			return E2BIG;
		}
	}

#ifdef DEBUG
	{
		size_t mbuflen, sglen;
		int j;
		int retval;

		mbuflen = 0;
		for (m = (struct mbuf *)map->_dm_source; m; m = m->m_next)
			mbuflen += (long)m->m_len;
		sglen = 0;
		for (j = 0; j < i; j++)
			sglen += segs[j].ds_len;
		if (sglen != mbuflen) {
			printf("load_mbuf: sglen %ld != mbuflen %lx\n",
				sglen, mbuflen);
			Debugger();
		}
		if (sglen != len) {
			printf("load_mbuf: sglen %ld != len %lx\n",
				sglen, len);
			Debugger();
		}
		retval = bus_dmamap_load_raw(t, map, segs, i,
			(bus_size_t)len, flags);
		if (map->dm_mapsize != len) {
			printf("load_mbuf: mapsize %ld != len %lx\n",
				(long)map->dm_mapsize, len);
			Debugger();
		}
		sglen = 0;
		for (j = 0; j < map->dm_nsegs; j++)
			sglen += map->dm_segs[j].ds_len;
		if (sglen != len) {
			printf("load_mbuf: dmamap sglen %ld != len %lx\n",
				sglen, len);
			Debugger();
		}
		return (retval);
	}
#endif
	return (bus_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
/*
 * XXXXXXX The problem with this routine is that it needs to
 * lock the user address space that is being loaded, but there
 * is no real way for us to unlock it during the unload process.
 */
#if 0
	bus_dma_segment_t segs[MAX_DMA_SEGS];
	int i, j;
	size_t len;
	struct proc *p = uio->uio_procp;
	struct pmap *pm;

	/*
	 * Check user read/write access to the data buffer.
	 */
	if (uio->uio_segflg == UIO_USERSPACE) {
		pm = p->p_vmspace->vm_map.pmap;
		for (i = 0; i < uio->uio_iovcnt; i++) {
			/* XXXCDC: map not locked, rethink */
			if (__predict_false(!uvm_useracc(uio->uio_iov[i].iov_base,
				     uio->uio_iov[i].iov_len,
/* XXX is UIO_WRITE correct? */
				     (uio->uio_rw == UIO_WRITE) ? B_WRITE : B_READ)))
				return (EFAULT);
		}
	} else
		pm = pmap_kernel();

	i = 0;
	len = 0;
	for (j = 0; j < uio->uio_iovcnt; j++) {
		struct iovec *iov = &uio->uio_iov[j];
		vaddr_t vaddr = (vaddr_t)iov->iov_base;
		bus_size_t buflen = iov->iov_len;

		/*
		 * Lock the part of the user address space involved
		 *    in the transfer.
		 */
		PHOLD(p);
		if (__predict_false(uvm_vslock(p, vaddr, buflen,
			    (uio->uio_rw == UIO_WRITE) ?
			    VM_PROT_WRITE : VM_PROT_READ)
			    != 0)) {
				goto after_vsunlock;
			}

		len += buflen;
		while (buflen > 0 && i < MAX_DMA_SEGS) {
			paddr_t pa;
			long incr;

			incr = min(buflen, NBPG);
			(void) pmap_extract(pm, vaddr, &pa);
			buflen -= incr;
			vaddr += incr;
			if (segs[i].ds_len == 0)
				segs[i].ds_addr = pa;


			if (i > 0 && pa == (segs[i-1].ds_addr + segs[i-1].ds_len)
			    && ((segs[i-1].ds_len + incr) <= map->_dm_maxsegsz)) {
				/* Hey, waddyaknow, they're contiguous */
				segs[i-1].ds_len += incr;
				continue;
			}
			segs[i].ds_addr = pa;
			segs[i].ds_len = incr;
			segs[i]._ds_boundary = 0;
			segs[i]._ds_align = 0;
			segs[i]._ds_mlist = NULL;
			i++;
		}
		uvm_vsunlock(p, iov->iov_base, iov->iov_len);
		PRELE(p);
		if (buflen > 0 && i >= MAX_DMA_SEGS)
			/* Exceeded the size of our dmamap */
			return E2BIG;
	}
	map->_dm_type = _DM_TYPE_UIO;
	map->_dm_source = (void *)uio;
	return (bus_dmamap_load_raw(t, map, segs, i,
				    (bus_size_t)len, flags));
#endif
	return 0;
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	int i;
	struct vm_page *pg;
	struct pglist *pglist;
	paddr_t pa;

	for (i = 0; i < map->dm_nsegs; i++) {
		if ((pglist = map->dm_segs[i]._ds_mlist) == NULL) {

			/*
			 * We were asked to load random VAs and lost the
			 * PA info so just blow the entire cache away.
			 */
			blast_dcache();
			break;
		}
		TAILQ_FOREACH(pg, pglist, pageq) {
			pa = VM_PAGE_TO_PHYS(pg);

			/*
			 * We should be flushing a subrange, but we
			 * don't know where the segment starts.
			 */
			dcache_flush_page(pa);
		}
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	int i;
	struct vm_page *pg;
	struct pglist *pglist;

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((ops & BUS_DMASYNC_PREREAD) || (ops & BUS_DMASYNC_PREWRITE)) {

		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		__asm("membar #Sync" : );
	}
	if (ops & BUS_DMASYNC_POSTREAD) {
		/* Invalidate the vcache */
		for (i = 0; i < map->dm_nsegs; i++) {
			if ((pglist = map->dm_segs[i]._ds_mlist) == NULL)
				/* Should not really happen. */
				continue;
			TAILQ_FOREACH(pg, pglist, pageq) {
				paddr_t start;
				psize_t size = NBPG;

				if (offset < NBPG) {
					start = VM_PAGE_TO_PHYS(pg) + offset;
					if (size > len)
						size = len;
					cache_flush_phys(start, size, 0);
					len -= size;
					continue;
				}
				offset -= size;
			}
		}
	}
	if (ops & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}
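
/*
 * Illustrative driver call order (hypothetical map): issue
 * bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD) before starting
 * a device-to-memory transfer and BUS_DMASYNC_POSTREAD once it
 * completes, so the membar and cache flushes above land on the correct
 * side of the DMA.
 */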

extern paddr_t   vm_first_phys, vm_num_phys;
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	vaddr_t low, high;
	struct pglist *pglist;
	int error;

	/* Always round the size. */
	size = round_page(size);
	low = vm_first_phys;
	high = vm_first_phys + vm_num_phys - PAGE_SIZE;

	if ((pglist = malloc(sizeof(*pglist), M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	/*
	 * If the bus uses DVMA then ignore boundary and alignment.
	 */
	segs[0]._ds_boundary = boundary;
	segs[0]._ds_align = alignment;
	if (flags & BUS_DMA_DVMA) {
		boundary = 0;
		alignment = 0;
	}

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high,
	    alignment, boundary, pglist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	segs[0].ds_addr = NULL; /* UPA does not map things */
	segs[0].ds_len = size;
	*rsegs = 1;

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * NOBODY SHOULD TOUCH THE pageq FIELDS WHILE THESE PAGES
	 * ARE IN OUR CUSTODY.
	 */
	segs[0]._ds_mlist = pglist;

	/* The bus driver should do the actual mapping */
	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{

	if (nsegs != 1)
		panic("bus_dmamem_free: nsegs = %d", nsegs);

	/*
	 * Return the list of pages back to the VM system.
	 */
	uvm_pglistfree(segs[0]._ds_mlist);
	free(segs[0]._ds_mlist, M_DEVBUF);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va, sva;
	int r, cbit;
	size_t oversize;
	u_long align;

	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);

	cbit = PMAP_NC;
	align = PAGE_SIZE;

	size = round_page(size);

	/*
	 * Find a region of kernel virtual addresses that can accommodate
	 * our alignment requirements.
	 */
	oversize = size + align - PAGE_SIZE;
	r = uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0));
	if (r != 0)
		return (ENOMEM);

	/* Compute start of aligned region */
	va = sva;
	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);

	/* Return excess virtual addresses */
	if (va != sva)
		uvm_unmap(kernel_map, sva, va);
	if (va + size != sva + oversize)
		uvm_unmap(kernel_map, va + size, sva + oversize);

	*kvap = (caddr_t)va;
	return (0);
}
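
/*
 * Worked example of the alignment arithmetic above (hypothetical
 * numbers; here align is always PAGE_SIZE, but suppose align = 0x4000
 * with PAGE_SIZE = 0x2000 and size = 0x2000, so oversize = 0x4000): if
 * uvm_map() returned sva = 0x6000 and segs[0].ds_addr = 0, then
 * va += ((0 & 0x3fff) + 0x4000 - 0x6000) & 0x3fff = 0x2000, giving
 * va = 0x8000, which is 0x4000-aligned; the excess [0x6000, 0x8000)
 * is handed back by the first uvm_unmap() call.
 */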

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);
	uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{

	panic("_bus_dmamem_mmap: not implemented");
}


struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};


/*
 * Base bus space handlers.
 */
static int	sparc_bus_map __P((bus_space_tag_t, bus_addr_t,
				    bus_size_t, int, vaddr_t, bus_space_handle_t *));
static int	sparc_bus_unmap __P((bus_space_tag_t, bus_space_handle_t,
				     bus_size_t));
static int	sparc_bus_subregion __P((bus_space_tag_t, bus_space_handle_t,
					 bus_size_t, bus_size_t,
					 bus_space_handle_t *));
static paddr_t	sparc_bus_mmap __P((bus_space_tag_t, bus_addr_t, off_t, int, int));
static void	*sparc_mainbus_intr_establish __P((bus_space_tag_t, int, int,
						   int, int (*) __P((void *)),
						   void *));
static int	sparc_bus_alloc __P((bus_space_tag_t, bus_addr_t, bus_addr_t,
				     bus_size_t, bus_size_t, bus_size_t, int,
				     bus_addr_t *, bus_space_handle_t *));
static void	sparc_bus_free __P((bus_space_tag_t, bus_space_handle_t,
				    bus_size_t));

vaddr_t iobase = IODEV_BASE;
struct extent *io_space = NULL;

int
sparc_bus_map(t, addr, size, flags, unused, hp)
	bus_space_tag_t t;
	bus_addr_t	addr;
	bus_size_t	size;
	int		flags;
	vaddr_t		unused;
	bus_space_handle_t *hp;
{
	vaddr_t v;
	u_int64_t pa;
	paddr_t	pm_flags = 0;
	vm_prot_t pm_prot = VM_PROT_READ;
	int err, map_little = 0;

	if (iobase == NULL)
		iobase = IODEV_BASE;
	if (io_space == NULL)
		/*
		 * And set up IOSPACE extents.
		 */
		io_space = extent_create("IOSPACE",
					 (u_long)IODEV_BASE, (u_long)IODEV_END,
					 M_DEVBUF, 0, 0, EX_NOWAIT);


	size = round_page(size);
	if (size == 0) {
		printf("sparc_bus_map: zero size\n");
		return (EINVAL);
	}
	switch (t->type) {
	case PCI_CONFIG_BUS_SPACE:
		/*
		 * PCI config space is special.
		 *
		 * It's really big and seldom used.  In order not to run
		 * out of IO mappings, config space will not be mapped in,
		 * rather it will be accessed through MMU bypass ASI accesses.
		 */
		if (flags & BUS_SPACE_MAP_LINEAR) return (-1);
		hp->_ptr = addr;
		hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
		hp->_sasi = ASI_PHYS_NON_CACHED;
		DPRINTF(BSDB_MAP, ("\nsparc_bus_map: type %x flags %x "
			"addr %016llx size %016llx virt %llx paddr %016llx\n",
			(int)t->type, (int) flags, (unsigned long long)addr,
			(unsigned long long)size, (unsigned long long)hp->_ptr,
			(unsigned long long)pa));
		return (0);
	case PCI_IO_BUS_SPACE:
		map_little = 1;
		break;
	case PCI_MEMORY_BUS_SPACE:
		map_little = 1;
		break;
	default:
		map_little = 0;
		break;
	}

#ifdef _LP64
	/* If it's not LINEAR don't bother to map it.  Use phys accesses. */
	if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
		hp->_ptr = addr;
		if (map_little)
			hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
		else
			hp->_asi = ASI_PHYS_NON_CACHED;
		hp->_sasi = ASI_PHYS_NON_CACHED;
		return (0);
	}
#endif

	if (!(flags & BUS_SPACE_MAP_CACHEABLE)) pm_flags |= PMAP_NC;

	if ((err = extent_alloc(io_space, size, NBPG,
		0, EX_NOWAIT|EX_BOUNDZERO, (u_long *)&v)))
			panic("sparc_bus_map: cannot allocate io_space: %d", err);

	/* note: preserve page offset */
	hp->_ptr = (v | ((u_long)addr & PGOFSET));
	hp->_sasi = ASI_PRIMARY;
	if (map_little)
		hp->_asi = ASI_PRIMARY_LITTLE;
	else
		hp->_asi = ASI_PRIMARY;

	pa = addr & ~PAGE_MASK; /* = trunc_page(addr); Will drop high bits */
	if (!(flags & BUS_SPACE_MAP_READONLY))
		pm_prot |= VM_PROT_WRITE;

	DPRINTF(BSDB_MAP, ("\nsparc_bus_map: type %x flags %x "
		"addr %016llx size %016llx virt %llx paddr %016llx\n",
		(int)t->type, (int) flags, (unsigned long long)addr,
		(unsigned long long)size, (unsigned long long)hp->_ptr,
		(unsigned long long)pa));

	do {
		DPRINTF(BSDB_MAP, ("sparc_bus_map: phys %llx virt %p hp %llx\n",
			(unsigned long long)pa, (char *)v,
			(unsigned long long)hp->_ptr));
		pmap_kenter_pa(v, pa | pm_flags, pm_prot);
		v += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);
	pmap_update(pmap_kernel());
	return (0);
}

int
sparc_bus_subregion(tag, handle, offset, size, nhandlep)
	bus_space_tag_t		tag;
	bus_space_handle_t	handle;
	bus_size_t		offset;
	bus_size_t		size;
	bus_space_handle_t	*nhandlep;
{
	nhandlep->_ptr = handle._ptr + offset;
	nhandlep->_asi = handle._asi;
	nhandlep->_sasi = handle._sasi;
	return (0);
}

int
sparc_bus_unmap(t, bh, size)
	bus_space_tag_t t;
	bus_size_t	size;
	bus_space_handle_t bh;
{
	vaddr_t va = trunc_page((vaddr_t)bh._ptr);
	vaddr_t endva = va + round_page(size);
	int error = 0;

	if (PHYS_ASI(bh._asi)) return (0);

	error = extent_free(io_space, va, size, EX_NOWAIT);
	if (error) printf("sparc_bus_unmap: extent free sez %d\n", error);

	pmap_remove(pmap_kernel(), va, endva);
	return (0);
}

paddr_t
sparc_bus_mmap(t, paddr, off, prot, flags)
	bus_space_tag_t t;
	bus_addr_t	paddr;
	off_t		off;
	int		prot;
	int		flags;
{
	/* Devices are un-cached... although the driver should do that */
	return ((paddr+off)|PMAP_NC);
}


void *
sparc_mainbus_intr_establish(t, pil, level, flags, handler, arg)
	bus_space_tag_t t;
	int	pil;
	int	level;
	int	flags;
	int	(*handler)__P((void *));
	void	*arg;
{
	struct intrhand *ih;

	ih = (struct intrhand *)
		malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_fun = handler;
	ih->ih_arg = arg;
	intr_establish(pil, ih);
	return (ih);
}

int
sparc_bus_alloc(t, rs, re, s, a, b, f, ap, hp)
	bus_space_tag_t t;
	bus_addr_t	rs;
	bus_addr_t	re;
	bus_size_t	s;
	bus_size_t	a;
	bus_size_t	b;
	int		f;
	bus_addr_t	*ap;
	bus_space_handle_t *hp;
{
	return (ENOTTY);
}

void
sparc_bus_free(t, h, s)
	bus_space_tag_t	t;
	bus_space_handle_t	h;
	bus_size_t	s;
{
	return;
}

struct sparc_bus_space_tag mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	UPA_BUS_SPACE,			/* type */
	sparc_bus_alloc,
	sparc_bus_free,
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish	/* bus_intr_establish */
};
2033