/*	$NetBSD: machdep.c,v 1.193 2002/03/28 15:45:01 pk Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_sunos.h"
#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/extent.h>
#include <sys/map.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/clist.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/syscallargs.h>
#include <sys/exec.h>

#include <uvm/uvm.h>		/* we use uvm.kernel_object */

#include <sys/sysctl.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/oldmon.h>
#include <machine/bsd_openprom.h>
#include <machine/bootinfo.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/cache.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>

#include "fb.h"
#include "power.h"
#include "tctrl.h"

#if NPOWER > 0
#include <sparc/dev/power.h>
#endif
#if NTCTRL > 0
#include <machine/tctrl.h>
#include <sparc/dev/tctrlvar.h>
#endif

struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
extern paddr_t avail_end;

int	physmem;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = 0;

/*
 * dvmamap24 is used to manage DVMA memory for devices that have the upper
 * eight address bits wired to all-ones (e.g. `le' and `ie')
 */
struct extent *dvmamap24;
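
/*
 * Example (illustrative only): a sun4/sun4c Ethernet driver carves its
 * DVMA addresses out of this extent with something like
 *
 *	extent_alloc(dvmamap24, size, PAGE_SIZE, map->_dm_boundary,
 *	    EX_NOWAIT, &dva);
 *
 * and returns them with extent_free(); see sun4_dmamap_load() and
 * sun4_dmamap_unload() below for the real users.
 */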

void	dumpsys __P((void));
void	stackdump __P((void));

caddr_t	mdallocsys __P((caddr_t));

/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
	unsigned i;
	caddr_t v;
	int base, residual;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	paddr_t pa;
	char pbuf[9];

#ifdef DEBUG
	pmapdebug = 0;
#endif

	/*
	 * Re-map the message buffer from its temporary address
	 * at KERNBASE to MSGBUF_VA.
	 */
#if !defined(MSGBUFSIZE) || MSGBUFSIZE <= 8192
	/*
	 * We use the free page(s) in front of the kernel load address.
	 */
	size = 8192;

	/* Get physical address of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, size);
	pmap_update(pmap_kernel());

	/* Enter the new mapping */
	pmap_map(MSGBUF_VA, pa, pa + size, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * Re-initialize the message buffer.
	 */
	initmsgbuf((caddr_t)MSGBUF_VA, size);
#else /* MSGBUFSIZE */
	{
	struct pglist mlist;
	struct vm_page *m;
	vaddr_t va0, va;

	/*
	 * We use the free page(s) in front of the kernel load address,
	 * and then allocate some more.
	 */
	size = round_page(MSGBUFSIZE);

	/* Get physical address of first 8192 chunk of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);

	/* Allocate additional physical pages */
	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(size - 8192,
			    vm_first_phys, vm_first_phys+vm_num_phys,
			    0, 0, &mlist, 1, 0) != 0)
		panic("cpu_startup: no memory for message buffer");

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, 8192);
	pmap_update(pmap_kernel());

	/* Allocate virtual memory space */
	va0 = va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("cpu_startup: no virtual memory for message buffer");

	/* Map first 8192 */
	while (va < va0 + 8192) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/* Map the rest of the pages */
	TAILQ_FOREACH(m, &mlist, pageq) {
		if (va >= va0 + size)
			panic("cpu_startup: message buffer size botch");
		pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/*
	 * Re-initialize the message buffer.
	 */
	initmsgbuf((caddr_t)va0, size);
	}
#endif /* MSGBUFSIZE */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	size = (vsize_t)allocsys(NULL, mdallocsys);

	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(size))) == 0)
		panic("startup: no room for tables");

	if ((vsize_t)(allocsys(v, mdallocsys) - v) != size)
		panic("startup: table size inconsistency");

	/*
	 * allocate virtual and physical memory for the buffers.
	 */
	size = MAXBSIZE * nbuf;		/* # bytes for buffers */

	/* allocate VM for buffers... area is not managed by VM system */
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("cpu_startup: cannot allocate VM for buffers");

	minaddr = (vaddr_t) buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		bufpages = btoc(MAXBSIZE) * nbuf; /* do not overallocate RAM */
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;

	/* now allocate RAM for buffers */
	for (i = 0 ; i < nbuf ; i++) {
		vaddr_t curbuf;
		vsize_t curbufsize;
		struct vm_page *pg;

		/*
		 * each buffer has MAXBSIZE bytes of VM space allocated.  of
		 * that MAXBSIZE space we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
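		/*
		 * Worked example (hypothetical numbers): with nbuf = 10
		 * and bufpages = 23, base = 2 and residual = 3, so
		 * buffers 0-2 get 3 pages each and buffers 3-9 get 2.
		 */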
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: "
				    "not enough RAM for buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	if (CPU_ISSUN4OR4C) {
		/*
		 * Allocate dma map for 24-bit devices (le, ie);
		 * [dvma_base - dvma_end] is for VME devices.
		 */
		dvmamap24 = extent_create("dvmamap24",
					  D24_DVMA_BASE, D24_DVMA_END,
					  M_DEVBUF, 0, 0, EX_NOWAIT);
		if (dvmamap24 == NULL)
			panic("unable to allocate DVMA map");
	}

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	pmap_redzone();
}

caddr_t
mdallocsys(v)
	caddr_t v;
{

	/* Clip bufpages if necessary. */
	if (CPU_ISSUN4C && bufpages > (128 * (65536/MAXBSIZE)))
		bufpages = (128 * (65536/MAXBSIZE));

	return (v);
}

/*
 * Set up registers on exec.
 *
 * XXX this entire mess must be fixed
 */
/* ARGSUSED */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct trapframe *tf = p->p_md.md_tf;
	struct fpstate *fs;
	int psr;

	/* Don't allow misaligned code by default */
	p->p_md.md_flags &= ~MDP_FIXALIGN;

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec()
	 *	%psr: (retain CWP and PSR_S bits)
	 *	%g1: address of p->p_psstr (used by crt0)
	 *	%pc,%npc: entry point of program
	 */
	psr = tf->tf_psr & (PSR_S | PSR_CWP);
	if ((fs = p->p_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		if (p == cpuinfo.fpproc) {
			savefpstate(fs);
			cpuinfo.fpproc = NULL;
		} else if (p->p_md.md_fpumid != -1)
			panic("setregs: own FPU on module %d; fix this",
				p->p_md.md_fpumid);
		p->p_md.md_fpumid = -1;
		free((void *)fs, M_SUBPROC);
		p->p_md.md_fpstate = NULL;
	}
	bzero((caddr_t)tf, sizeof *tf);
	tf->tf_psr = psr;
	tf->tf_global[1] = (int)p->p_psstr;
	tf->tf_pc = pack->ep_entry & ~3;
	tf->tf_npc = tf->tf_pc + 4;
	stack -= sizeof(struct rwindow);
	tf->tf_out[6] = stack;
}

#ifdef DEBUG
int sigdebug = 0;
int sigpid = 0;
#define SDB_FOLLOW	0x01
#define SDB_KSTACK	0x02
#define SDB_FPSTATE	0x04
#endif

struct sigframe {
	int	sf_signo;		/* signal number */
	int	sf_code;		/* code */
	struct	sigcontext *sf_scp;	/* SunOS user addr of sigcontext */
	int	sf_addr;		/* SunOS compat, always 0 for now */
	struct	sigcontext sf_sc;	/* actual sigcontext */
};
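
/*
 * Sketch of the user stack as sendsig() below leaves it (it grows
 * down; addresses decrease toward the top of this diagram):
 *
 *	[ interrupted frame at `oldsp' ]
 *	[ struct sigframe, 8-byte aligned (`fp') ]
 *	[ struct rwindow at `newsp', rw_in[6] -> oldsp ]
 *
 * The rwindow lets the handler's register window join seamlessly with
 * the interrupted frame, so debuggers and _longjmp can unwind through
 * the signal.
 */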

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	char *cp;
	struct btinfo_kernelfile *bi_file;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */

	switch (name[0]) {
	case CPU_BOOTED_KERNEL:
		if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL)
			cp = bi_file->name;
		else
			cp = prom_getbootfile();
		if (cp == NULL)
			return (ENOENT);
		if (*cp == '\0')
			cp = "netbsd";
		return (sysctl_rdstring(oldp, oldlenp, newp, cp));
	case CPU_BOOTED_DEVICE:
		cp = prom_getbootpath();
		if (cp == NULL || cp[0] == '\0')
			return (ENOENT);
		return (sysctl_rdstring(oldp, oldlenp, newp, cp));
	case CPU_BOOT_ARGS:
		cp = prom_getbootargs();
		if (cp == NULL || cp[0] == '\0')
			return (ENOENT);
		return (sysctl_rdstring(oldp, oldlenp, newp, cp));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
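
/*
 * From userland these nodes appear under the "machdep" sysctl tree;
 * e.g. `sysctl machdep.booted_kernel' should report the path returned
 * above (assuming the standard MI name mapping for CPU_BOOTED_KERNEL).
 */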

/*
 * Send an interrupt to process.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct sigframe *fp;
	struct trapframe *tf;
	int addr, onstack, oldsp, newsp;
	struct sigframe sf;

	tf = p->p_md.md_tf;
	oldsp = tf->tf_out[6];

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	onstack =
	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (onstack)
		fp = (struct sigframe *)((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
		                               p->p_sigctx.ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)oldsp;

	fp = (struct sigframe *)((int)(fp - 1) & ~7);

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: %s[%d] sig %d newusp %p scp %p\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc);
#endif
	/*
	 * Now set up the signal frame.  We build it in kernel space
	 * and then copy it out.  We probably ought to just build it
	 * directly in user space....
	 */
	sf.sf_signo = sig;
	sf.sf_code = code;
	sf.sf_scp = 0;
	sf.sf_addr = 0;			/* XXX */

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;
	sf.sf_sc.sc_mask = *mask;
#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &sf.sf_sc.__sc_mask13);
#endif
	sf.sf_sc.sc_sp = oldsp;
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
	sf.sf_sc.sc_psr = tf->tf_psr;
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
	newsp = (int)fp - sizeof(struct rwindow);
	write_user_windows();
	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
	    suword(&((struct rwindow *)newsp)->rw_in[6], oldsp)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
#endif
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig: %s[%d] sig %d scp %p\n",
		       p->p_comm, p->p_pid, sig, &fp->sf_sc);
#endif
	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
	addr = (int)p->p_sigctx.ps_sigcode;
	tf->tf_global[1] = (int)catcher;
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = newsp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: about to return to catcher\n");
#endif
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above),
 * and return to the given trap frame (if there is one).
 * Check carefully to make sure that the user has not
 * modified the state to gain improper privileges or to cause
 * a machine fault.
 */
/* ARGSUSED */
int
sys___sigreturn14(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext sc, *scp;
	struct trapframe *tf;
	int error;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(p))
		sigexit(p, SIGILL);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: %s[%d], sigcntxp %p\n",
		    p->p_comm, p->p_pid, SCARG(uap, sigcntxp));
#endif
	if ((error = copyin(SCARG(uap, sigcntxp), &sc, sizeof sc)) != 0)
		return (error);
	scp = &sc;

	tf = p->p_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
	if (((scp->sc_pc | scp->sc_npc) & 3) != 0)
		return (EINVAL);

	/* take only psr ICC field */
	tf->tf_psr = (tf->tf_psr & ~PSR_ICC) | (scp->sc_psr & PSR_ICC);
	tf->tf_pc = scp->sc_pc;
	tf->tf_npc = scp->sc_npc;
	tf->tf_global[1] = scp->sc_g1;
	tf->tf_out[0] = scp->sc_o0;
	tf->tf_out[6] = scp->sc_sp;

	if (scp->sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask */
	(void) sigprocmask1(p, SIG_SETMASK, &scp->sc_mask, 0);

	return (EJUSTRETURN);
}

int	waittime = -1;

void
cpu_reboot(howto, user_boot_string)
	int howto;
	char *user_boot_string;
{
	int i;
	static char str[128];

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

#if NFB > 0
	fb_unblank();
#endif
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		extern struct proc proc0;
		extern int sparc_clock_time_is_ok;

		/* XXX protect against curproc->p_stats.foo refs in sync() */
		if (curproc == NULL)
			curproc = &proc0;
		waittime = 0;
		vfs_shutdown();

		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of sync; adjust it now.
		 * Do this only if the TOD clock has already been read out
		 * successfully by inittodr() or set by an explicit call
		 * to resettodr() (e.g. from settimeofday()).
		 */
		if (sparc_clock_time_is_ok)
			resettodr();
	}

	/* Disable interrupts. */
	(void) splhigh();

	/* If rebooting and a dump is requested, do it. */
#if 0
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
#else
	if (howto & RB_DUMP)
#endif
		dumpsys();

 haltsys:

	/* Run any shutdown hooks. */
	doshutdownhooks();

	/* If powerdown was requested, do it. */
	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
#if NPOWER > 0
		powerdown();
#endif
#if NTCTRL > 0
		tadpole_powerdown();
#endif
#if NPOWER > 0 || NTCTRL > 0
		printf("WARNING: powerdown failed!\n");
#endif
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
	}

	if (howto & RB_HALT) {
		printf("halted\n\n");
		prom_halt();
	}

	printf("rebooting\n\n");
	if (user_boot_string && *user_boot_string) {
		i = strlen(user_boot_string);
		if (i >= sizeof(str) - 2)	/* leave room for flags + NUL */
			prom_boot(user_boot_string);	/* XXX */
		bcopy(user_boot_string, str, i);
	} else {
		i = 1;
		str[0] = '\0';
	}

	if (howto & RB_SINGLE)
		str[i++] = 's';
	if (howto & RB_KDB)
		str[i++] = 'd';
	if (i > 1) {
		if (str[0] == '\0')
			str[0] = '-';
		str[i] = 0;
	} else
		str[0] = 0;
	prom_boot(str);
	/*NOTREACHED*/
}

u_int32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;

void
cpu_dumpconf()
{
	int nblks, dumpblks;

	if (dumpdev == NODEV || bdevsw[major(dumpdev)].d_psize == 0)
		/* No usable dump device */
		return;

	nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);

	dumpblks = ctod(physmem) + pmap_dumpsize();
	if (dumpblks > (nblks - ctod(1)))
		/*
		 * dump size is too big for the partition.
		 * Note, we safeguard one `click' (page) at the front
		 * for a possible disk label.
		 */
		return;

	/* Put the dump at the end of the partition */
	dumplo = nblks - dumpblks;

	/*
	 * savecore(8) expects dumpsize to be the number of pages
	 * of actual core dumped (i.e. excluding the MMU stuff).
	 */
	dumpsize = physmem;
}
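
/*
 * Example with made-up numbers: on a 32 MB machine (physmem = 8192
 * 4 KB pages) with 512-byte disk blocks, ctod(physmem) is 65536
 * blocks, so the dump needs 32 MB plus pmap_dumpsize() blocks of MMU
 * state and must still fit below the end of the dump partition.
 */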

#define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
static vaddr_t dumpspace;

caddr_t
reserve_dumppages(p)
	caddr_t p;
{

	dumpspace = (vaddr_t)p;
	return (p + BYTES_PER_DUMP);
}
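
/*
 * dumpsys() below reuses this single BYTES_PER_DUMP-sized window:
 * each chunk of physical memory is mapped at `dumpspace' with
 * pmap_map(), written out through the driver's d_dump entry point,
 * and unmapped again before the next chunk.
 */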

/*
 * Write a crash dump.
 */
void
dumpsys()
{
	int psize;
	daddr_t blkno;
	int (*dump)	__P((dev_t, daddr_t, caddr_t, size_t));
	int error = 0;
	struct memarr *mp;
	int nmem;
	extern struct memarr pmemarr[];
	extern int npmemarr;

	/* copy registers to memory */
	snapshot(cpuinfo.curpcb);
	stackdump();

	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already been configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	blkno = dumplo;
	dump = bdevsw[major(dumpdev)].d_dump;

	error = pmap_dumpmmu(dump, blkno);
	blkno += pmap_dumpsize();

	for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
		unsigned i = 0, n;
		int maddr = mp->addr;

		if (maddr == 0) {
			/* Skip first page at physical address 0 */
			maddr += NBPG;
			i += NBPG;
			blkno += btodb(NBPG);
		}

		for (; i < mp->len; i += n) {
			n = mp->len - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			/* print out how many MBs we have dumped */
			if (i && (i % (1024*1024)) == 0)
				printf("%d ", i / (1024*1024));

			(void) pmap_map(dumpspace, maddr, maddr + n,
					VM_PROT_READ);
			error = (*dump)(dumpdev, blkno,
					(caddr_t)dumpspace, (int)n);
			pmap_remove(pmap_kernel(), dumpspace, dumpspace + n);
			pmap_update(pmap_kernel());
			if (error)
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}

/*
 * Get the %fp and dump the stack as best we can; don't leave the
 * current stack page.
 */
void
stackdump()
{
	struct frame *fp = getfp(), *sfp;

	sfp = fp;
	printf("Frame pointer is at %p\n", fp);
	printf("Call traceback:\n");
	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
		printf("  pc = 0x%x  args = (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp = %p\n",
		    fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
		    fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6],
		    fp->fr_fp);
		fp = fp->fr_fp;
	}
}

int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	return (ENOEXEC);
}

#if defined(SUN4)
void
oldmon_w_trace(va)
	u_long va;
{
	u_long stop;
	struct frame *fp;

	if (curproc)
		printf("curproc = %p, pid %d\n", curproc, curproc->p_pid);
	else
		printf("no curproc\n");

	printf("uvm: swtch %d, trap %d, sys %d, intr %d, soft %d, faults %d\n",
	    uvmexp.swtch, uvmexp.traps, uvmexp.syscalls, uvmexp.intrs,
	    uvmexp.softs, uvmexp.faults);
	write_user_windows();

#define round_up(x) (( (x) + (NBPG-1) ) & (~(NBPG-1)) )

	printf("\nstack trace with sp = 0x%lx\n", va);
	stop = round_up(va);
	printf("stop at 0x%lx\n", stop);
	fp = (struct frame *) va;
	while (round_up((u_long) fp) == stop) {
		printf("  0x%x(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp %p\n", fp->fr_pc,
		    fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
		    fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6], fp->fr_fp);
		fp = fp->fr_fp;
		if (fp == NULL)
			break;
	}
	printf("end of stack trace\n");
}

void
oldmon_w_cmd(va, ar)
	u_long va;
	char *ar;
{
	switch (*ar) {
	case '\0':
		switch (va) {
		case 0:
			panic("g0 panic");
		case 4:
			printf("w: case 4\n");
			break;
		default:
			printf("w: unknown case %ld\n", va);
			break;
		}
		break;
	case 't':
		oldmon_w_trace(va);
		break;
	default:
		printf("w: arg not allowed\n");
	}
}
#endif /* SUN4 */

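/*
 * Load a byte from machine-specific control space with fault
 * protection: save the current pcb_onfault hook, let xldcontrolb()
 * perform the access (it installs its own fault catcher), then
 * restore the hook.  Used when probing hardware on sun4/sun4c;
 * not supported on sun4m (see the check below).
 */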
int
ldcontrolb(addr)
	caddr_t addr;
{
	struct pcb *xpcb;
	extern struct user *proc0paddr;
	u_long saveonfault;
	int res;
	int s;

	if (CPU_ISSUN4M) {
		printf("warning: ldcontrolb called on sun4m\n");
		return 0;
	}

	s = splhigh();
	if (curproc == NULL)
		xpcb = (struct pcb *)proc0paddr;
	else
		xpcb = &curproc->p_addr->u_pcb;

	saveonfault = (u_long)xpcb->pcb_onfault;
	res = xldcontrolb(addr, xpcb);
	xpcb->pcb_onfault = (caddr_t)saveonfault;

	splx(s);
	return (res);
}
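
/*
 * Zero a region of memory using at most halfword (16-bit) stores;
 * useful for device memory that must not be accessed with wider
 * stores.  Odd leading and trailing bytes are handled separately.
 */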
void
wzero(vb, l)
	void *vb;
	u_int l;
{
	u_char *b = vb;
	u_char *be = b + l;
	u_short *sp;

	if (l == 0)
		return;

	/* front, */
	if ((u_long)b & 1)
		*b++ = 0;

	/* back, */
	if (b != be && ((u_long)be & 1) != 0) {
		be--;
		*be = 0;
	}

	/* and middle. */
	sp = (u_short *)b;
	while (sp != (u_short *)be)
		*sp++ = 0;
}
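
/*
 * Like wzero(), copy a region using at most halfword loads and
 * stores; each halfword store is split into two byte stores when
 * the destination is odd-aligned.
 */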
void
wcopy(vb1, vb2, l)
	const void *vb1;
	void *vb2;
	u_int l;
{
	const u_char *b1e, *b1 = vb1;
	u_char *b2 = vb2;
	u_short *sp;
	int bstore = 0;

	if (l == 0)
		return;

	/* front, */
	if ((u_long)b1 & 1) {
		*b2++ = *b1++;
		l--;
	}

	/* middle, */
	sp = (u_short *)b1;
	b1e = b1 + l;
	if (l & 1)
		b1e--;
	bstore = (u_long)b2 & 1;

	while (sp < (u_short *)b1e) {
		if (bstore) {
			b2[1] = *sp & 0xff;
			b2[0] = *sp >> 8;
		} else
			*((short *)b2) = *sp;
		sp++;
		b2 += 2;
	}

	/* and back. */
	if (l & 1)
		*b2 = *b1e;
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct sparc_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct sparc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct sparc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_align = PAGE_SIZE;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
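
/*
 * Illustrative only (no driver code lives in this file): a typical
 * bus_dma client creates a map once and then, per transfer, does
 *
 *	bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, size, NULL, BUS_DMA_NOWAIT);
 *	...start DMA, bus_dmamap_sync() as appropriate...
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 *
 * dispatching through the sparc_bus_dma_tag function vectors defined
 * at the end of this file.
 */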

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{

	panic("_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	vaddr_t low, high;
	struct pglist *mlist;
	int error;

	/* Always round the size. */
	size = round_page(size);
	low = vm_first_phys;
	high = vm_first_phys + vm_num_phys - PAGE_SIZE;

	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(mlist);
	error = uvm_pglistalloc(size, low, high, 0, 0,
				mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error) {
		free(mlist, M_DEVBUF);	/* don't leak the list head */
		return (error);
	}

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * NOBODY SHOULD TOUCH THE pageq FIELDS WHILE THESE PAGES
	 * ARE IN OUR CUSTODY.
	 */
	segs[0]._ds_mlist = mlist;

	/*
	 * We now have physical pages, but no DVMA addresses yet. These
	 * will be allocated in bus_dmamap_load*() routines. Hence we
	 * save any alignment and boundary requirements in this dma
	 * segment.
	 */
	segs[0].ds_addr = 0;
	segs[0].ds_len = 0;
	segs[0]._ds_va = 0;
	*rsegs = 1;
	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{

	if (nsegs != 1)
		panic("bus_dmamem_free: nsegs = %d", nsegs);

	/*
	 * Return the list of pages back to the VM system.
	 */
	uvm_pglistfree(segs[0]._ds_mlist);
	free(segs[0]._ds_mlist, M_DEVBUF);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);
	uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{

	panic("_bus_dmamem_mmap: not implemented");
}

/*
 * Utility to allocate an aligned kernel virtual address range
 */
vaddr_t
_bus_dma_valloc_skewed(size, boundary, align, skew)
	size_t size;
	u_long boundary;
	u_long align;
	u_long skew;
{
	size_t oversize;
	vaddr_t va, sva;

	/*
	 * Find a region of kernel virtual addresses that is aligned
	 * to the given address modulo the requested alignment, i.e.
	 *
	 *	(va - skew) == 0 mod align
	 *
	 * The following conditions apply to the arguments:
	 *
	 *	- `size' must be a multiple of the VM page size
	 *	- `align' must be a power of two
	 *	   and greater than or equal to the VM page size
	 *	- `skew' must be smaller than `align'
	 *	- `size' must be smaller than `boundary'
	 */
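
	/*
	 * Worked example (hypothetical numbers): with align = 0x2000,
	 * skew = 0 and sva = 0x13000, the adjustment below adds
	 * (0 + 0x2000 - 0x13000) & 0x1fff = 0x1000, yielding
	 * va = 0x14000, which is 0x2000-aligned as required.
	 */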

#ifdef DIAGNOSTIC
	if ((size & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid size %lx", size);
	if ((align & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid alignment %lx", align);
	if (align < skew)
		panic("_bus_dma_valloc_skewed: align %lx < skew %lx",
			align, skew);
#endif

	/* XXX - Implement this! */
	if (boundary) {
		printf("_bus_dma_valloc_skewed: "
			"boundary check not implemented");
		return (0);
	}

	/*
	 * First, find a region large enough to contain any aligned chunk
	 */
	oversize = size + align - PAGE_SIZE;
	sva = uvm_km_valloc(kernel_map, oversize);
	if (sva == 0)
		return (0);

	/*
	 * Compute start of aligned region
	 */
	va = sva;
	va += (skew + align - va) & (align - 1);

	/*
	 * Return excess virtual addresses
	 */
	if (va != sva)
		(void)uvm_unmap(kernel_map, sva, va);
	if (va + size != sva + oversize)
		(void)uvm_unmap(kernel_map, va + size, sva + oversize);

	return (va);
}

/* sun4/sun4c dma map functions */
int	sun4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
				bus_size_t, struct proc *, int));
int	sun4_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
				bus_dma_segment_t *, int, bus_size_t, int));
void	sun4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
int	sun4_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
				int nsegs, size_t size, caddr_t *kvap,
				int flags));

/*
 * sun4/sun4c: load DMA map with a linear buffer.
 */
int
sun4_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	vaddr_t dva;
	pmap_t pmap;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	cpuinfo.cache_flush(buf, buflen);

	if ((map->_dm_flags & BUS_DMA_24BIT) == 0) {
		/*
		 * XXX Need to implement "don't DMA across this boundary".
		 */
		if (map->_dm_boundary != 0) {
			bus_addr_t baddr;

			/* Calculate first boundary line after `buf' */
			baddr = ((bus_addr_t)va + map->_dm_boundary) &
					-map->_dm_boundary;

			/*
			 * If the requested segment crosses the boundary,
			 * we can't grant a direct map. For now, steal some
			 * space from the `24BIT' map instead.
			 *
			 * (XXX - no overflow detection here)
			 */
			if (buflen > (baddr - (bus_addr_t)va))
				goto no_fit;
		}
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

no_fit:
	sgsize = round_page(buflen + (va & (pagesz - 1)));

	if (extent_alloc(dvmamap24, sgsize, pagesz, map->_dm_boundary,
			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
			 &dva) != 0) {
		return (ENOMEM);
	}

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		paddr_t pa;

		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

#ifdef notyet
#if defined(SUN4)
		if (have_iocache)
			pa |= PG_IOC;
#endif
#endif
		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	map->dm_nsegs = 1;
	return (0);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
sun4_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	struct vm_page *m;
	paddr_t pa;
	vaddr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	map->dm_nsegs = 0;
	sgsize = (size + pagesz - 1) & -pagesz;

	/* Allocate DVMA addresses */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		error = extent_alloc(dvmamap24, sgsize, pagesz,
					map->_dm_boundary,
					(flags & BUS_DMA_NOWAIT) == 0
						? EX_WAITOK : EX_NOWAIT,
					&dva);
		if (error)
			return (error);
	} else {
		/* Any properly aligned virtual address will do */
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
					     pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into IOMMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		if (sgsize == 0)
			panic("sun4_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
#ifdef notyet
#if defined(SUN4)
		if (have_iocache)
			pa |= PG_IOC;
#endif
#endif
		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * sun4/sun4c function for unloading a DMA map.
 */
void
sun4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	int flags = map->_dm_flags;
	vaddr_t dva;
	bus_size_t len;
	int i, s, error;

	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
		/* Nothing to release */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
		return;
	}

	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		pmap_kremove(dva, len);

		if ((flags & BUS_DMA_24BIT) != 0) {
			s = splhigh();
			error = extent_free(dvmamap24, dva, len, EX_NOWAIT);
			splx(s);
			if (error != 0)
				printf("warning: %lu bytes of DVMA space lost\n",
				    len);
		} else {
			uvm_unmap(kernel_map, dva, dva + len);
		}
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
sun4_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct vm_page *m;
	vaddr_t va;
	struct pglist *mlist;

	if (nsegs != 1)
		panic("sun4_dmamem_map: nsegs = %d", nsegs);

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (caddr_t)va;

	mlist = segs[0]._ds_mlist;
	TAILQ_FOREACH(m, mlist, pageq) {
		paddr_t pa;

		if (size == 0)
			panic("sun4_dmamem_map: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa | PMAP_NC, VM_PROT_READ | VM_PROT_WRITE);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}

struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,				/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sun4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	sun4_dmamap_load_raw,
	sun4_dmamap_unload,
	_bus_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sun4_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};

/*
 * Base bus space handlers.
 */
static int	sparc_bus_map __P((bus_space_tag_t, bus_addr_t,
				   bus_size_t, int, vaddr_t,
				   bus_space_handle_t *));
static int	sparc_bus_unmap __P((bus_space_tag_t, bus_space_handle_t,
				     bus_size_t));
static int	sparc_bus_subregion __P((bus_space_tag_t, bus_space_handle_t,
					 bus_size_t, bus_size_t,
					 bus_space_handle_t *));
static paddr_t	sparc_bus_mmap __P((bus_space_tag_t, bus_addr_t, off_t,
				    int, int));
static void	*sparc_mainbus_intr_establish __P((bus_space_tag_t, int, int,
						   int, int (*) __P((void *)),
						   void *));
static void	sparc_bus_barrier __P((bus_space_tag_t, bus_space_handle_t,
				       bus_size_t, bus_size_t, int));

int
sparc_bus_map(t, ba, size, flags, va, hp)
	bus_space_tag_t t;
	bus_addr_t	ba;
	bus_size_t	size;
	int		flags;
	vaddr_t		va;
	bus_space_handle_t *hp;
{
	vaddr_t v;
	paddr_t pa;
	unsigned int pmtype;
static	vaddr_t iobase;

	if (iobase == 0)
		iobase = IODEV_BASE;

	size = round_page(size);
	if (size == 0) {
		printf("sparc_bus_map: zero size\n");
		return (EINVAL);
	}

	if (va)
		v = trunc_page(va);
	else {
		v = iobase;
		iobase += size;
		if (iobase > IODEV_END)	/* unlikely */
			panic("sparc_bus_map: iobase=0x%lx", iobase);
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = BUS_ADDR_PADDR(ba);

	/* note: preserve page offset */
	*hp = (bus_space_handle_t)(v | ((u_long)pa & PGOFSET));

	pa = trunc_page(pa);
	do {
		pmap_kenter_pa(v, pa | pmtype | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE);
		v += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);

	pmap_update(pmap_kernel());
	return (0);
}

int
sparc_bus_unmap(t, bh, size)
	bus_space_tag_t t;
	bus_space_handle_t bh;
	bus_size_t	size;
{
	vaddr_t va = trunc_page((vaddr_t)bh);

	pmap_kremove(va, round_page(size));
	pmap_update(pmap_kernel());
	return (0);
}

int
sparc_bus_subregion(tag, handle, offset, size, nhandlep)
	bus_space_tag_t		tag;
	bus_space_handle_t	handle;
	bus_size_t		offset;
	bus_size_t		size;
	bus_space_handle_t	*nhandlep;
{
	*nhandlep = handle + offset;
	return (0);
}

paddr_t
sparc_bus_mmap(t, ba, off, prot, flags)
	bus_space_tag_t t;
	bus_addr_t	ba;
	off_t		off;
	int		prot;
	int		flags;
{
	u_int pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	paddr_t pa = trunc_page(BUS_ADDR_PADDR(ba) + off);
	return (paddr_t)(pa | pmtype | PMAP_NC);
}

/*
 * Establish a temporary bus mapping for device probing.
 */
int
bus_space_probe(tag, paddr, size, offset, flags, callback, arg)
	bus_space_tag_t tag;
	bus_addr_t	paddr;
	bus_size_t	size;
	size_t		offset;
	int		flags;
	int		(*callback) __P((void *, void *));
	void		*arg;
{
	bus_space_handle_t bh;
	caddr_t tmp;
	int result;

	if (bus_space_map2(tag, paddr, size, flags, TMPMAP_VA, &bh) != 0)
		return (0);

	tmp = (caddr_t)bh;
	result = (probeget(tmp + offset, size) != -1);
	if (result && callback != NULL)
		result = (*callback)(tmp, arg);
	bus_space_unmap(tag, bh, size);
	return (result);
}

void *
sparc_mainbus_intr_establish(t, pil, level, flags, handler, arg)
	bus_space_tag_t t;
	int	pil;
	int	level;
	int	flags;
	int	(*handler)__P((void *));
	void	*arg;
{
	struct intrhand *ih;

	ih = (struct intrhand *)
		malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_fun = handler;
	ih->ih_arg = arg;
	if ((flags & BUS_INTR_ESTABLISH_FASTTRAP) != 0)
		intr_fasttrap(pil, (void (*)__P((void)))handler);
	else
		intr_establish(pil, ih);
	return (ih);
}

void
sparc_bus_barrier(t, h, offset, size, flags)
	bus_space_tag_t	t;
	bus_space_handle_t h;
	bus_size_t	offset;
	bus_size_t	size;
	int		flags;
{
	/* No default barrier action defined */
	return;
}

struct sparc_bus_space_tag mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_barrier,		/* bus_space_barrier */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish	/* bus_intr_establish */
};