/*	$NetBSD: dvma.c,v 1.22 2001/09/05 13:21:09 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm.h> /* XXX: not _extern ... need uvm_map_create */

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <sun3/sun3/control.h>
#include <sun3/sun3/machdep.h>

/* DVMA is the last 1MB, but the PROM owns the last page. */
#define DVMA_MAP_END	(DVMA_MAP_BASE + DVMA_MAP_AVAIL)

/* Resource map used by dvma_mapin/dvma_mapout */
#define	NUM_DVMA_SEGS 10
struct map dvma_segmap[NUM_DVMA_SEGS];

/* XXX: Might need to tune this... */
vsize_t dvma_segmap_size = 6 * NBSG;

/* Using phys_map to manage DVMA scratch-memory pages. */
/* Note: Could use separate pagemap for obio if needed. */

void
dvma_init()
{
	vaddr_t segmap_addr;

	/*
	 * Create phys_map covering the entire DVMA space,
	 * then allocate the segment pool from that.  The
	 * remainder will be used as the DVMA page pool.
	 *
	 * Note that phys_map does not need to be INTRSAFE:
	 * anything handled in interrupt context goes through
	 * dvma_segmap, not phys_map.
	 */
	phys_map = uvm_map_create(pmap_kernel(),
		DVMA_MAP_BASE, DVMA_MAP_END, 0);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	/*
	 * Reserve the DVMA space used for segment remapping.
	 * The remainder of phys_map is used for DVMA scratch
	 * memory pages (i.e. driver control blocks, etc.)
	 */
	segmap_addr = uvm_km_valloc_wait(phys_map, dvma_segmap_size);
	if (segmap_addr != DVMA_MAP_BASE)
		panic("dvma_init: unable to allocate DVMA segments");

	/*
	 * Create the VM pool used for mapping whole segments
	 * into DVMA space for the purpose of data transfer.
	 */
	rminit(dvma_segmap, dvma_segmap_size, segmap_addr,
		   "dvma_segmap", NUM_DVMA_SEGS);
}

/*
 * Allocate actual memory pages in DVMA space.
 * (idea for implementation borrowed from Chris Torek.)
 */
void *
dvma_malloc(bytes)
	size_t bytes;
{
	caddr_t new_mem;
	vsize_t new_size;

	if (!bytes)
		return NULL;
	new_size = m68k_round_page(bytes);
	new_mem = (caddr_t)uvm_km_alloc(phys_map, new_size);
	if (!new_mem)
		panic("dvma_malloc: no space in phys_map");
	/* The pmap code always makes DVMA pages non-cached. */
	return new_mem;
}

/*
 * Free pages from dvma_malloc()
 */
void
dvma_free(addr, size)
	void *addr;
	size_t size;
{
	vsize_t sz = m68k_round_page(size);

	uvm_km_free(phys_map, (vaddr_t)addr, sz);
}
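
/*
 * Example usage (illustrative sketch only, not compiled in):
 * a driver might keep a DMA control block in DVMA scratch
 * memory like this.  "struct xx_cblock" is a hypothetical name,
 * not a real driver structure.
 */
#if 0
	struct xx_cblock *cb;

	/* For a nonzero size, dvma_malloc() either succeeds or panics. */
	cb = dvma_malloc(sizeof(struct xx_cblock));
	/* ... use cb as the device's control block ... */
	dvma_free(cb, sizeof(struct xx_cblock));
#endif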

/*
 * Given a DVMA address, return the physical address that
 * would be used by some OTHER bus-master besides the CPU.
 * (Examples: on-board ie/le, VME xy board).
 */
u_long
dvma_kvtopa(kva, bustype)
	void *kva;
	int bustype;
{
	u_long addr, mask;

	addr = (u_long)kva;
	if ((addr & DVMA_MAP_BASE) != DVMA_MAP_BASE)
		panic("dvma_kvtopa: bad dvma addr=0x%lx", addr);

	switch (bustype) {
	case BUS_OBIO:
	case BUS_OBMEM:
		mask = DVMA_OBIO_SLAVE_MASK;
		break;
	default:	/* VME bus device. */
		mask = DVMA_VME_SLAVE_MASK;
		break;
	}

	return (addr & mask);
}
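
/*
 * Example (illustrative sketch only, not compiled in): handing
 * a DVMA buffer to an on-board DMA device.  The "regs" pointer
 * and its "dmaaddr" register are hypothetical stand-ins for a
 * real device's register layout.
 */
#if 0
	char *buf = dvma_malloc(MCLBYTES);

	/* The device must be given the slave address, not the CPU's. */
	regs->dmaaddr = dvma_kvtopa(buf, BUS_OBIO);
#endif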

/*
 * Given a range of kernel virtual space, remap all the
 * pages found there into the DVMA space (dup mappings).
 * This IS safe to call at interrupt time.
 * (Typically called at SPLBIO)
 */
void *
dvma_mapin(kva, len, canwait)
	void *kva;
	int len;
	int canwait; /* ignored */
{
	vaddr_t seg_kva, seg_dma;
	vsize_t seg_len, seg_off;
	vaddr_t v, x;
	int sme;
	int s;

	/* Get seg-aligned address and length. */
	seg_kva = (vaddr_t)kva;
	seg_len = (vsize_t)len;
	seg_off = seg_kva & SEGOFSET;
	seg_kva -= seg_off;
	seg_len = m68k_round_seg(seg_len + seg_off);

	s = splvm();

	/* Allocate the DVMA segment(s) */
	seg_dma = rmalloc(dvma_segmap, seg_len);

#ifdef	DIAGNOSTIC
	if (seg_dma & SEGOFSET)
		panic("dvma_mapin: seg not aligned");
#endif

	if (seg_dma != 0) {
		/* Duplicate the mappings into DMA space. */
		v = seg_kva;
		x = seg_dma;
		while (seg_len > 0) {
			sme = get_segmap(v);
#ifdef	DIAGNOSTIC
			if (sme == SEGINV)
				panic("dvma_mapin: seg not mapped");
#endif
#ifdef	HAVECACHE
			/* flush write-back on old mappings */
			if (cache_size)
				cache_flush_segment(v);
#endif
			set_segmap_allctx(x, sme);
			v += NBSG;
			x += NBSG;
			seg_len -= NBSG;
		}
		seg_dma += seg_off;
	}

	splx(s);
	return ((caddr_t)seg_dma);
}

/*
 * Free some DVMA space allocated by the above.
 * This IS safe to call at interrupt time.
 * (Typically called at SPLBIO)
 */
void
dvma_mapout(dma, len)
	void *dma;
	int len;
{
	vaddr_t seg_dma;
	vsize_t seg_len, seg_off;
	vaddr_t v, x;
	int sme;
	int s;

	/* Get seg-aligned address and length. */
	seg_dma = (vaddr_t)dma;
	seg_len = (vsize_t)len;
	seg_off = seg_dma & SEGOFSET;
	seg_dma -= seg_off;
	seg_len = m68k_round_seg(seg_len + seg_off);

	s = splvm();

	/* Flush cache and remove DVMA mappings. */
	v = seg_dma;
	x = v + seg_len;
	while (v < x) {
		sme = get_segmap(v);
#ifdef	DIAGNOSTIC
		if (sme == SEGINV)
			panic("dvma_mapout: seg not mapped");
#endif
#ifdef	HAVECACHE
		/* flush write-back on the DVMA mappings */
		if (cache_size)
			cache_flush_segment(v);
#endif
		set_segmap_allctx(v, SEGINV);
		v += NBSG;
	}

	rmfree(dvma_segmap, seg_len, seg_dma);
	splx(s);
}
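
/*
 * Example (illustrative sketch only, not compiled in): the usual
 * dvma_mapin()/dvma_mapout() pairing around a single transfer.
 * Drivers typically hold splbio() across the mapping; "xx_start"
 * and "sc" are hypothetical, standing in for a routine that loads
 * the device's DMA registers with the slave address obtained from
 * dvma_kvtopa().
 */
#if 0
	struct buf *bp;		/* the I/O request being started */
	void *dmabuf;
	int s;

	s = splbio();
	dmabuf = dvma_mapin(bp->b_data, bp->b_bcount, 0);
	if (dmabuf == NULL)
		panic("xx: out of DVMA segments");
	xx_start(sc, dvma_kvtopa(dmabuf, BUS_OBIO), bp->b_bcount);
	/* ... later, after the transfer completes ... */
	dvma_mapout(dmabuf, bp->b_bcount);
	splx(s);
#endif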