xref: /netbsd/sys/arch/alpha/common/sgmap_typedep.c (revision bf9ec67e)
1 /* $NetBSD: sgmap_typedep.c,v 1.23 2002/04/26 04:15:18 thorpej Exp $ */
2 
3 /*-
4  * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 __KERNEL_RCSID(0, "$NetBSD: sgmap_typedep.c,v 1.23 2002/04/26 04:15:18 thorpej Exp $");
41 
42 #include "opt_ddb.h"
43 
#ifdef SGMAP_DEBUG
/* Non-zero enables the SGMAP_DEBUG printf tracing throughout this file. */
int			__C(SGMAP_TYPE,_debug) = 0;
#endif

/*
 * PTE that maps the prefetch spill page.  Zero until lazily
 * initialized by __C(SGMAP_TYPE,_init_spill_page_pte)() on the
 * first load.
 */
SGMAP_PTE_TYPE		__C(SGMAP_TYPE,_prefetch_spill_page_pte);

/*
 * Forward declaration: map one virtually-contiguous buffer into the
 * SGMAP window as DMA segment `seg' of the map.
 */
int			__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t,
			    bus_dmamap_t, void *buf, size_t buflen,
			    struct proc *, int, int, struct alpha_sgmap *);
53 
54 void
55 __C(SGMAP_TYPE,_init_spill_page_pte)(void)
56 {
57 
58 	__C(SGMAP_TYPE,_prefetch_spill_page_pte) =
59 	    (alpha_sgmap_prefetch_spill_page_pa >>
60 	     SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
61 }
62 
/*
 * Map one virtually-contiguous buffer into the SGMAP window as DMA
 * segment `seg' of `map': allocate scatter-gather virtual space from
 * the sgmap's extent map, point one SG PTE at each physical page of
 * the buffer, and record the resulting bus (DMA) address in
 * map->dm_segs[seg].
 *
 * Returns 0 on success or the errno from extent_alloc() (typically
 * EAGAIN/ENOMEM when the window is exhausted and BUS_DMA_NOWAIT is
 * set).
 */
int
__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct proc *p, int flags, int seg,
    struct alpha_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset, sgva;
	bus_size_t sgvalen, boundary, alignment;
	SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
	int s, pteidx, error, spill;

	/* Initialize the spill page PTE if it hasn't been already. */
	if (__C(SGMAP_TYPE,_prefetch_spill_page_pte) == 0)
		__C(SGMAP_TYPE,_init_spill_page_pte)();

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & PGOFSET;

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug)) {
		printf("sgmap_load: ----- buf = %p -----\n", buf);
		printf("sgmap_load: dmaoffset = 0x%lx, buflen = 0x%lx\n",
		    dmaoffset, buflen);
	}
#endif

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */

	/*
	 * XXX Always allocate a spill page for now.  Note
	 * the spill page is not needed for an in-bound-only
	 * transfer.
	 */
	if ((flags & BUS_DMA_READ) == 0)
		spill = 1;
	else
		spill = 0;

	/* Page-align the region the buffer occupies. */
	endva = round_page(va + buflen);
	va = trunc_page(va);

	boundary = map->_dm_boundary;
	alignment = NBPG;

	sgvalen = (endva - va);
	if (spill) {
		/* One extra page after the data for device prefetch. */
		sgvalen += NBPG;

		/*
		 * ARGH!  If the addition of the spill page bumped us
		 * over our boundary, we have to 2x the boundary limit.
		 */
		if (boundary && boundary < sgvalen) {
			/*
			 * Keep the allocation aligned to the original
			 * boundary so no segment crosses it, while the
			 * doubled boundary makes the larger region fit.
			 */
			alignment = boundary;
			do {
				boundary <<= 1;
			} while (boundary < sgvalen);
		}
	}

#if 0
	printf("len 0x%lx -> 0x%lx, boundary 0x%lx -> 0x%lx -> ",
	    (endva - va), sgvalen, map->_dm_boundary, boundary);
#endif

	/* Block interrupts while manipulating the shared extent map. */
	s = splvm();
	error = extent_alloc(sgmap->aps_ex, sgvalen, alignment, boundary,
	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &sgva);
	splx(s);
	if (error)
		return (error);

#if 0
	printf("error %d sgva 0x%lx\n", error, sgva);
#endif

	/* Locate the first SG PTE covering the allocated region. */
	pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
	pte = &page_table[pteidx * SGMAP_PTE_SPACING];

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug))
		printf("sgmap_load: sgva = 0x%lx, pteidx = %d, "
		    "pte = %p (pt = %p)\n", sgva, pteidx, pte,
		    page_table);
#endif

	/* Generate the DMA address. */
	map->dm_segs[seg].ds_addr = sgmap->aps_wbase | sgva | dmaoffset;
	map->dm_segs[seg].ds_len = buflen;

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug))
		printf("sgmap_load: wbase = 0x%lx, vpage = 0x%x, "
		    "dma addr = 0x%lx\n", sgmap->aps_wbase, sgva,
		    map->dm_segs[seg].ds_addr);
#endif

	/* Point one SG PTE at each physical page backing the buffer. */
	for (; va < endva; va += NBPG, pteidx++,
	     pte = &page_table[pteidx * SGMAP_PTE_SPACING]) {
		/* Get the physical address for this segment. */
		if (p != NULL)
			/*
			 * NOTE(review): return value unchecked — assumes
			 * the user va is resident/mapped; confirm callers
			 * guarantee wired pages.
			 */
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = vtophys(va);

		/* Load the current PTE with this page. */
		*pte = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug))
			printf("sgmap_load:     pa = 0x%lx, pte = %p, "
			    "*pte = 0x%lx\n", pa, pte, (u_long)(*pte));
#endif
	}

	if (spill) {
		/* ...and the prefetch-spill page. */
		*pte = __C(SGMAP_TYPE,_prefetch_spill_page_pte);
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug)) {
			printf("sgmap_load:     spill page, pte = %p, "
			    "*pte = 0x%lx\n", pte, *pte);
			printf("sgmap_load:     pte count = %d\n",
			    map->_dm_ptecnt);
		}
#endif
	}

	return (0);
}
199 
200 int
201 __C(SGMAP_TYPE,_load)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
202     bus_size_t buflen, struct proc *p, int flags, struct alpha_sgmap *sgmap)
203 {
204 	int seg, error;
205 
206 	/*
207 	 * Make sure that on error condition we return "no valid mappings".
208 	 */
209 	map->dm_mapsize = 0;
210 	map->dm_nsegs = 0;
211 
212 	if (buflen > map->_dm_size)
213 		return (EINVAL);
214 
215 	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
216 	    (BUS_DMA_READ|BUS_DMA_WRITE));
217 
218 	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);
219 
220 	seg = 0;
221 	error = __C(SGMAP_TYPE,_load_buffer)(t, map, buf, buflen, p,
222 	    flags, seg, sgmap);
223 
224 	alpha_mb();
225 
226 #if defined(SGMAP_DEBUG) && defined(DDB)
227 	if (__C(SGMAP_TYPE,_debug) > 1)
228 		Debugger();
229 #endif
230 
231 	if (error == 0) {
232 		map->dm_mapsize = buflen;
233 		map->dm_nsegs = 1;
234 		map->_dm_window = t;
235 	} else {
236 		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
237 		if (t->_next_window != NULL) {
238 			/* Give the next window a chance. */
239 			error = bus_dmamap_load(t->_next_window, map, buf,
240 			    buflen, p, flags);
241 		}
242 	}
243 	return (error);
244 }
245 
246 int
247 __C(SGMAP_TYPE,_load_mbuf)(bus_dma_tag_t t, bus_dmamap_t map,
248     struct mbuf *m0, int flags, struct alpha_sgmap *sgmap)
249 {
250 	struct mbuf *m;
251 	int seg, error;
252 
253 	/*
254 	 * Make sure that on error condition we return "no valid mappings".
255 	 */
256 	map->dm_mapsize = 0;
257 	map->dm_nsegs = 0;
258 
259 #ifdef DIAGNOSTIC
260 	if ((m0->m_flags & M_PKTHDR) == 0)
261 		panic(__S(__C(SGMAP_TYPE,_load_mbuf)) ": no packet header");
262 #endif
263 
264 	if (m0->m_pkthdr.len > map->_dm_size)
265 		return (EINVAL);
266 
267 	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
268 	    (BUS_DMA_READ|BUS_DMA_WRITE));
269 
270 	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);
271 
272 	seg = 0;
273 	error = 0;
274 	for (m = m0; m != NULL && error == 0; m = m->m_next, seg++)
275 		error = __C(SGMAP_TYPE,_load_buffer)(t, map,
276 		    m->m_data, m->m_len, NULL, flags, seg, sgmap);
277 
278 	alpha_mb();
279 
280 #if defined(SGMAP_DEBUG) && defined(DDB)
281 	if (__C(SGMAP_TYPE,_debug) > 1)
282 		Debugger();
283 #endif
284 
285 	if (error == 0) {
286 		map->dm_mapsize = m0->m_pkthdr.len;
287 		map->dm_nsegs = seg;
288 		map->_dm_window = t;
289 	} else {
290 		/* Need to back out what we've done so far. */
291 		map->dm_nsegs = seg - 1;
292 		__C(SGMAP_TYPE,_unload)(t, map, sgmap);
293 		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
294 		if (t->_next_window != NULL) {
295 			/* Give the next window a chance. */
296 			error = bus_dmamap_load_mbuf(t->_next_window, map,
297 			    m0, flags);
298 		}
299 	}
300 
301 	return (error);
302 }
303 
304 int
305 __C(SGMAP_TYPE,_load_uio)(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
306     int flags, struct alpha_sgmap *sgmap)
307 {
308 	bus_size_t minlen, resid;
309 	struct proc *p = NULL;
310 	struct iovec *iov;
311 	caddr_t addr;
312 	int i, seg, error;
313 
314 	/*
315 	 * Make sure that on error condition we return "no valid mappings".
316 	 */
317 	map->dm_mapsize = 0;
318 	map->dm_nsegs = 0;
319 
320 	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
321 	    (BUS_DMA_READ|BUS_DMA_WRITE));
322 
323 	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);
324 
325 	resid = uio->uio_resid;
326 	iov = uio->uio_iov;
327 
328 	if (uio->uio_segflg == UIO_USERSPACE) {
329 		p = uio->uio_procp;
330 #ifdef DIAGNOSTIC
331 		if (p == NULL)
332 			panic(__S(__C(SGMAP_TYPE,_load_uio))
333 			    ": USERSPACE but no proc");
334 #endif
335 	}
336 
337 	seg = 0;
338 	error = 0;
339 	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0;
340 	     i++, seg++) {
341 		/*
342 		 * Now at the first iovec to load.  Load each iovec
343 		 * until we have exhausted the residual count.
344 		 */
345 		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
346 		addr = (caddr_t)iov[i].iov_base;
347 
348 		error = __C(SGMAP_TYPE,_load_buffer)(t, map,
349 		    addr, minlen, p, flags, seg, sgmap);
350 
351 		resid -= minlen;
352 	}
353 
354 	alpha_mb();
355 
356 #if defined(SGMAP_DEBUG) && defined(DDB)
357 	if (__C(SGMAP_TYPE,_debug) > 1)
358 		Debugger();
359 #endif
360 
361 	if (error == 0) {
362 		map->dm_mapsize = uio->uio_resid;
363 		map->dm_nsegs = seg;
364 		map->_dm_window = t;
365 	} else {
366 		/* Need to back out what we've done so far. */
367 		map->dm_nsegs = seg - 1;
368 		__C(SGMAP_TYPE,_unload)(t, map, sgmap);
369 		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
370 		if (t->_next_window != NULL) {
371 			/* Give the next window a chance. */
372 			error = bus_dmamap_load_uio(t->_next_window, map,
373 			    uio, flags);
374 		}
375 	}
376 
377 	return (error);
378 }
379 
/*
 * bus_dmamap_load_raw() back-end: required by the bus_dma interface
 * but not implemented for SGMAP windows — any caller reaching this
 * panics.
 */
int
__C(SGMAP_TYPE,_load_raw)(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags,
    struct alpha_sgmap *sgmap)
{

	/* Same direction sanity check as the implemented loaders. */
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	panic(__S(__C(SGMAP_TYPE,_load_raw)) ": not implemented");
}
391 
/*
 * Tear down every mapping in `map': zero the scatter-gather PTEs of
 * each segment (including the trailing spill page for out-bound
 * transfers), then return the SGMAP virtual space to the extent map
 * and mark the map invalid.
 */
void
__C(SGMAP_TYPE,_unload)(bus_dma_tag_t t, bus_dmamap_t map,
    struct alpha_sgmap *sgmap)
{
	SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
	bus_addr_t osgva, sgva, esgva;
	int s, error, spill, seg, pteidx;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		/*
		 * XXX Always allocate a spill page for now.  Note
		 * the spill page is not needed for an in-bound-only
		 * transfer.
		 */
		if ((map->_dm_flags & BUS_DMA_READ) == 0)
			spill = 1;
		else
			spill = 0;

		/* Recover the SGMAP virtual address from the DMA address. */
		sgva = map->dm_segs[seg].ds_addr & ~sgmap->aps_wbase;

		/* Page-align the region; remember its start for the free. */
		esgva = round_page(sgva + map->dm_segs[seg].ds_len);
		osgva = sgva = trunc_page(sgva);

		/* The spill page sits immediately after the data pages. */
		if (spill)
			esgva += NBPG;

		/* Invalidate the PTEs for the mapping. */
		for (pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
		     sgva < esgva; sgva += NBPG, pteidx++) {
			pte = &page_table[pteidx * SGMAP_PTE_SPACING];
#ifdef SGMAP_DEBUG
			if (__C(SGMAP_TYPE,_debug))
				printf("sgmap_unload:     pte = %p, "
				    "*pte = 0x%lx\n", pte, (u_long)(*pte));
#endif
			*pte = 0;
		}

		/* Make the invalidations visible before freeing the space. */
		alpha_mb();

		/* Free the virtual address space used by the mapping. */
		s = splvm();
		error = extent_free(sgmap->aps_ex, osgva, (esgva - osgva),
		    EX_NOWAIT);
		splx(s);
		if (error)
			panic(__S(__C(SGMAP_TYPE,_unload)));
	}

	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);

	/* Mark the mapping invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
}
449