/* $NetBSD: sgmap_typedep.c,v 1.37 2010/12/15 01:28:24 matt Exp $ */

/*-
 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap_typedep.c,v 1.37 2010/12/15 01:28:24 matt Exp $");

#include "opt_ddb.h"

#include <uvm/uvm_extern.h>

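/*
 * This file is not compiled on its own; it is #included by a wrapper
 * that defines SGMAP_TYPE, SGMAP_PTE_TYPE, SGPTE_PGADDR_SHIFT,
 * SGPTE_VALID, SGMAP_ADDR_PTEIDX_SHIFT, and SGMAP_PTE_SPACING, so that
 * the same code can be instantiated for each SGMAP PTE flavor
 * (e.g. 32-bit and 64-bit PTE formats).  The __C() macro pastes
 * SGMAP_TYPE into each symbol name.
 */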
#ifdef SGMAP_DEBUG
int			__C(SGMAP_TYPE,_debug) = 0;
#endif

SGMAP_PTE_TYPE		__C(SGMAP_TYPE,_prefetch_spill_page_pte);

int			__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t,
			    bus_dmamap_t, void *buf, size_t buflen,
			    struct vmspace *, int, int, struct alpha_sgmap *);
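
/*
 * Build the PTE that maps the prefetch spill page.  This can be done
 * only after the spill page has been allocated and
 * alpha_sgmap_prefetch_spill_page_pa is valid, hence the lazy
 * initialization in _load_buffer() below.
 */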
void
__C(SGMAP_TYPE,_init_spill_page_pte)(void)
{

	__C(SGMAP_TYPE,_prefetch_spill_page_pte) =
	    (alpha_sgmap_prefetch_spill_page_pa >>
	     SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
}

int
__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct vmspace *vm, int flags, int seg,
    struct alpha_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset, sgva;
	bus_size_t sgvalen, boundary, alignment;
	SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
	int s, pteidx, error, spill;

	/* Initialize the spill page PTE if it hasn't been already. */
	if (__C(SGMAP_TYPE,_prefetch_spill_page_pte) == 0)
		__C(SGMAP_TYPE,_init_spill_page_pte)();

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & PGOFSET;

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug)) {
		printf("sgmap_load: ----- buf = %p -----\n", buf);
		printf("sgmap_load: dmaoffset = 0x%lx, buflen = 0x%lx\n",
		    dmaoffset, buflen);
	}
#endif

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */

	/*
	 * XXX Always allocate a spill page for now.  Note
	 * the spill page is not needed for an in-bound-only
	 * transfer.
	 */
	if ((flags & BUS_DMA_READ) == 0)
		spill = 1;
	else
		spill = 0;

	endva = round_page(va + buflen);
	va = trunc_page(va);

	boundary = map->_dm_boundary;
	alignment = PAGE_SIZE;

	sgvalen = (endva - va);
	if (spill) {
		sgvalen += PAGE_SIZE;

		/*
		 * ARGH!  If the addition of the spill page bumped us
		 * over our boundary, we have to 2x the boundary limit.
		 */
		if (boundary && boundary < sgvalen) {
			alignment = boundary;
			do {
				boundary <<= 1;
			} while (boundary < sgvalen);
		}
	}

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug)) {
		printf("sgmap_load: va:endva = 0x%lx:0x%lx\n", va, endva);
		printf("sgmap_load: sgvalen = 0x%lx, boundary = 0x%lx\n",
		       sgvalen, boundary);
	}
#endif

	s = splvm();
	error = extent_alloc(sgmap->aps_ex, sgvalen, alignment, boundary,
	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &sgva);
	splx(s);
	if (error)
		return (error);

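	/*
	 * The allocated SGVA selects the first entry in the SGMAP
	 * page table; SGMAP_PTE_SPACING allows for page tables whose
	 * entries are not packed contiguously in memory.
	 */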
	pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
	pte = &page_table[pteidx * SGMAP_PTE_SPACING];

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug))
		printf("sgmap_load: sgva = 0x%lx, pteidx = %d, "
		    "pte = %p (pt = %p)\n", sgva, pteidx, pte,
		    page_table);
#endif

	/* Generate the DMA address. */
	map->dm_segs[seg].ds_addr = sgmap->aps_wbase | sgva | dmaoffset;
	map->dm_segs[seg].ds_len = buflen;

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug))
		printf("sgmap_load: wbase = 0x%lx, vpage = 0x%lx, "
		    "DMA addr = 0x%lx\n", sgmap->aps_wbase, (uint64_t)sgva,
		    map->dm_segs[seg].ds_addr);
#endif

	for (; va < endva; va += PAGE_SIZE, pteidx++,
	     pte = &page_table[pteidx * SGMAP_PTE_SPACING]) {
		/* Get the physical address for this segment. */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm->vm_map.pmap, va, &pa);
		else
			pa = vtophys(va);

		/* Load the current PTE with this page. */
		*pte = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug))
			printf("sgmap_load:     pa = 0x%lx, pte = %p, "
			    "*pte = 0x%lx\n", pa, pte, (u_long)(*pte));
#endif
	}

	if (spill) {
		/* ...and the prefetch-spill page. */
		*pte = __C(SGMAP_TYPE,_prefetch_spill_page_pte);
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug)) {
			printf("sgmap_load:     spill page, pte = %p, "
			    "*pte = 0x%lx\n", pte, (uint64_t)*pte);
		}
#endif
	}

	return (0);
}

int
__C(SGMAP_TYPE,_load)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, struct alpha_sgmap *sgmap)
{
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

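	/* Record the transfer direction; unload uses it to decide
	   whether a spill page was mapped. */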
	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}
	seg = 0;
	error = __C(SGMAP_TYPE,_load_buffer)(t, map, buf, buflen, vm,
	    flags, seg, sgmap);

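	/*
	 * Memory barrier: make sure the PTE stores have reached
	 * memory before the device can begin the transfer.
	 */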
	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		Debugger();
#endif

	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->_dm_window = t;
	} else {
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			error = bus_dmamap_load(t->_next_window, map, buf,
			    buflen, p, flags);
		}
	}
	return (error);
}

int
__C(SGMAP_TYPE,_load_mbuf)(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags, struct alpha_sgmap *sgmap)
{
	struct mbuf *m;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic(__S(__C(SGMAP_TYPE,_load_mbuf)) ": no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	seg = 0;
	error = 0;
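	/* Map each non-empty mbuf in the chain as its own DMA segment. */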
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = __C(SGMAP_TYPE,_load_buffer)(t, map,
		    m->m_data, m->m_len, vmspace_kernel(), flags, seg, sgmap);
		seg++;
	}

	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		Debugger();
#endif

	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg;
		map->_dm_window = t;
	} else {
		/* Need to back out what we've done so far. */
		map->dm_nsegs = seg - 1;
		__C(SGMAP_TYPE,_unload)(t, map, sgmap);
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			error = bus_dmamap_load_mbuf(t->_next_window, map,
			    m0, flags);
		}
	}

	return (error);
}

int
__C(SGMAP_TYPE,_load_uio)(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags, struct alpha_sgmap *sgmap)
{
	bus_size_t minlen, resid;
	struct vmspace *vm;
	struct iovec *iov;
	void *addr;
	int i, seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	vm = uio->uio_vmspace;

	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0;
	     i++, seg++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = __C(SGMAP_TYPE,_load_buffer)(t, map,
		    addr, minlen, vm, flags, seg, sgmap);

		resid -= minlen;
	}

	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		Debugger();
#endif

	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg;
		map->_dm_window = t;
	} else {
		/* Need to back out what we've done so far. */
		map->dm_nsegs = seg - 1;
		__C(SGMAP_TYPE,_unload)(t, map, sgmap);
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			error = bus_dmamap_load_uio(t->_next_window, map,
			    uio, flags);
		}
	}

	return (error);
}

int
__C(SGMAP_TYPE,_load_raw)(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags,
    struct alpha_sgmap *sgmap)
{

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	panic(__S(__C(SGMAP_TYPE,_load_raw)) ": not implemented");
}

void
__C(SGMAP_TYPE,_unload)(bus_dma_tag_t t, bus_dmamap_t map,
    struct alpha_sgmap *sgmap)
{
	SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
	bus_addr_t osgva, sgva, esgva;
	int s, error, spill, seg, pteidx;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		/*
		 * XXX Always allocate a spill page for now.  Note
		 * the spill page is not needed for an in-bound-only
		 * transfer.
		 */
		if ((map->_dm_flags & BUS_DMA_READ) == 0)
			spill = 1;
		else
			spill = 0;

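		/* Strip the window base to recover the SGVA we allocated. */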
		sgva = map->dm_segs[seg].ds_addr & ~sgmap->aps_wbase;

		esgva = round_page(sgva + map->dm_segs[seg].ds_len);
		osgva = sgva = trunc_page(sgva);

		if (spill)
			esgva += PAGE_SIZE;

		/* Invalidate the PTEs for the mapping. */
		for (pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
		     sgva < esgva; sgva += PAGE_SIZE, pteidx++) {
			pte = &page_table[pteidx * SGMAP_PTE_SPACING];
#ifdef SGMAP_DEBUG
			if (__C(SGMAP_TYPE,_debug))
				printf("sgmap_unload:     pte = %p, "
				    "*pte = 0x%lx\n", pte, (u_long)(*pte));
#endif
			*pte = 0;
		}

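		/* Make the invalidations visible before the SGVA is reused. */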
		alpha_mb();

		/* Free the virtual address space used by the mapping. */
		s = splvm();
		error = extent_free(sgmap->aps_ex, osgva, (esgva - osgva),
		    EX_NOWAIT);
		splx(s);
		if (error)
			panic(__S(__C(SGMAP_TYPE,_unload)));
	}

	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);

	/* Mark the mapping invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
}
449