/* $NetBSD: isadma_bounce.c,v 1.4 2002/04/26 04:15:19 thorpej Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.4 2002/04/26 04:15:19 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

extern	paddr_t avail_end;

/*
 * ISA can only DMA to 0-16M.
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)
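
/*
 * Illustrative sketch: a DMA segment is reachable by an ISA device only
 * if it lies entirely below this threshold.  For a hypothetical segment
 * at physical address "pa" with length "len":
 *
 *	if (pa + len > ISA_DMA_BOUNCE_THRESHOLD)
 *		... the transfer must be bounced ...
 */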

/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map.
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4

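/*
 * Sizing note: id_bouncesegs[] above is a variable-length trailing
 * array declared with a single element (the pre-C99 idiom).  A cookie
 * that might bounce is therefore allocated with room for the map's full
 * segment count, as isadma_bounce_dmamap_create() below computes:
 *
 *	cookiesize = sizeof(struct isadma_bounce_cookie) +
 *	    sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1);
 *
 * The "- 1" accounts for the element already declared in the struct.
 */
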
int	isadma_bounce_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	isadma_bounce_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is the opposite case: the maximum number of
	 * segments a transfer can require is (maxxfer / PAGE_SIZE) + 1.
	 * If the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
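
/*
 * Usage sketch (hypothetical driver fragment; "ia" is assumed to be the
 * driver's struct isa_attach_args): drivers reach the routine above
 * through the bus_dma(9) front end, using the DMA tag supplied at
 * autoconfiguration time.
 *
 *	bus_dma_tag_t dmat = ia->ia_dmat;
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &map);
 *
 * Passing BUS_DMA_ALLOCNOW reserves the bounce pages here, at create
 * time, so a later load cannot fail for want of memory below 16M.
 */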

/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_direct(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}
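
/*
 * Usage sketch (hypothetical, continuing the fragment above): loading a
 * buffer that happens to live above 16M succeeds transparently; the
 * caller cannot tell whether it was bounced.
 *
 *	error = bus_dmamap_load(dmat, map, buf, buflen, NULL,
 *	    BUS_DMA_NOWAIT);
 *
 * On success, map->dm_segs[] describes physical addresses that are
 * always ISA-reachable: either the original pages or the bounce pages.
 */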

/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf_direct(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just drain the write buffer
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		alpha_mb();
		return;
	}

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

	/* Drain the write buffer. */
	alpha_mb();
}
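
/*
 * Usage sketch (hypothetical driver fragment): the sync calls above are
 * what actually move data through the bounce pages, so they are
 * required even though they may look like no-ops on non-bouncing
 * hardware.  For a device-write (memory-to-device) transfer:
 *
 *	bus_dmamap_sync(dmat, map, 0, buflen, BUS_DMASYNC_PREWRITE);
 *	... start the DMA transfer ...
 *	... wait for the completion interrupt ...
 *	bus_dmamap_sync(dmat, map, 0, buflen, BUS_DMASYNC_POSTWRITE);
 *
 * A device-read transfer pairs PREREAD with POSTREAD; POSTREAD is the
 * point at which bounced data is copied back into the caller's buffer.
 */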

/*
 * Allocate memory safe for ISA DMA.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}
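
/*
 * Usage sketch (hypothetical): a driver wanting a long-lived DMA-safe
 * buffer, rather than per-transfer bouncing, can allocate one directly;
 * the routine above constrains the pages to lie below the 16M line.
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	caddr_t kva;
 *
 *	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &seg, 1,
 *	    &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(dmat, &seg, rseg, size, &kva,
 *		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 */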

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return (error);
}

void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}