/*	$NetBSD: isadma_machdep.c,v 1.3 2002/08/17 20:46:29 thorpej Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)
extern struct arm32_dma_range *shark_isa_dma_ranges;
extern int shark_isa_dma_nranges;

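/*
 * Hedged illustration (not part of this file's logic): each DMA-safe
 * window is described by an arm32_dma_range entry giving its system
 * base address, bus base address and length.  The values below are
 * hypothetical, and dr_sysbase/dr_busbase/dr_len are assumed to be
 * the usual arm32 field names:
 *
 *	static struct arm32_dma_range example_range = {
 *		0x08000000,		hypothetical dr_sysbase
 *		0x00000000,		hypothetical dr_busbase
 *		4 * 1024 * 1024		hypothetical dr_len (4M window)
 *	};
 *
 *	shark_isa_dma_ranges = &example_range;
 *	shark_isa_dma_nranges = 1;
 */
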
int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* pre */
	_isa_bus_dmamap_sync,		/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

/*
 * Initialize ISA DMA.
 */
void
isa_dma_init()
{

	isa_bus_dma_tag._ranges = shark_isa_dma_ranges;
	isa_bus_dma_tag._nranges = shark_isa_dma_nranges;
}
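
#if 0
/*
 * Hedged usage sketch (never compiled): the driver-side sequence that
 * exercises the wrappers below through the generic bus_dma interface.
 * "example_isa_write" and its arguments are hypothetical; error
 * handling is trimmed for brevity.
 */
static int
example_isa_write(void *buf, bus_size_t len)
{
	bus_dma_tag_t dmat = &isa_bus_dma_tag;
	bus_dmamap_t dmamap;
	int error;

	/* Create the map; on this port every map "might bounce". */
	error = bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &dmamap);
	if (error)
		return (error);

	/* Load the buffer; this may transparently use bounce pages. */
	error = bus_dmamap_load(dmat, dmamap, buf, len, NULL,
	    BUS_DMA_NOWAIT);
	if (error == 0) {
		/* Copy caller data into the bounce pages before the DMA. */
		bus_dmamap_sync(dmat, dmamap, 0, len, BUS_DMASYNC_PREWRITE);
		/* ... program the DMA controller, await completion ... */
		bus_dmamap_sync(dmat, dmamap, 0, len, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, dmamap);
	}
	bus_dmamap_destroy(dmat, dmamap);
	return (error);
}
#endif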

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
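	/*
	 * Note (hedged): the bounce segment array is carved out of this
	 * same allocation, immediately after the cookie proper, which is
	 * presumably where the cookie's id_bouncesegs storage comes from.
	 */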

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
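
/*
 * Hedged note: a driver that cannot tolerate allocation failure at
 * transfer time would pass BUS_DMA_ALLOCNOW when creating the map,
 * e.g. (hypothetical sizes):
 *
 *	bus_dmamap_create(&isa_bus_dma_tag, size, 1, size, 0,
 *	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &map);
 *
 * which makes _isa_bus_dmamap_create() reserve the bounce pages up
 * front and keeps them across _isa_bus_dmamap_unload().
 */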

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
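
/*
 * Hedged sketch of how the sync operations pair up on a bouncing map
 * for a device-to-memory (read) transfer; "t", "map" and "len" are
 * hypothetical:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	...device DMAs into the bounce pages...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * PREREAD copies nothing; POSTREAD copies the bounce pages back into
 * the caller's linear buffer or mbuf chain, as implemented above.
 */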

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}