/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES	1024

struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN	BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

#define BUS_DMAMEM_KMALLOC(dmat) \
	((dmat)->maxsize <= PAGE_SIZE && \
	 (dmat)->alignment <= PAGE_SIZE && \
	 (dmat)->lowaddr >= ptoa(Maxmem))

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
#ifdef SMP
	struct spinlock	spin;
#else
	int		unused0;
#endif
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};

#ifdef SMP
#define BZ_LOCK(bz)	spin_lock(&(bz)->spin)
#define BZ_UNLOCK(bz)	spin_unlock(&(bz)->spin)
#else
#define BZ_LOCK(bz)	crit_enter()
#define BZ_UNLOCK(bz)	crit_exit()
#endif

static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_token);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1; /* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);

static struct bus_dmamap nobounce_dmamap;

static int		alloc_bounce_zone(bus_dma_tag_t);
static int		alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static int		reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void		return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t	add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
			    vm_offset_t, bus_size_t);
static void		free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t	get_map_waiting(bus_dma_tag_t);
static void		add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
	   0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
	   0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
	   &bounce_alignment, 0, "Obey alignment constraint");

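/*
 * Return non-zero if the physical page containing paddr cannot be DMA'd
 * directly under the constraints of dmat (or any of its parent tags) and
 * must therefore be bounced.  A page needs bouncing if it falls inside
 * the tag's lowaddr/highaddr exclusion window or, when
 * hw.busdma.bounce_alignment is enabled, violates the tag's alignment,
 * unless an attached filter callback returns 0 to accept it.
 */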
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL ||
		     dmat->filter(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/*
	 * Sanity checks
	 */

	if (alignment == 0)
		alignment = 1;
	if (alignment & (alignment - 1))
		panic("alignment must be power of 2\n");

	if (boundary != 0) {
		if (boundary & (boundary - 1))
			panic("boundary must be power of 2\n");
		if (boundary < maxsegsz) {
			kprintf("boundary < maxsegsz:\n");
			print_backtrace(-1);
			maxsegsz = boundary;
		}
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

#ifdef notyet
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			parent->ref_count++;
	}

	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}
back:
	if (error)
		kfree(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	return error;
}
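
/*
 * Illustrative sketch (hypothetical driver code, not part of this file's
 * API): an attach routine might create a tag for a 64KB, single-segment,
 * 32-bit-addressable DMA buffer roughly as follows.  The constraint
 * values and the error handling are placeholders.
 *
 *	bus_dma_tag_t tag;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL,		// parent
 *				   4,			// alignment
 *				   0,			// boundary
 *				   BUS_SPACE_MAXADDR_32BIT, // lowaddr
 *				   BUS_SPACE_MAXADDR,	// highaddr
 *				   NULL, NULL,		// filter, filterarg
 *				   65536,		// maxsize
 *				   1,			// nsegments
 *				   65536,		// maxsegsz
 *				   0,			// flags
 *				   &tag);
 *
 * The tag is released with bus_dma_tag_destroy(tag) once all of its maps
 * have been destroyed.
 */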

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					kfree(dmat->segments, M_DEVBUF);
				kfree(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (!error)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (!error)
		dmat->map_count++;
	return error;
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

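/*
 * Verify that a kmalloc()ed DMA buffer honors the tag's constraints:
 * it must not cross a page boundary and must satisfy the tag's
 * alignment.  Returns 0 if the buffer is acceptable, otherwise the size
 * that bus_dmamem_alloc() should round up to a power of 2 and retry
 * with.  When 'verify' is set the failure is unexpected and a backtrace
 * is printed.
 */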
static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		kprintf("boundary check failed\n");
		if (verify)
			print_backtrace(-1); /* XXX panic */
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		kprintf("alignment check failed\n");
		if (verify)
			print_backtrace(-1); /* XXX panic */
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * XXX
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			for (size = 1; size <= maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if (BUS_DMAMEM_KMALLOC(dmat))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}
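
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * allocating and releasing a DMA-safe descriptor ring with the two
 * routines above.  'tag' is assumed to have been created with
 * bus_dma_tag_create(); 'ring' and 'map' are hypothetical locals.
 *
 *	void *ring;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(tag, &ring, BUS_DMA_WAITOK | BUS_DMA_ZERO,
 *			     &map) != 0)
 *		return (ENOMEM);
 *	// ... use the ring; note that 'map' comes back NULL here ...
 *	bus_dmamem_free(tag, ring, map);
 */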

static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr);
	else
		return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastpaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments,
			int nsegments,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

#ifdef INVARIANTS
	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0)
		error = EFBIG;

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustments".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
			dmat->segments, dmat->nsegments,
			NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS)
		return error;

	callback(callback_arg, dmat->segments, nsegs, error);
	return 0;
}
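
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the segment list is delivered through the callback, which may run
 * synchronously or, when bounce pages are exhausted, later from the
 * busdma software interrupt.  'softc', 'sc_tag', 'sc_map' and
 * dma_map_cb() are hypothetical names.
 *
 *	static void
 *	dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct softc *sc = arg;
 *
 *		if (error)
 *			return;
 *		sc->sc_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_tag, sc->sc_map, buf, buflen,
 *				dma_map_cb, sc, 0);
 */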

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
			dmat->segments, dmat->nsegments, &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, dmat->segments, 0, 0, error);
	} else {
		callback(callback_arg, dmat->segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d\n", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only supports %d segments\n",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported\n"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
			}
			first = 0;
		}
#ifdef INVARIANTS
		if (!error)
			KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
	} else {
		*nsegs = 0;
		error = EINVAL;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				dmat->segments, dmat->nsegments,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, dmat->segments, 0, 0, error);
	} else {
		callback(callback_arg, dmat->segments, nsegs,
			 (bus_size_t)uio->uio_resid, error);
	}
	return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
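
/*
 * Illustrative sketch (hypothetical driver code): when bouncing is in
 * effect the client data and the bounce pages only agree if the map is
 * synced around each transfer, e.g.
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	// ... hand the CPU-written buffer to the device ...
 *
 *	// ... device has DMA'd data into the buffer ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 */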

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned\n"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&bounce_zone_tok);

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if (dmat->alignment <= bz->alignment &&
		    dmat->lowaddr >= bz->lowaddr) {
			lwkt_reltoken(&bounce_zone_tok);

			dmat->bounce_zone = bz;
			kfree(new_bz, M_DEVBUF);
			return 0;
		}
	}
	bz = new_bz;

#ifdef SMP
	spin_init(&bz->spin);
#endif
	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = round_page(dmat->alignment);
	ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

	lwkt_reltoken(&bounce_zone_tok);

	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that were failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return 0;
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 mflags, 0ul,
							 bz->lowaddr,
							 bz->alignment, 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		BZ_LOCK(bz);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		BZ_UNLOCK(bz);

		count++;
		numpages--;
	}
	return count;
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}

	bz->free_bpages -= pages;

	bz->reserved_bpages += pages;
	KKASSERT(bz->reserved_bpages <= bz->total_bpages);

	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}

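/*
 * Give back any pages that were reserved for 'map' but never consumed
 * by add_bounce_page(), then wake up the next map (if any) waiting for
 * bounce pages.
 */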
static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int reserved = map->pagesreserved;
	bus_dmamap_t wait_map;

	map->pagesreserved = 0;
	map->pagesneeded = 0;

	if (reserved == 0)
		return;

	BZ_LOCK(bz);

	bz->free_bpages += reserved;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->reserved_bpages >= reserved);
	bz->reserved_bpages -= reserved;

	wait_map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (wait_map != NULL)
		add_map_callback(wait_map);
}

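/*
 * Take one page off the bounce zone's free list and assign it to the
 * given client address range.  Consumes one previously reserved page
 * and returns the bus address the caller should hand to the device.
 */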
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}

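/*
 * Return an active bounce page to the zone's free list and, if a map is
 * waiting for pages, schedule its deferred callback.
 */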
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
	if (map != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			bz->total_deferred++;
		} else {
			map = NULL;
		}
	}
	return map;
}

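/*
 * Queue a map whose deferred resources are now available so that the
 * busdma software interrupt can re-issue its bus_dmamap_load().
 * Currently unimplemented; the callback list is not yet MPSAFE.
 */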
static void
add_map_callback(bus_dmamap_t map)
{
#ifdef notyet
	/* XXX callbacklist is not MPSAFE */
	crit_enter();
	get_mplock();
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	setsoftvm();
	rel_mplock();
	crit_exit();
#else
	panic("%s uncoded\n", __func__);
#endif
}

#ifdef notyet
void
busdma_swi(void)
{
	bus_dmamap_t map;

	crit_enter();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		crit_exit();
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		crit_enter();
	}
	crit_exit();
}
#endif