/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef __i386__
#define MAX_BPAGES 512
#else
#define MAX_BPAGES 8192
#endif
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags);

#ifdef XEN
#undef pmap_kextract
#define pmap_kextract pmap_kextract_ma
#endif

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter
 * callback to check for a match; if there is no filter callback, then
 * assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
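
/*
 * Worked example of the alignment test above, with illustrative values:
 * for dmat->alignment == 0x1000 (4KB) and paddr == 0x12345,
 * (0x12345 & 0xfff) == 0x345 != 0, so the address is misaligned and the
 * tag requests a bounce (assuming no filter callback overrides the
 * decision).
 */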

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
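
/*
 * Illustrative use only (not part of this file): a driver that serializes
 * its busdma callbacks with Giant would pair busdma_lock_mutex with &Giant
 * at tag creation time, e.g.:
 *
 *	error = bus_dma_tag_create(parent, 1, 0, BUS_SPACE_MAXADDR,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, nsegments, maxsegsz,
 *	    0, busdma_lock_mutex, &Giant, &dmat);
 */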

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are never meant to be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
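
/*
 * Worked example of the BUS_DMA_ALLOCNOW path above (illustrative
 * numbers): with maxsize = 64KB and 4KB pages, atop(65536) = 16, so up
 * to 16 bounce pages are added to the zone at tag creation time rather
 * than deferred to the first map load.
 */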

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
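
/*
 * Typical usage (hypothetical driver names): one map per in-flight
 * request, created once at attach time and reused across loads.
 *
 *	if (bus_dmamap_create(sc->dma_tag, 0, &sc->dma_map) != 0)
 *		return (ENOMEM);
 */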

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr,
		    dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	if (flags & BUS_DMA_NOCACHE)
		pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
		    PAT_UNCACHEABLE);
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
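
/*
 * Illustrative call sequence (hypothetical names): allocate DMA-safe
 * memory; the returned map is NULL because dmamem never needs bouncing,
 * and that same NULL map is later handed to bus_dmamap_load().
 *
 *	error = bus_dmamem_alloc(sc->desc_tag, (void **)&sc->desc_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->desc_map);
 */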

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap)
				paddr = pmap_extract(pmap, vaddr);
			else
				paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	     &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is
	 * disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
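
/*
 * Sketch of a matching callback (hypothetical softc): the callback
 * receives the segment array built above.  Callers that got EINPROGRESS
 * see the same callback later, invoked from busdma_swi() once bounce
 * pages become available.
 *
 *	static void
 *	foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error == 0)
 *			sc->busaddr = segs[0].ds_addr;
 *	}
 */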

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
	    flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}
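
/*
 * Illustrative NIC transmit path (hypothetical names): the _sg variant
 * fills a caller-supplied segment array and never defers, since mbuf
 * loads force BUS_DMA_NOWAIT above.
 *
 *	bus_dma_segment_t segs[FOO_MAXSEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->tx_tag, txb->map, m, segs,
 *	    &nsegs, 0);
 */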

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr = 0;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen, pmap, flags, &lastaddr,
					dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also want to add
		 * support for invalidating the caches on broken
		 * hardware.
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
}
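
/*
 * Illustrative ordering (hypothetical names): for a device write, sync
 * with PREWRITE after filling the buffer and before starting DMA, so the
 * copy into the bounce pages above sees valid data; for a device read,
 * sync with POSTREAD after the DMA completes and before touching the
 * buffer.
 *
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_PREWRITE);
 *	... start DMA and wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_POSTREAD);
 */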

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

#if defined(__amd64__) || defined(PAE)
#define	SYSCTL_ADD_BUS_SIZE_T	SYSCTL_ADD_UQUAD
#else
#define	SYSCTL_ADD_BUS_SIZE_T(ctx, parent, nbr, name, flag, ptr, desc)	\
	SYSCTL_ADD_UINT(ctx, parent, nbr, name, flag, ptr, 0, desc)
#endif

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_BUS_SIZE_T(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
1230