/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"
#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_addr_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	struct bounce_zone *bounce_zone;
	device_t	  iommu;
	void		 *iommu_cookie;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	vm_page_t	datapage;	/* physical page of client data */
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	struct memdesc	       mem;
	bus_dma_segment_t     *segments;
	int		       nsegs;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
	int		       contigalloc;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (dmat->filter == NULL && dmat->iommu == NULL &&
		    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
			retval = 1;
		if (dmat->filter == NULL &&
		    (paddr & (dmat->alignment - 1)) != 0)
			retval = 1;
		if (dmat->filter != NULL &&
		    (*dmat->filter)(dmat->filterarg, paddr) != 0)
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
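
/*
 * Worked example (illustrative values only, not taken from any particular
 * driver): for a tag created with lowaddr == BUS_SPACE_MAXADDR_32BIT,
 * highaddr == BUS_SPACE_MAXADDR, alignment == 1 and no filter callback,
 * a buffer page at physical address 0x100000000 falls inside the exclusion
 * window (lowaddr, highaddr], so run_filter() returns 1 and the page must
 * be bounced.  A page at 0x7ffff000 returns 0 and can be mapped directly.
 */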

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
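
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver that serializes its deferred busdma callbacks with its own
 * mutex passes busdma_lock_mutex together with that mutex as the
 * (lockfunc, lockfuncarg) pair when creating its tag, e.g.
 *
 *	mtx_init(&sc->sc_mtx, "example softc lock", NULL, MTX_DEF);
 *	... pass "busdma_lock_mutex, &sc->sc_mtx" to bus_dma_tag_create() ...
 *
 * A driver with no lock of its own may pass busdma_lock_mutex and &Giant
 * instead, as described above.
 */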

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_addr_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
		newtag->iommu = parent->iommu;
		newtag->iommu_cookie = parent->iommu_cookie;
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
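
/*
 * Illustrative sketch (hypothetical names such as sc, sc_mtx and sc_dtag;
 * this call is not made anywhere in this file): a typical driver derives
 * its tag from the parent bus tag, limits transfers to 32-bit addresses
 * and a single segment, and uses busdma_lock_mutex for deferred callbacks.
 * The right-hand labels name the parameters being passed:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *	    1, 0,			alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	lowaddr
 *	    BUS_SPACE_MAXADDR,		highaddr
 *	    NULL, NULL,			filter, filterarg
 *	    DFLTPHYS, 1, DFLTPHYS,	maxsize, nsegments, maxsegsz
 *	    0,				flags
 *	    busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dtag);
 */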

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

	return (0);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
				     M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t *)malloc(
	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
	    M_NOWAIT);
	if ((*mapp)->segments == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
	}
	free(map->segments, M_DEVBUF);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	bus_dmamap_create(dmat, flags, mapp);

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
#ifdef NOTYET
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
#endif
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * XXX:
	 * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment <= dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
		    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary, attr);
		(*mapp)->contigalloc = 1;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
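
/*
 * Illustrative sketch (hypothetical driver code; sc, sc_dtag, sc_ring,
 * sc_map, ring_cb and RING_SIZE are assumed names, not defined in this
 * file): a driver typically pairs bus_dmamem_alloc() with an immediate
 * load of the returned buffer and records the bus address in its callback:
 *
 *	error = bus_dmamem_alloc(sc->sc_dtag, (void **)&sc->sc_ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->sc_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->sc_dtag, sc->sc_map, sc->sc_ring,
 *		    RING_SIZE, ring_cb, sc, BUS_DMA_NOWAIT);
 *
 * The matching teardown is bus_dmamap_unload() followed by
 * bus_dmamem_free(), which also destroys the map created here.
 */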

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	if (!map->contigalloc)
		free(vaddr, M_DEVBUF);
	else
		kmem_free((vm_offset_t)vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
		   bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
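
/*
 * Worked example (illustrative numbers only): with dmat->boundary == 0x10000
 * (64KB), a chunk starting at curaddr == 0x2fff0 with sgsize == 0x100 would
 * cross the 0x30000 line.  Above, bmask == ~0xffff, baddr == 0x30000, and
 * sgsize is clipped to baddr - curaddr == 0x10, so the segment stops at the
 * boundary; the caller's loop hands the remaining 0xf0 bytes back to
 * _bus_dmamap_addseg(), where they start a new segment.
 */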

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
		      bus_dmamap_t map,
		      vm_paddr_t buf, bus_size_t buflen,
		      int flags,
		      bus_dma_segment_t *segs,
		      int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_dma_segment_t *segs,
			int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t kvaddr, vaddr;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
			    sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct memdesc *mem, bus_dmamap_callback_t *callback,
		    void *callback_arg)
{

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		map->dmat = dmat;
		map->mem = *mem;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
		     bus_dma_segment_t *segs, int nsegs, int error)
{

	map->nsegs = nsegs;
	if (segs != NULL)
		memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
		    dmat->lowaddr, dmat->highaddr, dmat->alignment,
		    dmat->boundary, dmat->iommu_cookie);

	if (segs != NULL)
		memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
	else
		segs = map->segments;

	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	if (dmat->iommu) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie);
		map->nsegs = 0;
	}

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)datavaddr,
				    (void *)bpage->vaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)bpage->vaddr,
				    (void *)datavaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	powerpc_sync();
}
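
/*
 * Illustrative sketch (hypothetical driver code; sc, sc_dtag and sc_map are
 * assumed names): the sync calls bracket the device's access to the loaded
 * buffer.  For a host-to-device transfer:
 *
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *	(start the device reading the buffer, wait for completion)
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_POSTWRITE);
 *
 * For a device-to-host transfer PREREAD/POSTREAD are used instead; the
 * POSTREAD sync is what copies bounced data back into the client buffer
 * above.
 */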

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem,
				    map->callback, map->callback_arg,
				    BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}

int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{
	tag->iommu = iommu;
	tag->iommu_cookie = cookie;

	return (0);
}