1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * From amd64/busdma_machdep.c, r204214
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/kernel.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/proc.h>
42 #include <sys/memdesc.h>
43 #include <sys/mutex.h>
44 #include <sys/sysctl.h>
45 #include <sys/uio.h>
46 
47 #include <vm/vm.h>
48 #include <vm/vm_extern.h>
49 #include <vm/vm_kern.h>
50 #include <vm/vm_page.h>
51 #include <vm/vm_map.h>
52 
53 #include <machine/atomic.h>
54 #include <machine/bus.h>
55 #include <machine/cpufunc.h>
56 #include <machine/md_var.h>
57 
58 #include "iommu_if.h"
59 
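/*
 * Upper bound on the number of bounce pages: 8192 or 1/40 of physical
 * memory, whichever is smaller.
 */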
60 #define MAX_BPAGES MIN(8192, physmem/40)
61 
62 struct bounce_page;
63 struct bounce_zone;
64 
65 struct bus_dma_tag {
66 	bus_size_t	  alignment;
67 	bus_addr_t	  boundary;
68 	bus_addr_t	  lowaddr;
69 	bus_addr_t	  highaddr;
70 	bus_size_t	  maxsize;
71 	bus_size_t	  maxsegsz;
72 	u_int		  nsegments;
73 	int		  flags;
74 	int		  map_count;
75 	bus_dma_lock_t	 *lockfunc;
76 	void		 *lockfuncarg;
77 	struct bounce_zone *bounce_zone;
78 	device_t	  iommu;
79 	void		 *iommu_cookie;
80 };
81 
82 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
83     "Busdma parameters");
84 
85 struct bus_dmamap {
86 	STAILQ_HEAD(, bounce_page) bpages;
87 	int		       pagesneeded;
88 	int		       pagesreserved;
89 	bus_dma_tag_t	       dmat;
90 	struct memdesc	       mem;
91 	bus_dma_segment_t     *segments;
92 	int		       nsegs;
93 	bus_dmamap_callback_t *callback;
94 	void		      *callback_arg;
95 	__sbintime_t	       queued_time;
96 	STAILQ_ENTRY(bus_dmamap) links;
97 	int		       contigalloc;
98 };
99 
100 static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
101 
102 #define	dmat_alignment(dmat)	((dmat)->alignment)
103 #define	dmat_flags(dmat)	((dmat)->flags)
104 #define	dmat_highaddr(dmat)	((dmat)->highaddr)
105 #define	dmat_lowaddr(dmat)	((dmat)->lowaddr)
106 #define	dmat_lockfunc(dmat)	((dmat)->lockfunc)
107 #define	dmat_lockfuncarg(dmat)	((dmat)->lockfuncarg)
108 
109 #include "../../kern/subr_busdma_bounce.c"
110 
111 /*
112  * Returns true if the address falls within the tag's exclusion window, or
113  * fails to meet its alignment requirements.
114  */
115 static __inline bool
116 must_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
117 {
118 
119 	if (dmat->iommu == NULL && paddr > dmat->lowaddr &&
120 	    paddr <= dmat->highaddr)
121 		return (true);
122 	if (!vm_addr_align_ok(paddr, dmat->alignment))
123 		return (true);
124 
125 	return (false);
126 }
127 
128 #define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
129 #define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
130 /*
131  * Allocate a device specific dma_tag.
132  */
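/*
 * A minimal usage sketch (hypothetical driver code; the parent tag, sizes,
 * and the softc field "sc_dtag" are illustrative assumptions, not part of
 * this file).  Arguments are, in order: parent, alignment, boundary,
 * lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz,
 * flags, lockfunc, lockfuncarg, dmat:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->sc_dtag);
 */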
133 int
134 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
135 		   bus_addr_t boundary, bus_addr_t lowaddr,
136 		   bus_addr_t highaddr, bus_dma_filter_t *filter,
137 		   void *filterarg, bus_size_t maxsize, int nsegments,
138 		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
139 		   void *lockfuncarg, bus_dma_tag_t *dmat)
140 {
141 	bus_dma_tag_t newtag;
142 	int error = 0;
143 
144 	/* Basic sanity checking */
145 	if (boundary != 0 && boundary < maxsegsz)
146 		maxsegsz = boundary;
147 
148 	if (maxsegsz == 0) {
149 		return (EINVAL);
150 	}
151 
152 	/* Filters are no longer supported. */
153 	if (filter != NULL || filterarg != NULL)
154 		return (EINVAL);
155 
156 	/* Return a NULL tag on failure */
157 	*dmat = NULL;
158 
159 	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
160 	    M_ZERO | M_NOWAIT);
161 	if (newtag == NULL) {
162 		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
163 		    __func__, newtag, 0, error);
164 		return (ENOMEM);
165 	}
166 
167 	newtag->alignment = alignment;
168 	newtag->boundary = boundary;
169 	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
170 	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
171 	newtag->maxsize = maxsize;
172 	newtag->nsegments = nsegments;
173 	newtag->maxsegsz = maxsegsz;
174 	newtag->flags = flags;
175 	newtag->map_count = 0;
176 	if (lockfunc != NULL) {
177 		newtag->lockfunc = lockfunc;
178 		newtag->lockfuncarg = lockfuncarg;
179 	} else {
180 		newtag->lockfunc = _busdma_dflt_lock;
181 		newtag->lockfuncarg = NULL;
182 	}
183 
184 	/* Take into account any restrictions imposed by our parent tag */
185 	if (parent != NULL) {
186 		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
187 		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
188 		if (newtag->boundary == 0)
189 			newtag->boundary = parent->boundary;
190 		else if (parent->boundary != 0)
191 			newtag->boundary = MIN(parent->boundary,
192 					       newtag->boundary);
193 
194 		newtag->iommu = parent->iommu;
195 		newtag->iommu_cookie = parent->iommu_cookie;
196 	}
197 
198 	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
199 		newtag->flags |= BUS_DMA_COULD_BOUNCE;
200 
201 	if (newtag->alignment > 1)
202 		newtag->flags |= BUS_DMA_COULD_BOUNCE;
203 
204 	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
205 	    (flags & BUS_DMA_ALLOCNOW) != 0) {
206 		struct bounce_zone *bz;
207 
208 		/* Must bounce */
209 
210 		if ((error = alloc_bounce_zone(newtag)) != 0) {
211 			free(newtag, M_DEVBUF);
212 			return (error);
213 		}
214 		bz = newtag->bounce_zone;
215 
216 		if (ptoa(bz->total_bpages) < maxsize) {
217 			int pages;
218 
219 			pages = atop(maxsize) - bz->total_bpages;
220 
221 			/* Add pages to our bounce pool */
222 			if (alloc_bounce_pages(newtag, pages) < pages)
223 				error = ENOMEM;
224 		}
225 		/* Performed initial allocation */
226 		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
227 	}
228 
229 	if (error != 0) {
230 		free(newtag, M_DEVBUF);
231 	} else {
232 		*dmat = newtag;
233 	}
234 	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
235 	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
236 	return (error);
237 }
238 
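/*
 * Copy an existing tag's parameters into a caller-supplied template.
 */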
239 void
240 bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
241 {
242 
243 	if (t == NULL || dmat == NULL)
244 		return;
245 
246 	t->alignment = dmat->alignment;
247 	t->boundary = dmat->boundary;
248 	t->lowaddr = dmat->lowaddr;
249 	t->highaddr = dmat->highaddr;
250 	t->maxsize = dmat->maxsize;
251 	t->nsegments = dmat->nsegments;
252 	t->maxsegsize = dmat->maxsegsz;
253 	t->flags = dmat->flags;
254 	t->lockfunc = dmat->lockfunc;
255 	t->lockfuncarg = dmat->lockfuncarg;
256 }
257 
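/*
 * Setting a NUMA domain is not implemented here; the request is accepted
 * and ignored.
 */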
258 int
259 bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
260 {
261 
262 	return (0);
263 }
264 
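/*
 * Destroy a dma_tag.  Fails with EBUSY if any maps created from the tag
 * still exist.
 */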
265 int
266 bus_dma_tag_destroy(bus_dma_tag_t dmat)
267 {
268 	int error = 0;
269 
270 	if (dmat != NULL) {
271 		if (dmat->map_count != 0) {
272 			error = EBUSY;
273 			goto out;
274 		}
275 
276 		free(dmat, M_DEVBUF);
277 	}
278 out:
279 	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error);
280 	return (error);
281 }
282 
283 /*
284  * Allocate a handle for mapping from kva/uva/physical
285  * address space into bus device space.
286  */
287 int
288 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
289 {
290 	int error;
291 
292 	error = 0;
293 
294 	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
295 				     M_NOWAIT | M_ZERO);
296 	if (*mapp == NULL) {
297 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
298 		    __func__, dmat, ENOMEM);
299 		return (ENOMEM);
300 	}
301 
302 	/*
303 	 * Bouncing might be required if the driver asks for an active
304 	 * exclusion region, a data alignment that is stricter than 1, and/or
305 	 * an active address boundary.
306 	 */
307 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
308 		/* Must bounce */
309 		struct bounce_zone *bz;
310 		int maxpages;
311 
312 		if (dmat->bounce_zone == NULL) {
313 			if ((error = alloc_bounce_zone(dmat)) != 0)
314 				return (error);
315 		}
316 		bz = dmat->bounce_zone;
317 
318 		/* Initialize the new map */
319 		STAILQ_INIT(&((*mapp)->bpages));
320 
321 		/*
322 		 * Attempt to add pages to our pool on a per-instance
323 		 * basis up to a sane limit.
324 		 */
325 		if (dmat->alignment > 1)
326 			maxpages = MAX_BPAGES;
327 		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
329 		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
330 		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
331 			int pages;
332 
333 			pages = MAX(atop(dmat->maxsize), 1);
334 			pages = MIN(maxpages - bz->total_bpages, pages);
335 			pages = MAX(pages, 1);
336 			if (alloc_bounce_pages(dmat, pages) < pages)
337 				error = ENOMEM;
338 
339 			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
340 				if (error == 0)
341 					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
342 			} else {
343 				error = 0;
344 			}
345 		}
346 		bz->map_count++;
347 	}
348 
349 	(*mapp)->nsegs = 0;
350 	(*mapp)->segments = (bus_dma_segment_t *)malloc(
351 	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
352 	    M_NOWAIT);
353 	if ((*mapp)->segments == NULL) {
354 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
355 		    __func__, dmat, ENOMEM);
356 		return (ENOMEM);
357 	}
358 
359 	if (error == 0)
360 		dmat->map_count++;
361 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
362 	    __func__, dmat, dmat->flags, error);
363 	return (error);
364 }
365 
366 /*
367  * Destroy a handle for mapping from kva/uva/physical
368  * address space into bus device space.
369  */
370 int
371 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
372 {
373 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
374 		if (STAILQ_FIRST(&map->bpages) != NULL) {
375 			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
376 			    __func__, dmat, EBUSY);
377 			return (EBUSY);
378 		}
379 		if (dmat->bounce_zone)
380 			dmat->bounce_zone->map_count--;
381 	}
382 	free(map->segments, M_DEVBUF);
383 	free(map, M_DEVBUF);
384 	dmat->map_count--;
385 	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
386 	return (0);
387 }
388 
389 /*
390  * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
393  */
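/*
 * Typical call sequence (hypothetical driver code; the "sc" fields are
 * illustrative assumptions):
 *
 *	error = bus_dmamem_alloc(sc->sc_dtag, &sc->sc_vaddr,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->sc_map);
 *	...
 *	bus_dmamem_free(sc->sc_dtag, sc->sc_vaddr, sc->sc_map);
 */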
394 int
395 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
396 		 bus_dmamap_t *mapp)
397 {
398 	vm_memattr_t attr;
	int error, mflags;
400 
401 	if (flags & BUS_DMA_NOWAIT)
402 		mflags = M_NOWAIT;
403 	else
404 		mflags = M_WAITOK;
405 
	error = bus_dmamap_create(dmat, flags, mapp);
	if (error != 0)
		return (error);
407 
408 	if (flags & BUS_DMA_ZERO)
409 		mflags |= M_ZERO;
410 	if (flags & BUS_DMA_NOCACHE)
411 		attr = VM_MEMATTR_UNCACHEABLE;
412 	else
413 		attr = VM_MEMATTR_DEFAULT;
414 
415 	/*
416 	 * XXX:
417 	 * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
418 	 * alignment guarantees of malloc need to be nailed down, and the
419 	 * code below should be rewritten to take that into account.
420 	 *
421 	 * In the meantime, we'll warn the user if malloc gets it wrong.
422 	 */
423 	if ((dmat->maxsize <= PAGE_SIZE) &&
424 	   (dmat->alignment <= dmat->maxsize) &&
425 	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
426 	    attr == VM_MEMATTR_DEFAULT) {
427 		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
428 	} else {
429 		/*
430 		 * XXX Use Contigmalloc until it is merged into this facility
431 		 *     and handles multi-seg allocations.  Nobody is doing
432 		 *     multi-seg allocations yet though.
433 		 * XXX Certain AGP hardware does.
434 		 */
435 		*vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
436 		    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
437 		    dmat->boundary, attr);
438 		(*mapp)->contigalloc = 1;
439 	}
440 	if (*vaddr == NULL) {
441 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
442 		    __func__, dmat, dmat->flags, ENOMEM);
443 		return (ENOMEM);
444 	} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
445 		printf("bus_dmamem_alloc failed to align memory properly.\n");
446 	}
447 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
448 	    __func__, dmat, dmat->flags, 0);
449 	return (0);
450 }
451 
452 /*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
455  */
456 void
457 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
458 {
459 
460 	if (!map->contigalloc)
461 		free(vaddr, M_DEVBUF);
462 	else
463 		kmem_free(vaddr, dmat->maxsize);
464 	bus_dmamap_destroy(dmat, map);
465 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
466 }
467 
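/*
 * Count the bounce pages needed to load a physical buffer, unless the
 * count has already been computed for this map.
 */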
468 static void
469 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
470     bus_size_t buflen, int flags)
471 {
472 	bus_addr_t curaddr;
473 	bus_size_t sgsize;
474 
475 	if (map->pagesneeded == 0) {
476 		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
477 		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
478 		    dmat->boundary, dmat->alignment);
479 		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
480 		/*
481 		 * Count the number of bounce pages
482 		 * needed in order to complete this transfer
483 		 */
484 		curaddr = buf;
485 		while (buflen != 0) {
486 			sgsize = MIN(buflen, dmat->maxsegsz);
487 			if (must_bounce(dmat, curaddr)) {
488 				sgsize = MIN(sgsize,
489 				    PAGE_SIZE - (curaddr & PAGE_MASK));
490 				map->pagesneeded++;
491 			}
492 			curaddr += sgsize;
493 			buflen -= sgsize;
494 		}
495 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
496 	}
497 }
498 
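/*
 * Count the bounce pages needed to load a virtually addressed buffer,
 * unless the count has already been computed for this map.
 */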
499 static void
500 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
501     void *buf, bus_size_t buflen, int flags)
502 {
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;
506 
507 	if (map->pagesneeded == 0) {
508 		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
509 		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
510 		    dmat->boundary, dmat->alignment);
511 		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
512 		/*
513 		 * Count the number of bounce pages
514 		 * needed in order to complete this transfer
515 		 */
516 		vaddr = (vm_offset_t)buf;
517 		vendaddr = (vm_offset_t)buf + buflen;
518 
519 		while (vaddr < vendaddr) {
520 			bus_size_t sg_len;
521 
522 			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
523 			if (pmap == kernel_pmap)
524 				paddr = pmap_kextract(vaddr);
525 			else
526 				paddr = pmap_extract(pmap, vaddr);
527 			if (must_bounce(dmat, paddr)) {
528 				sg_len = roundup2(sg_len, dmat->alignment);
529 				map->pagesneeded++;
530 			}
531 			vaddr += sg_len;
532 		}
533 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
534 	}
535 }
536 
537 /*
538  * Add a single contiguous physical range to the segment list.
539  */
540 static int
541 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
542 		   bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
543 {
544 	int seg;
545 
546 	/*
547 	 * Make sure we don't cross any boundaries.
548 	 */
549 	if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
550 		sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
551 
552 	/*
553 	 * Insert chunk into a segment, coalescing with
554 	 * previous segment if possible.
555 	 */
556 	seg = *segp;
557 	if (seg == -1) {
558 		seg = 0;
559 		segs[seg].ds_addr = curaddr;
560 		segs[seg].ds_len = sgsize;
561 	} else {
562 		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
563 		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
564 		    vm_addr_bound_ok(segs[seg].ds_addr,
565 		    segs[seg].ds_len + sgsize, dmat->boundary))
566 			segs[seg].ds_len += sgsize;
567 		else {
568 			if (++seg >= dmat->nsegments)
569 				return (0);
570 			segs[seg].ds_addr = curaddr;
571 			segs[seg].ds_len = sgsize;
572 		}
573 	}
574 	*segp = seg;
575 	return (sgsize);
576 }
577 
578 /*
579  * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
581  */
582 int
583 _bus_dmamap_load_phys(bus_dma_tag_t dmat,
584 		      bus_dmamap_t map,
585 		      vm_paddr_t buf, bus_size_t buflen,
586 		      int flags,
587 		      bus_dma_segment_t *segs,
588 		      int *segp)
589 {
590 	bus_addr_t curaddr;
591 	bus_size_t sgsize;
592 	int error;
593 
594 	if (segs == NULL)
595 		segs = map->segments;
596 
597 	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
598 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
599 		if (map->pagesneeded != 0) {
600 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
601 			if (error)
602 				return (error);
603 		}
604 	}
605 
606 	while (buflen > 0) {
607 		curaddr = buf;
608 		sgsize = MIN(buflen, dmat->maxsegsz);
609 		if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
610 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
611 			curaddr = add_bounce_page(dmat, map, 0, curaddr,
612 			    sgsize);
613 		}
614 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
615 		    segp);
616 		if (sgsize == 0)
617 			break;
618 		buf += sgsize;
619 		buflen -= sgsize;
620 	}
621 
622 	/*
623 	 * Did we fit?
624 	 */
625 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
626 }
627 
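/*
 * Utility function to load an array of pages.  This implementation simply
 * defers to the generic page-by-page loader.
 */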
628 int
629 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
630     struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
631     bus_dma_segment_t *segs, int *segp)
632 {
633 
634 	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
635 	    segs, segp));
636 }
637 
638 /*
639  * Utility function to load a linear buffer.  segp contains
640  * the starting segment on entrance, and the ending segment on exit.
641  */
642 int
643 _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
644     			bus_dmamap_t map,
645 			void *buf, bus_size_t buflen,
646 			pmap_t pmap,
647 			int flags,
648 			bus_dma_segment_t *segs,
649 			int *segp)
650 {
651 	bus_size_t sgsize;
652 	bus_addr_t curaddr;
653 	vm_offset_t kvaddr, vaddr;
654 	int error;
655 
656 	if (segs == NULL)
657 		segs = map->segments;
658 
659 	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
660 		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
661 		if (map->pagesneeded != 0) {
662 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
663 			if (error)
664 				return (error);
665 		}
666 	}
667 
668 	vaddr = (vm_offset_t)buf;
669 
670 	while (buflen > 0) {
671 		bus_size_t max_sgsize;
672 
673 		/*
674 		 * Get the physical address for this segment.
675 		 */
676 		if (pmap == kernel_pmap) {
677 			curaddr = pmap_kextract(vaddr);
678 			kvaddr = vaddr;
679 		} else {
680 			curaddr = pmap_extract(pmap, vaddr);
681 			kvaddr = 0;
682 		}
683 
684 		/*
685 		 * Compute the segment size, and adjust counts.
686 		 */
687 		max_sgsize = MIN(buflen, dmat->maxsegsz);
688 		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
689 		if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
690 			sgsize = roundup2(sgsize, dmat->alignment);
691 			sgsize = MIN(sgsize, max_sgsize);
692 			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
693 			    sgsize);
694 		} else {
695 			sgsize = MIN(sgsize, max_sgsize);
696 		}
697 
698 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
699 		    segp);
700 		if (sgsize == 0)
701 			break;
702 		vaddr += sgsize;
703 		buflen -= sgsize;
704 	}
705 
706 	/*
707 	 * Did we fit?
708 	 */
709 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
710 }
711 
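/*
 * Record the state needed to replay a deferred load from the bounce-page
 * callback once pages become available.
 */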
712 void
713 _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
714 		    struct memdesc *mem, bus_dmamap_callback_t *callback,
715 		    void *callback_arg)
716 {
717 
718 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
719 		map->dmat = dmat;
720 		map->mem = *mem;
721 		map->callback = callback;
722 		map->callback_arg = callback_arg;
723 	}
724 }
725 
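/*
 * Complete a load: run the segment list through the IOMMU, if one is
 * attached to the tag, and return the segment array the caller should use.
 */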
726 bus_dma_segment_t *
727 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
728 		     bus_dma_segment_t *segs, int nsegs, int error)
729 {
730 
731 	map->nsegs = nsegs;
732 	if (segs != NULL)
		memcpy(map->segments, segs, map->nsegs * sizeof(segs[0]));
734 	if (dmat->iommu != NULL)
735 		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
736 		    dmat->lowaddr, dmat->highaddr, dmat->alignment,
737 		    dmat->boundary, dmat->iommu_cookie);
738 
739 	if (segs != NULL)
		memcpy(segs, map->segments, map->nsegs * sizeof(segs[0]));
741 	else
742 		segs = map->segments;
743 
744 	return (segs);
745 }
746 
747 /*
748  * Release the mapping held by map.
749  */
750 void
751 bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
752 {
753 	if (dmat->iommu) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs,
		    dmat->iommu_cookie);
755 		map->nsegs = 0;
756 	}
757 
758 	free_bounce_pages(dmat, map);
759 }
760 
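/*
 * Copy data to or from bounce pages as dictated by op, then issue a
 * powerpc_sync() barrier.
 */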
761 void
762 bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
763 {
764 	struct bounce_page *bpage;
765 	vm_offset_t datavaddr, tempvaddr;
766 
767 	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
768 		/*
769 		 * Handle data bouncing.  We might also
770 		 * want to add support for invalidating
		 * the caches on broken hardware.
772 		 */
773 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
774 		    "performing bounce", __func__, dmat, dmat->flags, op);
775 
776 		if (op & BUS_DMASYNC_PREWRITE) {
777 			while (bpage != NULL) {
778 				tempvaddr = 0;
779 				datavaddr = bpage->datavaddr;
780 				if (datavaddr == 0) {
781 					tempvaddr = pmap_quick_enter_page(
782 					    bpage->datapage);
783 					datavaddr = tempvaddr |
784 					    bpage->dataoffs;
785 				}
786 
787 				bcopy((void *)datavaddr,
788 				    (void *)bpage->vaddr, bpage->datacount);
789 
790 				if (tempvaddr != 0)
791 					pmap_quick_remove_page(tempvaddr);
792 				bpage = STAILQ_NEXT(bpage, links);
793 			}
794 			dmat->bounce_zone->total_bounced++;
795 		}
796 
797 		if (op & BUS_DMASYNC_POSTREAD) {
798 			while (bpage != NULL) {
799 				tempvaddr = 0;
800 				datavaddr = bpage->datavaddr;
801 				if (datavaddr == 0) {
802 					tempvaddr = pmap_quick_enter_page(
803 					    bpage->datapage);
804 					datavaddr = tempvaddr |
805 					    bpage->dataoffs;
806 				}
807 
808 				bcopy((void *)bpage->vaddr,
809 				    (void *)datavaddr, bpage->datacount);
810 
811 				if (tempvaddr != 0)
812 					pmap_quick_remove_page(tempvaddr);
813 				bpage = STAILQ_NEXT(bpage, links);
814 			}
815 			dmat->bounce_zone->total_bounced++;
816 		}
817 	}
818 
819 	powerpc_sync();
820 }
821 
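/*
 * Attach an IOMMU device and its private cookie to a tag.  Subsequent loads
 * and unloads on the tag are translated through IOMMU_MAP()/IOMMU_UNMAP().
 */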
822 int
823 bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
824 {
825 	tag->iommu = iommu;
826 	tag->iommu_cookie = cookie;
827 
828 	return (0);
829 }
830