1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * From amd64/busdma_machdep.c, r204214
31  */
32 
33 #include <sys/cdefs.h>
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/bus.h>
38 #include <sys/interrupt.h>
39 #include <sys/kernel.h>
40 #include <sys/ktr.h>
41 #include <sys/lock.h>
42 #include <sys/proc.h>
43 #include <sys/memdesc.h>
44 #include <sys/mutex.h>
45 #include <sys/sysctl.h>
46 #include <sys/uio.h>
47 
48 #include <vm/vm.h>
49 #include <vm/vm_extern.h>
50 #include <vm/vm_kern.h>
51 #include <vm/vm_page.h>
52 #include <vm/vm_map.h>
53 
54 #include <machine/atomic.h>
55 #include <machine/bus.h>
56 #include <machine/cpufunc.h>
57 #include <machine/md_var.h>
58 
59 #include "iommu_if.h"
60 
/* Cap on bounce pages per zone: 8192 or 1/40th of physical memory. */
#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_page;
struct bounce_zone;
65 
/*
 * A DMA tag describes the constraints (alignment, boundary, address
 * window, segment limits) that mappings loaded against it must honor.
 * Tags chain through 'parent' and are reference counted.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* inherited constraints, may be NULL */
	bus_size_t	  alignment;	/* required segment alignment */
	bus_addr_t	  boundary;	/* segments may not cross this */
	bus_addr_t	  lowaddr;	/* bounce window: (lowaddr, highaddr] */
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;	/* optional per-address bounce filter */
	void		 *filterarg;	/* argument passed to 'filter' */
	bus_size_t	  maxsize;	/* maximum mapping size */
	bus_size_t	  maxsegsz;	/* maximum size of one segment */
	u_int		  nsegments;	/* maximum number of segments */
	int		  flags;	/* BUS_DMA_* flags */
	int		  ref_count;	/* self plus derived tags */
	int		  map_count;	/* maps created against this tag */
	bus_dma_lock_t	 *lockfunc;	/* deferred-callback serialization */
	void		 *lockfuncarg;
	struct bounce_zone *bounce_zone;	/* bounce pool, if bouncing */
	device_t	  iommu;	/* optional IOMMU remapping loads */
	void		 *iommu_cookie;	/* opaque IOMMU state */
};
86 
87 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
88     "Busdma parameters");
89 
/*
 * One mapping created against a tag.  Tracks the bounce pages the map
 * currently holds, a scratch segment array, and the state needed to
 * retry a deferred load once bounce pages become available.
 */
struct bus_dmamap {
	STAILQ_HEAD(, bounce_page) bpages;	/* bounce pages in use */
	int		       pagesneeded;	/* bounce pages this load needs */
	int		       pagesreserved;	/* bounce pages reserved so far */
	bus_dma_tag_t	       dmat;		/* tag, for deferred callback */
	struct memdesc	       mem;		/* buffer, for deferred callback */
	bus_dma_segment_t     *segments;	/* scratch segment array */
	int		       nsegs;		/* valid entries in 'segments' */
	bus_dmamap_callback_t *callback;	/* deferred load callback */
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
	int		       contigalloc;	/* memory from kmem_alloc_contig */
};
103 
104 static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
105 
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/* Accessors required by the shared bounce-buffer code included below. */
#define	dmat_alignment(dmat)	((dmat)->alignment)
#define	dmat_flags(dmat)	((dmat)->flags)
#define	dmat_lowaddr(dmat)	((dmat)->lowaddr)
#define	dmat_lockfunc(dmat)	((dmat)->lockfunc)
#define	dmat_lockfuncarg(dmat)	((dmat)->lockfuncarg)

#include "../../kern/subr_busdma_bounce.c"
115 
116 /*
117  * Return true if a match is made.
118  *
119  * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
120  *
121  * If paddr is within the bounds of the dma tag then call the filter callback
122  * to check for a match, if there is no filter callback then assume a match.
123  */
124 static __inline int
125 run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
126 {
127 	int retval;
128 
129 	retval = 0;
130 
131 	do {
132 		if (dmat->filter == NULL && dmat->iommu == NULL &&
133 		    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
134 			retval = 1;
135 		if (dmat->filter == NULL &&
136 		    !vm_addr_align_ok(paddr, dmat->alignment))
137 			retval = 1;
138 		if (dmat->filter != NULL &&
139 		    (*dmat->filter)(dmat->filterarg, paddr) != 0)
140 			retval = 1;
141 
142 		dmat = dmat->parent;
143 	} while (retval == 0 && dmat != NULL);
144 	return (retval);
145 }
146 
147 #define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
148 #define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
149 /*
150  * Allocate a device specific dma_tag.
151  */
152 int
153 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
154 		   bus_addr_t boundary, bus_addr_t lowaddr,
155 		   bus_addr_t highaddr, bus_dma_filter_t *filter,
156 		   void *filterarg, bus_size_t maxsize, int nsegments,
157 		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
158 		   void *lockfuncarg, bus_dma_tag_t *dmat)
159 {
160 	bus_dma_tag_t newtag;
161 	int error = 0;
162 
163 	/* Basic sanity checking */
164 	if (boundary != 0 && boundary < maxsegsz)
165 		maxsegsz = boundary;
166 
167 	if (maxsegsz == 0) {
168 		return (EINVAL);
169 	}
170 
171 	/* Return a NULL tag on failure */
172 	*dmat = NULL;
173 
174 	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
175 	    M_ZERO | M_NOWAIT);
176 	if (newtag == NULL) {
177 		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
178 		    __func__, newtag, 0, error);
179 		return (ENOMEM);
180 	}
181 
182 	newtag->parent = parent;
183 	newtag->alignment = alignment;
184 	newtag->boundary = boundary;
185 	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
186 	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
187 	newtag->filter = filter;
188 	newtag->filterarg = filterarg;
189 	newtag->maxsize = maxsize;
190 	newtag->nsegments = nsegments;
191 	newtag->maxsegsz = maxsegsz;
192 	newtag->flags = flags;
193 	newtag->ref_count = 1; /* Count ourself */
194 	newtag->map_count = 0;
195 	if (lockfunc != NULL) {
196 		newtag->lockfunc = lockfunc;
197 		newtag->lockfuncarg = lockfuncarg;
198 	} else {
199 		newtag->lockfunc = _busdma_dflt_lock;
200 		newtag->lockfuncarg = NULL;
201 	}
202 
203 	/* Take into account any restrictions imposed by our parent tag */
204 	if (parent != NULL) {
205 		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
206 		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
207 		if (newtag->boundary == 0)
208 			newtag->boundary = parent->boundary;
209 		else if (parent->boundary != 0)
210 			newtag->boundary = MIN(parent->boundary,
211 					       newtag->boundary);
212 		if (newtag->filter == NULL) {
213 			/*
214 			 * Short circuit looking at our parent directly
215 			 * since we have encapsulated all of its information
216 			 */
217 			newtag->filter = parent->filter;
218 			newtag->filterarg = parent->filterarg;
219 			newtag->parent = parent->parent;
220 		}
221 		if (newtag->parent != NULL)
222 			atomic_add_int(&parent->ref_count, 1);
223 		newtag->iommu = parent->iommu;
224 		newtag->iommu_cookie = parent->iommu_cookie;
225 	}
226 
227 	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
228 		newtag->flags |= BUS_DMA_COULD_BOUNCE;
229 
230 	if (newtag->alignment > 1)
231 		newtag->flags |= BUS_DMA_COULD_BOUNCE;
232 
233 	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
234 	    (flags & BUS_DMA_ALLOCNOW) != 0) {
235 		struct bounce_zone *bz;
236 
237 		/* Must bounce */
238 
239 		if ((error = alloc_bounce_zone(newtag)) != 0) {
240 			free(newtag, M_DEVBUF);
241 			return (error);
242 		}
243 		bz = newtag->bounce_zone;
244 
245 		if (ptoa(bz->total_bpages) < maxsize) {
246 			int pages;
247 
248 			pages = atop(maxsize) - bz->total_bpages;
249 
250 			/* Add pages to our bounce pool */
251 			if (alloc_bounce_pages(newtag, pages) < pages)
252 				error = ENOMEM;
253 		}
254 		/* Performed initial allocation */
255 		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
256 	}
257 
258 	if (error != 0) {
259 		free(newtag, M_DEVBUF);
260 	} else {
261 		*dmat = newtag;
262 	}
263 	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
264 	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
265 	return (error);
266 }
267 
/*
 * Copy the constraint fields of an existing tag into template 't' so
 * that a new, similar tag can be created from the template.  A NULL
 * template or tag makes this a no-op.
 */
void
bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
{

	if (t == NULL || dmat == NULL)
		return;

	t->parent = dmat->parent;
	t->alignment = dmat->alignment;
	t->boundary = dmat->boundary;
	t->lowaddr = dmat->lowaddr;
	t->highaddr = dmat->highaddr;
	t->maxsize = dmat->maxsize;
	t->nsegments = dmat->nsegments;
	t->maxsegsize = dmat->maxsegsz;
	t->flags = dmat->flags;
	t->lockfunc = dmat->lockfunc;
	t->lockfuncarg = dmat->lockfuncarg;
}
287 
/*
 * NUMA domain constraints are not implemented by this backend;
 * accept the request and report success.
 */
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

	return (0);
}
294 
295 int
296 bus_dma_tag_destroy(bus_dma_tag_t dmat)
297 {
298 	bus_dma_tag_t dmat_copy __unused;
299 	int error;
300 
301 	error = 0;
302 	dmat_copy = dmat;
303 
304 	if (dmat != NULL) {
305 		if (dmat->map_count != 0) {
306 			error = EBUSY;
307 			goto out;
308 		}
309 
310 		while (dmat != NULL) {
311 			bus_dma_tag_t parent;
312 
313 			parent = dmat->parent;
314 			atomic_subtract_int(&dmat->ref_count, 1);
315 			if (dmat->ref_count == 0) {
316 				free(dmat, M_DEVBUF);
317 				/*
318 				 * Last reference count, so
319 				 * release our reference
320 				 * count on our parent.
321 				 */
322 				dmat = parent;
323 			} else
324 				dmat = NULL;
325 		}
326 	}
327 out:
328 	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
329 	return (error);
330 }
331 
332 /*
333  * Allocate a handle for mapping from kva/uva/physical
334  * address space into bus device space.
335  */
336 int
337 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
338 {
339 	int error;
340 
341 	error = 0;
342 
343 	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
344 				     M_NOWAIT | M_ZERO);
345 	if (*mapp == NULL) {
346 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
347 		    __func__, dmat, ENOMEM);
348 		return (ENOMEM);
349 	}
350 
351 	/*
352 	 * Bouncing might be required if the driver asks for an active
353 	 * exclusion region, a data alignment that is stricter than 1, and/or
354 	 * an active address boundary.
355 	 */
356 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
357 		/* Must bounce */
358 		struct bounce_zone *bz;
359 		int maxpages;
360 
361 		if (dmat->bounce_zone == NULL) {
362 			if ((error = alloc_bounce_zone(dmat)) != 0)
363 				return (error);
364 		}
365 		bz = dmat->bounce_zone;
366 
367 		/* Initialize the new map */
368 		STAILQ_INIT(&((*mapp)->bpages));
369 
370 		/*
371 		 * Attempt to add pages to our pool on a per-instance
372 		 * basis up to a sane limit.
373 		 */
374 		if (dmat->alignment > 1)
375 			maxpages = MAX_BPAGES;
376 		else
377 			maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr));
378 		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
379 		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
380 			int pages;
381 
382 			pages = MAX(atop(dmat->maxsize), 1);
383 			pages = MIN(maxpages - bz->total_bpages, pages);
384 			pages = MAX(pages, 1);
385 			if (alloc_bounce_pages(dmat, pages) < pages)
386 				error = ENOMEM;
387 
388 			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
389 				if (error == 0)
390 					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
391 			} else {
392 				error = 0;
393 			}
394 		}
395 		bz->map_count++;
396 	}
397 
398 	(*mapp)->nsegs = 0;
399 	(*mapp)->segments = (bus_dma_segment_t *)malloc(
400 	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
401 	    M_NOWAIT);
402 	if ((*mapp)->segments == NULL) {
403 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
404 		    __func__, dmat, ENOMEM);
405 		return (ENOMEM);
406 	}
407 
408 	if (error == 0)
409 		dmat->map_count++;
410 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
411 	    __func__, dmat, dmat->flags, error);
412 	return (error);
413 }
414 
415 /*
416  * Destroy a handle for mapping from kva/uva/physical
417  * address space into bus device space.
418  */
419 int
420 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
421 {
422 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
423 		if (STAILQ_FIRST(&map->bpages) != NULL) {
424 			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
425 			    __func__, dmat, EBUSY);
426 			return (EBUSY);
427 		}
428 		if (dmat->bounce_zone)
429 			dmat->bounce_zone->map_count--;
430 	}
431 	free(map->segments, M_DEVBUF);
432 	free(map, M_DEVBUF);
433 	dmat->map_count--;
434 	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
435 	return (0);
436 }
437 
438 /*
439  * Allocate a piece of memory that can be efficiently mapped into
440  * bus device space based on the constraints lited in the dma tag.
441  * A dmamap to for use with dmamap_load is also allocated.
442  */
443 int
444 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
445 		 bus_dmamap_t *mapp)
446 {
447 	vm_memattr_t attr;
448 	int mflags;
449 
450 	if (flags & BUS_DMA_NOWAIT)
451 		mflags = M_NOWAIT;
452 	else
453 		mflags = M_WAITOK;
454 
455 	bus_dmamap_create(dmat, flags, mapp);
456 
457 	if (flags & BUS_DMA_ZERO)
458 		mflags |= M_ZERO;
459 	if (flags & BUS_DMA_NOCACHE)
460 		attr = VM_MEMATTR_UNCACHEABLE;
461 	else
462 		attr = VM_MEMATTR_DEFAULT;
463 
464 	/*
465 	 * XXX:
466 	 * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
467 	 * alignment guarantees of malloc need to be nailed down, and the
468 	 * code below should be rewritten to take that into account.
469 	 *
470 	 * In the meantime, we'll warn the user if malloc gets it wrong.
471 	 */
472 	if ((dmat->maxsize <= PAGE_SIZE) &&
473 	   (dmat->alignment <= dmat->maxsize) &&
474 	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
475 	    attr == VM_MEMATTR_DEFAULT) {
476 		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
477 	} else {
478 		/*
479 		 * XXX Use Contigmalloc until it is merged into this facility
480 		 *     and handles multi-seg allocations.  Nobody is doing
481 		 *     multi-seg allocations yet though.
482 		 * XXX Certain AGP hardware does.
483 		 */
484 		*vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
485 		    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
486 		    dmat->boundary, attr);
487 		(*mapp)->contigalloc = 1;
488 	}
489 	if (*vaddr == NULL) {
490 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
491 		    __func__, dmat, dmat->flags, ENOMEM);
492 		return (ENOMEM);
493 	} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
494 		printf("bus_dmamem_alloc failed to align memory properly.\n");
495 	}
496 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
497 	    __func__, dmat, dmat->flags, 0);
498 	return (0);
499 }
500 
501 /*
502  * Free a piece of memory and it's allociated dmamap, that was allocated
503  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
504  */
505 void
506 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
507 {
508 
509 	if (!map->contigalloc)
510 		free(vaddr, M_DEVBUF);
511 	else
512 		kmem_free(vaddr, dmat->maxsize);
513 	bus_dmamap_destroy(dmat, map);
514 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
515 }
516 
/*
 * Pre-scan a physical-address buffer and record in map->pagesneeded
 * how many bounce pages the load will require.  Does nothing if a
 * count already exists for this map.
 */
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				/* Bounced chunks never span a page. */
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}
547 
/*
 * Pre-scan a virtually-addressed buffer and record in
 * map->pagesneeded how many bounce pages the load will require,
 * resolving each page through 'pmap'.  Does nothing if a count
 * already exists for this map.
 */
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			/* Kernel addresses avoid the slower pmap lookup. */
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}
585 
586 /*
587  * Add a single contiguous physical range to the segment list.
588  */
589 static int
590 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
591 		   bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
592 {
593 	int seg;
594 
595 	/*
596 	 * Make sure we don't cross any boundaries.
597 	 */
598 	if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
599 		sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
600 
601 	/*
602 	 * Insert chunk into a segment, coalescing with
603 	 * previous segment if possible.
604 	 */
605 	seg = *segp;
606 	if (seg == -1) {
607 		seg = 0;
608 		segs[seg].ds_addr = curaddr;
609 		segs[seg].ds_len = sgsize;
610 	} else {
611 		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
612 		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
613 		    vm_addr_bound_ok(segs[seg].ds_addr,
614 		    segs[seg].ds_len + sgsize, dmat->boundary))
615 			segs[seg].ds_len += sgsize;
616 		else {
617 			if (++seg >= dmat->nsegments)
618 				return (0);
619 			segs[seg].ds_addr = curaddr;
620 			segs[seg].ds_len = sgsize;
621 		}
622 	}
623 	*segp = seg;
624 	return (sgsize);
625 }
626 
627 /*
628  * Utility function to load a physical buffer.  segp contains
629  * the starting segment on entrace, and the ending segment on exit.
630  */
631 int
632 _bus_dmamap_load_phys(bus_dma_tag_t dmat,
633 		      bus_dmamap_t map,
634 		      vm_paddr_t buf, bus_size_t buflen,
635 		      int flags,
636 		      bus_dma_segment_t *segs,
637 		      int *segp)
638 {
639 	bus_addr_t curaddr;
640 	bus_size_t sgsize;
641 	int error;
642 
643 	if (segs == NULL)
644 		segs = map->segments;
645 
646 	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
647 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
648 		if (map->pagesneeded != 0) {
649 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
650 			if (error)
651 				return (error);
652 		}
653 	}
654 
655 	while (buflen > 0) {
656 		curaddr = buf;
657 		sgsize = MIN(buflen, dmat->maxsegsz);
658 		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
659 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
660 			curaddr = add_bounce_page(dmat, map, 0, curaddr,
661 			    sgsize);
662 		}
663 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
664 		    segp);
665 		if (sgsize == 0)
666 			break;
667 		buf += sgsize;
668 		buflen -= sgsize;
669 	}
670 
671 	/*
672 	 * Did we fit?
673 	 */
674 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
675 }
676 
/*
 * Load an array of vm_page_t's.  No special handling is needed here;
 * defer to the generic page-at-a-time helper.
 */
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}
686 
687 /*
688  * Utility function to load a linear buffer.  segp contains
689  * the starting segment on entrance, and the ending segment on exit.
690  */
691 int
692 _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
693     			bus_dmamap_t map,
694 			void *buf, bus_size_t buflen,
695 			pmap_t pmap,
696 			int flags,
697 			bus_dma_segment_t *segs,
698 			int *segp)
699 {
700 	bus_size_t sgsize;
701 	bus_addr_t curaddr;
702 	vm_offset_t kvaddr, vaddr;
703 	int error;
704 
705 	if (segs == NULL)
706 		segs = map->segments;
707 
708 	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
709 		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
710 		if (map->pagesneeded != 0) {
711 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
712 			if (error)
713 				return (error);
714 		}
715 	}
716 
717 	vaddr = (vm_offset_t)buf;
718 
719 	while (buflen > 0) {
720 		bus_size_t max_sgsize;
721 
722 		/*
723 		 * Get the physical address for this segment.
724 		 */
725 		if (pmap == kernel_pmap) {
726 			curaddr = pmap_kextract(vaddr);
727 			kvaddr = vaddr;
728 		} else {
729 			curaddr = pmap_extract(pmap, vaddr);
730 			kvaddr = 0;
731 		}
732 
733 		/*
734 		 * Compute the segment size, and adjust counts.
735 		 */
736 		max_sgsize = MIN(buflen, dmat->maxsegsz);
737 		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
738 		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
739 			sgsize = roundup2(sgsize, dmat->alignment);
740 			sgsize = MIN(sgsize, max_sgsize);
741 			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
742 			    sgsize);
743 		} else {
744 			sgsize = MIN(sgsize, max_sgsize);
745 		}
746 
747 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
748 		    segp);
749 		if (sgsize == 0)
750 			break;
751 		vaddr += sgsize;
752 		buflen -= sgsize;
753 	}
754 
755 	/*
756 	 * Did we fit?
757 	 */
758 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
759 }
760 
761 void
762 _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
763 		    struct memdesc *mem, bus_dmamap_callback_t *callback,
764 		    void *callback_arg)
765 {
766 
767 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
768 		map->dmat = dmat;
769 		map->mem = *mem;
770 		map->callback = callback;
771 		map->callback_arg = callback_arg;
772 	}
773 }
774 
/*
 * Finalize a load: hand the accumulated segment list to the tag's
 * IOMMU for translation (when one is attached) and return the
 * segment array the caller should use.
 */
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
		     bus_dma_segment_t *segs, int nsegs, int error)
{

	map->nsegs = nsegs;
	/* Work on the map's own copy so the IOMMU may rewrite it. */
	if (segs != NULL)
		memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
		    dmat->lowaddr, dmat->highaddr, dmat->alignment,
		    dmat->boundary, dmat->iommu_cookie);

	/* Copy the (possibly rewritten) segments back to the caller. */
	if (segs != NULL)
		memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
	else
		segs = map->segments;

	return (segs);
}
795 
796 /*
797  * Release the mapping held by map.
798  */
799 void
800 bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
801 {
802 	if (dmat->iommu) {
803 		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie);
804 		map->nsegs = 0;
805 	}
806 
807 	free_bounce_pages(dmat, map);
808 }
809 
/*
 * Synchronize the DMA buffer with the device by copying data through
 * any bounce pages attached to the map.
 *
 * PREWRITE copies from the client buffer into the bounce pages before
 * the device reads them; POSTREAD copies device-written data from the
 * bounce pages back to the client buffer.  Pages without a kernel
 * mapping (datavaddr == 0) are reached through a transient
 * pmap_quick_enter_page() mapping.
 */
void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					/* Unmapped buffer: borrow a mapping. */
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				/* Client buffer -> bounce page. */
				bcopy((void *)datavaddr,
				    (void *)bpage->vaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			/*
			 * NOTE(review): if PREWRITE was handled above,
			 * 'bpage' is already NULL here, so a combined
			 * PREWRITE|POSTREAD op would skip this loop --
			 * presumably such a combination never occurs;
			 * verify against callers.
			 */
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					/* Unmapped buffer: borrow a mapping. */
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				/* Bounce page -> client buffer. */
				bcopy((void *)bpage->vaddr,
				    (void *)datavaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	/* Make the copies above visible before the caller proceeds. */
	powerpc_sync();
}
870 
/*
 * Attach a platform IOMMU to an existing tag.  Loads completed
 * against the tag are then translated via IOMMU_MAP/IOMMU_UNMAP,
 * and run_filter() no longer bounces exclusion-window hits for it.
 * Always succeeds.
 */
int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{
	tag->iommu = iommu;
	tag->iommu_cookie = cookie;

	return (0);
}
879