/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_page;
struct bounce_zone;

struct bus_dma_tag {
	bus_size_t	  alignment;
	bus_addr_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_size_t	  maxsize;
	bus_size_t	  maxsegsz;
	u_int		  nsegments;
	int		  flags;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	struct bounce_zone *bounce_zone;
	device_t	  iommu;
	void		 *iommu_cookie;
};

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");

struct bus_dmamap {
	STAILQ_HEAD(, bounce_page) bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	struct memdesc	       mem;
	bus_dma_segment_t     *segments;
	int		       nsegs;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	__sbintime_t	       queued_time;
	STAILQ_ENTRY(bus_dmamap) links;
	int		       contigalloc;
};

static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");

#define	dmat_alignment(dmat)	((dmat)->alignment)
#define	dmat_flags(dmat)	((dmat)->flags)
#define	dmat_highaddr(dmat)	((dmat)->highaddr)
#define	dmat_lowaddr(dmat)	((dmat)->lowaddr)
#define	dmat_lockfunc(dmat)	((dmat)->lockfunc)
#define	dmat_lockfuncarg(dmat)	((dmat)->lockfuncarg)

#include "../../kern/subr_busdma_bounce.c"

/*
 * Returns true if the address falls within the tag's exclusion window, or
 * fails to meet its alignment requirements.
 */
static __inline bool
must_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
{

	if (dmat->iommu == NULL && paddr > dmat->lowaddr &&
	    paddr <= dmat->highaddr)
		return (true);
	if (!vm_addr_align_ok(paddr, dmat->alignment))
		return (true);

	return (false);
}

#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_addr_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Filters are no longer supported. */
	if (filter != NULL || filterarg != NULL)
		return (EINVAL);

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = _busdma_dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);

		newtag->iommu = parent->iommu;
		newtag->iommu_cookie = parent->iommu_cookie;
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
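
/*
 * Example usage (a minimal sketch, not part of this file): a driver whose
 * hypothetical controller can only address 32-bit physical memory might
 * create its tag as below.  The arguments are, in order: parent tag,
 * alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize,
 * nsegments, maxsegsz, flags, lockfunc, lockfuncarg, and the tag pointer;
 * the specific values here are assumptions chosen only for illustration.
 *
 *	bus_dma_tag_t tag;
 *	int error;
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    DFLTPHYS, 1, DFLTPHYS, 0, NULL, NULL, &tag);
 *	if (error != 0)
 *		device_printf(dev, "could not create DMA tag\n");
 */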

void
bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
{

	if (t == NULL || dmat == NULL)
		return;

	t->alignment = dmat->alignment;
	t->boundary = dmat->boundary;
	t->lowaddr = dmat->lowaddr;
	t->highaddr = dmat->highaddr;
	t->maxsize = dmat->maxsize;
	t->nsegments = dmat->nsegments;
	t->maxsegsize = dmat->maxsegsz;
	t->flags = dmat->flags;
	t->lockfunc = dmat->lockfunc;
	t->lockfuncarg = dmat->lockfuncarg;
}

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

	return (0);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	int error = 0;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		free(dmat, M_DEVBUF);
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
				     M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t *)malloc(
	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
	    M_NOWAIT);
	if ((*mapp)->segments == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
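
/*
 * Example usage (a minimal sketch, not part of this file): a driver that
 * loads externally supplied buffers typically creates one map per in-flight
 * request from the tag created earlier; "sc->dma_tag" and "sc->dma_map" are
 * hypothetical softc fields used only for illustration.
 *
 *	error = bus_dmamap_create(sc->dma_tag, 0, &sc->dma_map);
 *	...
 *	error = bus_dmamap_destroy(sc->dma_tag, sc->dma_map);
 */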

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
	}
	free(map->segments, M_DEVBUF);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	bus_dmamap_create(dmat, flags, mapp);

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * XXX:
	 * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment <= dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
		    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary, attr);
		(*mapp)->contigalloc = 1;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
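
/*
 * Example usage (a minimal sketch, not part of this file): allocating a
 * small DMA-safe descriptor ring and releasing it again; "sc->ring",
 * "sc->ring_tag", and "sc->ring_map" are hypothetical softc fields used
 * only for illustration.
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, (void **)&sc->ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->ring_tag, sc->ring, sc->ring_map);
 */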

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	if (!map->contigalloc)
		free(vaddr, M_DEVBUF);
	else
		kmem_free(vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (must_bounce(dmat, curaddr)) {
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			sg_len = MIN(sg_len, dmat->maxsegsz);
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (must_bounce(dmat, paddr)) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
		   bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
		sgsize = roundup2(curaddr, dmat->boundary) - curaddr;

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    vm_addr_bound_ok(segs[seg].ds_addr,
		    segs[seg].ds_len + sgsize, dmat->boundary))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
		      bus_dmamap_t map,
		      vm_paddr_t buf, bus_size_t buflen,
		      int flags,
		      bus_dma_segment_t *segs,
		      int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_dma_segment_t *segs,
			int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t kvaddr, vaddr;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
			    sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
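
/*
 * Example usage (a minimal sketch, not part of this file): a driver loads a
 * request buffer through bus_dmamap_load(), which ultimately reaches the
 * helpers above, and receives the segment list in its callback.  The
 * "xxx_dma_cb" callback, the softc fields, and the buffer are hypothetical
 * and shown only for illustration.
 *
 *	static void
 *	xxx_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct xxx_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->dma_tag, sc->dma_map, buf, len,
 *	    xxx_dma_cb, sc, BUS_DMA_NOWAIT);
 */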

void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct memdesc *mem, bus_dmamap_callback_t *callback,
		    void *callback_arg)
{

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		map->dmat = dmat;
		map->mem = *mem;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
		     bus_dma_segment_t *segs, int nsegs, int error)
{

	map->nsegs = nsegs;
	if (segs != NULL)
		memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
		    dmat->lowaddr, dmat->highaddr, dmat->alignment,
		    dmat->boundary, dmat->iommu_cookie);

	if (segs != NULL)
		memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
	else
		segs = map->segments;

	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (dmat->iommu) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie);
		map->nsegs = 0;
	}

	free_bounce_pages(dmat, map);
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)datavaddr,
				    (void *)bpage->vaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)bpage->vaddr,
				    (void *)datavaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	powerpc_sync();
}
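
/*
 * Example usage (a minimal sketch, not part of this file): the usual
 * ordering of sync calls around a single DMA transaction; the "sc" fields
 * are hypothetical and shown only for illustration.
 *
 *	bus_dmamap_sync(sc->dma_tag, sc->dma_map,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	... start the DMA transfer and wait for it to complete ...
 *	bus_dmamap_sync(sc->dma_tag, sc->dma_map,
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->dma_tag, sc->dma_map);
 */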

int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{
	tag->iommu = iommu;
	tag->iommu_cookie = cookie;

	return (0);
}
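
/*
 * Example usage (a minimal sketch, not part of this file): a bridge or
 * platform bus driver that fronts an IOMMU attaches that IOMMU to the DMA
 * tags of its children, typically when handing out a child's DMA tag; "sc"
 * and the cookie contents are hypothetical and shown only for illustration.
 *
 *	bus_dma_tag_set_iommu(tag, sc->sc_iommu_dev, &sc->sc_iommu_cookie);
 */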