xref: /freebsd/sys/arm/arm/busdma_machdep.c (revision a0ee8cc6)
1 /*-
2  * Copyright (c) 2012 Ian Lepore
3  * Copyright (c) 2004 Olivier Houchard
4  * Copyright (c) 2002 Peter Grehan
5  * Copyright (c) 1997, 1998 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /*
36  * ARM bus dma support routines.
37  *
38  * XXX Things to investigate / fix some day...
39  *  - What is the earliest that this API can be called?  Could there be any
40  *    fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM?
41  *  - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the
42  *    bus_dmamap_load() function.  This code has historically honored it (and
43  *    still does) in bus_dmamem_alloc().  If we got rid of that we could lose some
44  *    error checking because some resource management calls would become WAITOK
45  *    and thus "cannot fail."
46  *  - The decisions made by _bus_dma_can_bounce() should be made once, at tag
47  *    creation time, and the result stored in the tag.
48  *  - It should be possible to take some shortcuts when mapping a buffer we know
49  *    came from the uma(9) allocators based on what we know about such buffers
50  *    (aligned, contiguous, etc).
51  *  - The allocation of bounce pages could probably be cleaned up, then we could
52  *    retire arm_remap_nocache().
53  */
54 
55 #define _ARM32_BUS_DMA_PRIVATE
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/malloc.h>
59 #include <sys/bus.h>
60 #include <sys/busdma_bufalloc.h>
61 #include <sys/counter.h>
62 #include <sys/interrupt.h>
63 #include <sys/kernel.h>
64 #include <sys/ktr.h>
65 #include <sys/lock.h>
66 #include <sys/memdesc.h>
67 #include <sys/proc.h>
68 #include <sys/mutex.h>
69 #include <sys/sysctl.h>
70 #include <sys/uio.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_extern.h>
76 #include <vm/vm_kern.h>
77 
78 #include <machine/atomic.h>
79 #include <machine/bus.h>
80 #include <machine/cpufunc.h>
81 #include <machine/md_var.h>
82 
83 #define	MAX_BPAGES		64
84 #define	MAX_DMA_SEGMENTS	4096
85 #define	BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
86 #define	BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
87 
88 struct bounce_zone;
89 
90 struct bus_dma_tag {
91 	bus_dma_tag_t		parent;
92 	bus_size_t		alignment;
93 	bus_addr_t		boundary;
94 	bus_addr_t		lowaddr;
95 	bus_addr_t		highaddr;
96 	bus_dma_filter_t	*filter;
97 	void			*filterarg;
98 	bus_size_t		maxsize;
99 	u_int			nsegments;
100 	bus_size_t		maxsegsz;
101 	int			flags;
102 	int			ref_count;
103 	int			map_count;
104 	bus_dma_lock_t		*lockfunc;
105 	void			*lockfuncarg;
106 	struct bounce_zone	*bounce_zone;
107 	/*
108 	 * DMA range for this tag.  If the page doesn't fall within
109 	 * one of these ranges, an error is returned.  The caller
110 	 * may then decide what to do with the transfer.  If the
111 	 * range pointer is NULL, it is ignored.
112 	 */
113 	struct arm32_dma_range	*ranges;
114 	int			_nranges;
115 };
116 
117 struct bounce_page {
118 	vm_offset_t	vaddr;		/* kva of bounce buffer */
119 	bus_addr_t	busaddr;	/* Physical address */
120 	vm_offset_t	datavaddr;	/* kva of client data */
121 	vm_page_t	datapage;	/* physical page of client data */
122 	vm_offset_t	dataoffs;	/* page offset of client data */
123 	bus_size_t	datacount;	/* client data count */
124 	STAILQ_ENTRY(bounce_page) links;
125 };
126 
127 struct sync_list {
128 	vm_offset_t	vaddr;		/* kva of client data */
129 	vm_page_t	pages;		/* starting page of client data */
130 	vm_offset_t	dataoffs;	/* page offset of client data */
131 	bus_size_t	datacount;	/* client data count */
132 };
133 
134 int busdma_swi_pending;
135 
136 struct bounce_zone {
137 	STAILQ_ENTRY(bounce_zone) links;
138 	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
139 	int		total_bpages;
140 	int		free_bpages;
141 	int		reserved_bpages;
142 	int		active_bpages;
143 	int		total_bounced;
144 	int		total_deferred;
145 	int		map_count;
146 	bus_size_t	alignment;
147 	bus_addr_t	lowaddr;
148 	char		zoneid[8];
149 	char		lowaddrid[20];
150 	struct sysctl_ctx_list sysctl_tree;
151 	struct sysctl_oid *sysctl_tree_top;
152 };
153 
154 static struct mtx bounce_lock;
155 static int total_bpages;
156 static int busdma_zonecount;
157 static uint32_t tags_total;
158 static uint32_t maps_total;
159 static uint32_t maps_dmamem;
160 static uint32_t maps_coherent;
161 static counter_u64_t maploads_total;
162 static counter_u64_t maploads_bounced;
163 static counter_u64_t maploads_coherent;
164 static counter_u64_t maploads_dmamem;
165 static counter_u64_t maploads_mbuf;
166 static counter_u64_t maploads_physmem;
167 
168 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
169 
170 SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
171 SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0,
172    "Number of active tags");
173 SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0,
174    "Number of active maps");
175 SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0,
176    "Number of active maps for bus_dmamem_alloc buffers");
177 SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0,
178    "Number of active maps with BUS_DMA_COHERENT flag set");
179 SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD,
180     &maploads_total, "Number of load operations performed");
181 SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD,
182     &maploads_bounced, "Number of load operations that used bounce buffers");
183 SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD,
184     &maploads_coherent, "Number of load operations on BUS_DMA_COHERENT memory");
185 SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD,
186     &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers");
187 SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD,
188     &maploads_mbuf, "Number of load operations for mbufs");
189 SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD,
190     &maploads_physmem, "Number of load operations on physical buffers");
191 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
192    "Total bounce pages");
193 
194 struct bus_dmamap {
195 	struct bp_list		bpages;
196 	int			pagesneeded;
197 	int			pagesreserved;
198 	bus_dma_tag_t		dmat;
199 	struct memdesc		mem;
200 	bus_dmamap_callback_t	*callback;
201 	void			*callback_arg;
202 	int			flags;
203 #define	DMAMAP_COHERENT		(1 << 0)
204 #define	DMAMAP_DMAMEM_ALLOC	(1 << 1)
205 #define	DMAMAP_MBUF		(1 << 2)
206 #define	DMAMAP_CACHE_ALIGNED	(1 << 3)
207 	STAILQ_ENTRY(bus_dmamap) links;
208 	bus_dma_segment_t	*segments;
209 	int			sync_count;
210 	struct sync_list	slist[];
211 };
212 
213 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
214 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
215 
216 static void init_bounce_pages(void *dummy);
217 static int alloc_bounce_zone(bus_dma_tag_t dmat);
218 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
219 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
220     int commit);
221 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
222     vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
223 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
224 static void bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op,
225     int bufaligned);
226 
227 /*
228  * ----------------------------------------------------------------------------
229  * Begin block of code useful to transplant to other implementations.
230  */
231 
232 static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
233 static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */
234 
235 MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
236 MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");
237 
238 static void
239 busdma_init(void *dummy)
240 {
241 
242 	maploads_total    = counter_u64_alloc(M_WAITOK);
243 	maploads_bounced  = counter_u64_alloc(M_WAITOK);
244 	maploads_coherent = counter_u64_alloc(M_WAITOK);
245 	maploads_dmamem   = counter_u64_alloc(M_WAITOK);
246 	maploads_mbuf     = counter_u64_alloc(M_WAITOK);
247 	maploads_physmem  = counter_u64_alloc(M_WAITOK);
248 
249 	/* Create a cache of buffers in standard (cacheable) memory. */
250 	standard_allocator = busdma_bufalloc_create("buffer",
251 	    arm_dcache_align,	/* minimum_alignment */
252 	    NULL,		/* uma_alloc func */
253 	    NULL,		/* uma_free func */
254 	    0);			/* uma_zcreate_flags */
255 
256 	/*
257 	 * Create a cache of buffers in uncacheable memory, to implement the
258 	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
259 	 */
260 	coherent_allocator = busdma_bufalloc_create("coherent",
261 	    arm_dcache_align,	/* minimum_alignment */
262 	    busdma_bufalloc_alloc_uncacheable,
263 	    busdma_bufalloc_free_uncacheable,
264 	    0);			/* uma_zcreate_flags */
265 }
266 
267 /*
268  * This init historically used SI_SUB_VM, but now the init code requires
269  * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get
270  * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by
271  * using SI_SUB_KMEM+1.
272  */
273 SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL);
274 
275 /*
276  * End block of code useful to transplant to other implementations.
277  * ----------------------------------------------------------------------------
278  */
279 
280 /*
281  * Return true if a match is made.
282  *
283  * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
284  *
285  * If paddr is within the bounds of the dma tag, call the filter callback
286  * to check for a match; if there is no filter callback, assume a match.
287  */
288 static int
289 run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
290 {
291 	int retval;
292 
293 	retval = 0;
294 
295 	do {
296 		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
297 		 || ((paddr & (dmat->alignment - 1)) != 0))
298 		 && (dmat->filter == NULL
299 		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
300 			retval = 1;
301 
302 		dmat = dmat->parent;
303 	} while (retval == 0 && dmat != NULL);
304 	return (retval);
305 }
306 
307 /*
308  * This routine checks the exclusion zone constraints from a tag against the
309  * physical RAM available on the machine.  If a tag specifies an exclusion zone
310  * but there's no RAM in that zone, then we avoid allocating resources to bounce
311  * a request, and we can use any memory allocator (as opposed to needing
312  * kmem_alloc_contig() just because it can allocate pages in an address range).
313  *
314  * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
315  * same value on 32-bit architectures) as their lowaddr constraint, and we can't
316  * possibly have RAM at an address higher than the highest address we can
317  * express, so we take a fast out.
318  */
319 static __inline int
320 _bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
321 {
322 	int i;
323 
324 	if (lowaddr >= BUS_SPACE_MAXADDR)
325 		return (0);
326 
327 	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
328 		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
329 		    || (lowaddr < phys_avail[i] &&
330 		    highaddr > phys_avail[i]))
331 			return (1);
332 	}
333 	return (0);
334 }
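
/*
 * A worked example of the check above (the phys_avail layout here is
 * hypothetical, for illustration only): with phys_avail[] holding a single
 * chunk { 0x10000000, 0x20000000 }, a tag whose lowaddr is 0x18000000 has
 * real RAM inside its exclusion zone (lowaddr falls within that chunk), so
 * this returns 1 and bounce resources may be needed.  A tag using
 * BUS_SPACE_MAXADDR as its lowaddr takes the fast out and returns 0, since
 * no RAM can exist above the highest expressible address.
 */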
335 
336 static __inline struct arm32_dma_range *
337 _bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
338     bus_addr_t curaddr)
339 {
340 	struct arm32_dma_range *dr;
341 	int i;
342 
343 	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
344 		if (curaddr >= dr->dr_sysbase &&
345 		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
346 			return (dr);
347 	}
348 
349 	return (NULL);
350 }
351 
352 /*
353  * Convenience function for manipulating driver locks from busdma (during
354  * busdma_swi, for example).  Drivers that don't provide their own locks
355  * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
356  * non-mutex locking scheme don't have to use this at all.
357  */
358 void
359 busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
360 {
361 	struct mtx *dmtx;
362 
363 	dmtx = (struct mtx *)arg;
364 	switch (op) {
365 	case BUS_DMA_LOCK:
366 		mtx_lock(dmtx);
367 		break;
368 	case BUS_DMA_UNLOCK:
369 		mtx_unlock(dmtx);
370 		break;
371 	default:
372 		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
373 	}
374 }
375 
376 /*
377  * dflt_lock should never get called.  It gets put into the dma tag when
378  * lockfunc == NULL, which is only valid if the maps that are associated
379  * with the tag are meant to never be deferred.
380  * XXX Should have a way to identify which driver is responsible here.
381  */
382 static void
383 dflt_lock(void *arg, bus_dma_lock_op_t op)
384 {
385 #ifdef INVARIANTS
386 	panic("driver error: busdma dflt_lock called");
387 #else
388 	printf("DRIVER_ERROR: busdma dflt_lock called\n");
389 #endif
390 }
391 
392 /*
393  * Allocate a device specific dma_tag.
394  */
395 int
396 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
397     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
398     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
399     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
400     void *lockfuncarg, bus_dma_tag_t *dmat)
401 {
402 	bus_dma_tag_t newtag;
403 	int error = 0;
404 	/* Return a NULL tag on failure */
405 	*dmat = NULL;
406 
407 	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
408 	if (newtag == NULL) {
409 		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
410 		    __func__, newtag, 0, error);
411 		return (ENOMEM);
412 	}
413 
414 	newtag->parent = parent;
415 	newtag->alignment = alignment ? alignment : 1;
416 	newtag->boundary = boundary;
417 	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
418 	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
419 	newtag->filter = filter;
420 	newtag->filterarg = filterarg;
421 	newtag->maxsize = maxsize;
422 	newtag->nsegments = nsegments;
423 	newtag->maxsegsz = maxsegsz;
424 	newtag->flags = flags;
425 	newtag->ref_count = 1; /* Count ourself */
426 	newtag->map_count = 0;
427 	newtag->ranges = bus_dma_get_range();
428 	newtag->_nranges = bus_dma_get_range_nb();
429 	if (lockfunc != NULL) {
430 		newtag->lockfunc = lockfunc;
431 		newtag->lockfuncarg = lockfuncarg;
432 	} else {
433 		newtag->lockfunc = dflt_lock;
434 		newtag->lockfuncarg = NULL;
435 	}
436 
437 	/* Take into account any restrictions imposed by our parent tag */
438 	if (parent != NULL) {
439 		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
440 		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
441 		if (newtag->boundary == 0)
442 			newtag->boundary = parent->boundary;
443 		else if (parent->boundary != 0)
444 			newtag->boundary = MIN(parent->boundary,
445 					       newtag->boundary);
446 		if ((newtag->filter != NULL) ||
447 		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
448 			newtag->flags |= BUS_DMA_COULD_BOUNCE;
449 		if (newtag->filter == NULL) {
450 			/*
451 			 * Short circuit looking at our parent directly
452 			 * since we have encapsulated all of its information
453 			 */
454 			newtag->filter = parent->filter;
455 			newtag->filterarg = parent->filterarg;
456 			newtag->parent = parent->parent;
457 		}
458 		if (newtag->parent != NULL)
459 			atomic_add_int(&parent->ref_count, 1);
460 	}
461 	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
462 	 || newtag->alignment > 1)
463 		newtag->flags |= BUS_DMA_COULD_BOUNCE;
464 
465 	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
466 	    (flags & BUS_DMA_ALLOCNOW) != 0) {
467 		struct bounce_zone *bz;
468 
469 		/* Must bounce */
470 
471 		if ((error = alloc_bounce_zone(newtag)) != 0) {
472 			free(newtag, M_BUSDMA);
473 			return (error);
474 		}
475 		bz = newtag->bounce_zone;
476 
477 		if (ptoa(bz->total_bpages) < maxsize) {
478 			int pages;
479 
480 			pages = atop(maxsize) - bz->total_bpages;
481 
482 			/* Add pages to our bounce pool */
483 			if (alloc_bounce_pages(newtag, pages) < pages)
484 				error = ENOMEM;
485 		}
486 		/* Performed initial allocation */
487 		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
488 	} else
489 		newtag->bounce_zone = NULL;
490 
491 	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
492 	    __func__, newtag, newtag->flags, error);
493 	if (error != 0) {
494 		free(newtag, M_BUSDMA);
495 	} else {
496 		atomic_add_32(&tags_total, 1);
497 		*dmat = newtag;
498 	}
499 	return (error);
500 }
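
/*
 * Illustrative sketch (hypothetical driver code, not used by this file): a
 * driver creating a tag for a single-segment, 4-byte-aligned, 64KB
 * descriptor ring might call the function above roughly as follows, using
 * busdma_lock_mutex() defined earlier with its own softc mutex.  The "sc"
 * fields are assumptions made up for the example.
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(sc->dev),	(parent: inherit platform limits)
 *	    4, 0,			(alignment, boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr)
 *	    BUS_SPACE_MAXADDR,		(highaddr)
 *	    NULL, NULL,			(filter, filterarg)
 *	    65536, 1, 65536,		(maxsize, nsegments, maxsegsz)
 *	    0,				(flags)
 *	    busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->ring_dmat);
 */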
501 
502 int
503 bus_dma_tag_destroy(bus_dma_tag_t dmat)
504 {
505 	bus_dma_tag_t dmat_copy;
506 	int error;
507 
508 	error = 0;
509 	dmat_copy = dmat;
510 
511 	if (dmat != NULL) {
512 
513 		if (dmat->map_count != 0) {
514 			error = EBUSY;
515 			goto out;
516 		}
517 
518 		while (dmat != NULL) {
519 			bus_dma_tag_t parent;
520 
521 			parent = dmat->parent;
522 			atomic_subtract_int(&dmat->ref_count, 1);
523 			if (dmat->ref_count == 0) {
524 				atomic_subtract_32(&tags_total, 1);
525 				free(dmat, M_BUSDMA);
526 				/*
527 				 * Last reference count, so
528 				 * release our reference
529 				 * count on our parent.
530 				 */
531 				dmat = parent;
532 			} else
533 				dmat = NULL;
534 		}
535 	}
536 out:
537 	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
538 	return (error);
539 }
540 
541 static int
542 allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
543 {
544 	int error;
545 
546 	/*
547 	 * Bouncing might be required if the driver asks for an active
548 	 * exclusion region, a data alignment that is stricter than 1, and/or
549 	 * an active address boundary.
550 	 */
551 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
552 
553 		/* Must bounce */
554 		struct bounce_zone *bz;
555 		int maxpages;
556 
557 		if (dmat->bounce_zone == NULL) {
558 			if ((error = alloc_bounce_zone(dmat)) != 0) {
559 				return (error);
560 			}
561 		}
562 		bz = dmat->bounce_zone;
563 
564 		/* Initialize the new map */
565 		STAILQ_INIT(&(map->bpages));
566 
567 		/*
568 		 * Attempt to add pages to our pool on a per-instance
569 		 * basis up to a sane limit.
570 		 */
571 		maxpages = MAX_BPAGES;
572 		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
573 		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
574 			int pages;
575 
576 			pages = MAX(atop(dmat->maxsize), 1);
577 			pages = MIN(maxpages - bz->total_bpages, pages);
578 			pages = MAX(pages, 1);
579 			if (alloc_bounce_pages(dmat, pages) < pages)
580 				return (ENOMEM);
581 
582 			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
583 				dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
584 		}
585 		bz->map_count++;
586 	}
587 	return (0);
588 }
589 
590 static bus_dmamap_t
591 allocate_map(bus_dma_tag_t dmat, int mflags)
592 {
593 	int mapsize, segsize;
594 	bus_dmamap_t map;
595 
596 	/*
597 	 * Allocate the map.  The map structure ends with an embedded
598 	 * variable-sized array of sync_list structures.  Following that
599 	 * we allocate enough extra space to hold the array of bus_dma_segments.
600 	 */
601 	KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
602 	   ("cannot allocate %u dma segments (max is %u)",
603 	    dmat->nsegments, MAX_DMA_SEGMENTS));
604 	segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
605 	mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
606 	map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO);
607 	if (map == NULL) {
608 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
609 		return (NULL);
610 	}
611 	map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize);
612 	return (map);
613 }
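
/*
 * Layout of the allocation made above for a tag with N segments (a sketch of
 * the size arithmetic, not a separate structure):
 *
 *	+--------------------+----------------------+------------------------+
 *	| struct bus_dmamap  | N x struct sync_list | N x bus_dma_segment_t  |
 *	+--------------------+----------------------+------------------------+
 *	^ map                  (map->slist[])         ^ map->segments
 */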
614 
615 /*
616  * Allocate a handle for mapping from kva/uva/physical
617  * address space into bus device space.
618  */
619 int
620 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
621 {
622 	bus_dmamap_t map;
623 	int error = 0;
624 
625 	*mapp = map = allocate_map(dmat, M_NOWAIT);
626 	if (map == NULL) {
627 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
628 		return (ENOMEM);
629 	}
630 
631 	/*
632 	 * Bouncing might be required if the driver asks for an exclusion
633 	 * region, a data alignment that is stricter than 1, or DMA that begins
634 	 * or ends with a partial cacheline.  Whether bouncing will actually
635 	 * happen can't be known until mapping time, but we need to pre-allocate
636 	 * resources now because we might not be allowed to at mapping time.
637 	 */
638 	error = allocate_bz_and_pages(dmat, map);
639 	if (error != 0) {
640 		free(map, M_BUSDMA);
641 		*mapp = NULL;
642 		return (error);
643 	}
644 	if (map->flags & DMAMAP_COHERENT)
645 		atomic_add_32(&maps_coherent, 1);
646 	atomic_add_32(&maps_total, 1);
647 	dmat->map_count++;
648 
649 	return (0);
650 }
651 
652 /*
653  * Destroy a handle for mapping from kva/uva/physical
654  * address space into bus device space.
655  */
656 int
657 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
658 {
659 
660 	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
661 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
662 		    __func__, dmat, EBUSY);
663 		return (EBUSY);
664 	}
665 	if (dmat->bounce_zone)
666 		dmat->bounce_zone->map_count--;
667 	if (map->flags & DMAMAP_COHERENT)
668 		atomic_subtract_32(&maps_coherent, 1);
669 	atomic_subtract_32(&maps_total, 1);
670 	free(map, M_BUSDMA);
671 	dmat->map_count--;
672 	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
673 	return (0);
674 }
675 
676 /*
677  * Allocate a piece of memory that can be efficiently mapped into bus device
678  * space based on the constraints listed in the dma tag.  Returns a pointer to
679  * the allocated memory, and a pointer to an associated bus_dmamap.
680  */
681 int
682 bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
683     bus_dmamap_t *mapp)
684 {
685 	busdma_bufalloc_t ba;
686 	struct busdma_bufzone *bufzone;
687 	bus_dmamap_t map;
688 	vm_memattr_t memattr;
689 	int mflags;
690 
691 	if (flags & BUS_DMA_NOWAIT)
692 		mflags = M_NOWAIT;
693 	else
694 		mflags = M_WAITOK;
695 	if (flags & BUS_DMA_ZERO)
696 		mflags |= M_ZERO;
697 
698 	*mapp = map = allocate_map(dmat, mflags);
699 	if (map == NULL) {
700 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
701 		    __func__, dmat, dmat->flags, ENOMEM);
702 		return (ENOMEM);
703 	}
704 	map->flags = DMAMAP_DMAMEM_ALLOC;
705 
706 	/* Choose a busdma buffer allocator based on memory type flags. */
707 	if (flags & BUS_DMA_COHERENT) {
708 		memattr = VM_MEMATTR_UNCACHEABLE;
709 		ba = coherent_allocator;
710 		map->flags |= DMAMAP_COHERENT;
711 	} else {
712 		memattr = VM_MEMATTR_DEFAULT;
713 		ba = standard_allocator;
714 	}
715 
716 	/*
717 	 * Try to find a bufzone in the allocator that holds a cache of buffers
718 	 * of the right size for this request.  If the buffer is too big to be
719 	 * held in the allocator cache, this returns NULL.
720 	 */
721 	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
722 
723 	/*
724 	 * Allocate the buffer from the uma(9) allocator if...
725 	 *  - It's small enough to be in the allocator (bufzone not NULL).
726 	 *  - The alignment constraint isn't larger than the allocation size
727 	 *    (the allocator aligns buffers to their size boundaries).
728 	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
729 	 * else allocate non-contiguous pages if...
730 	 *  - The page count that could get allocated doesn't exceed nsegments.
731 	 *  - The alignment constraint isn't larger than a page boundary.
732 	 *  - There are no boundary-crossing constraints.
733 	 * else allocate a block of contiguous pages because one or more of the
734 	 * constraints is something that only the contig allocator can fulfill.
735 	 */
736 	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
737 	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
738 		*vaddr = uma_zalloc(bufzone->umazone, mflags);
739 	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
740 	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
741 		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
742 		    mflags, 0, dmat->lowaddr, memattr);
743 	} else {
744 		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
745 		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
746 		    memattr);
747 	}
748 	if (*vaddr == NULL) {
749 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
750 		    __func__, dmat, dmat->flags, ENOMEM);
751 		free(map, M_BUSDMA);
752 		*mapp = NULL;
753 		return (ENOMEM);
754 	}
755 	if (map->flags & DMAMAP_COHERENT)
756 		atomic_add_32(&maps_coherent, 1);
757 	atomic_add_32(&maps_dmamem, 1);
758 	atomic_add_32(&maps_total, 1);
759 	dmat->map_count++;
760 
761 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
762 	    __func__, dmat, dmat->flags, 0);
763 	return (0);
764 }
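
/*
 * Illustrative sketch (hypothetical driver code): a typical pairing of the
 * allocator above with bus_dmamap_load(9).  BUS_DMA_COHERENT selects the
 * uncacheable allocator, so the ring needs no explicit cache maintenance;
 * the callback receives the bus address of the single resulting segment.
 *
 *	error = bus_dmamem_alloc(sc->ring_dmat, &sc->ring_vaddr,
 *	    BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_WAITOK, &sc->ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->ring_dmat, sc->ring_map,
 *		    sc->ring_vaddr, 65536, ring_load_cb, sc, BUS_DMA_NOWAIT);
 *
 * where ring_load_cb() saves segs[0].ds_addr for programming the device.
 */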
765 
766 /*
767  * Free a piece of memory that was allocated via bus_dmamem_alloc, along with
768  * its associated map.
769  */
770 void
771 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
772 {
773 	struct busdma_bufzone *bufzone;
774 	busdma_bufalloc_t ba;
775 
776 	if (map->flags & DMAMAP_COHERENT)
777 		ba = coherent_allocator;
778 	else
779 		ba = standard_allocator;
780 
781 	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
782 
783 	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
784 	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
785 		uma_zfree(bufzone->umazone, vaddr);
786 	else
787 		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
788 
789 	dmat->map_count--;
790 	if (map->flags & DMAMAP_COHERENT)
791 		atomic_subtract_32(&maps_coherent, 1);
792 	atomic_subtract_32(&maps_total, 1);
793 	atomic_subtract_32(&maps_dmamem, 1);
794 	free(map, M_BUSDMA);
795 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
796 }
797 
798 static void
799 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
800     bus_size_t buflen, int flags)
801 {
802 	bus_addr_t curaddr;
803 	bus_size_t sgsize;
804 
805 	if (map->pagesneeded == 0) {
806 		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
807 		    dmat->lowaddr, dmat->boundary, dmat->alignment);
808 		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
809 		    map, map->pagesneeded);
810 		/*
811 		 * Count the number of bounce pages
812 		 * needed in order to complete this transfer
813 		 */
814 		curaddr = buf;
815 		while (buflen != 0) {
816 			sgsize = MIN(buflen, dmat->maxsegsz);
817 			if (run_filter(dmat, curaddr) != 0) {
818 				sgsize = MIN(sgsize,
819 				    PAGE_SIZE - (curaddr & PAGE_MASK));
820 				map->pagesneeded++;
821 			}
822 			curaddr += sgsize;
823 			buflen -= sgsize;
824 		}
825 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
826 	}
827 }
828 
829 static void
830 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
831     void *buf, bus_size_t buflen, int flags)
832 {
833 	vm_offset_t vaddr;
834 	vm_offset_t vendaddr;
835 	bus_addr_t paddr;
836 
837 	if (map->pagesneeded == 0) {
838 		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
839 		    dmat->lowaddr, dmat->boundary, dmat->alignment);
840 		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
841 		    map, map->pagesneeded);
842 		/*
843 		 * Count the number of bounce pages
844 		 * needed in order to complete this transfer
845 		 */
846 		vaddr = trunc_page((vm_offset_t)buf);
847 		vendaddr = (vm_offset_t)buf + buflen;
848 
849 		while (vaddr < vendaddr) {
850 			if (__predict_true(pmap == kernel_pmap))
851 				paddr = pmap_kextract(vaddr);
852 			else
853 				paddr = pmap_extract(pmap, vaddr);
854 			if (run_filter(dmat, paddr) != 0)
855 				map->pagesneeded++;
856 			vaddr += PAGE_SIZE;
857 		}
858 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
859 	}
860 }
861 
862 static int
863 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
864 {
865 
866 	/* Reserve Necessary Bounce Pages */
867 	mtx_lock(&bounce_lock);
868 	if (flags & BUS_DMA_NOWAIT) {
869 		if (reserve_bounce_pages(dmat, map, 0) != 0) {
870 			mtx_unlock(&bounce_lock);
871 			return (ENOMEM);
872 		}
873 	} else {
874 		if (reserve_bounce_pages(dmat, map, 1) != 0) {
875 			/* Queue us for resources */
876 			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
877 			mtx_unlock(&bounce_lock);
878 			return (EINPROGRESS);
879 		}
880 	}
881 	mtx_unlock(&bounce_lock);
882 
883 	return (0);
884 }
885 
886 /*
887  * Add a single contiguous physical range to the segment list.
888  */
889 static int
890 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
891     bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
892 {
893 	bus_addr_t baddr, bmask;
894 	int seg;
895 
896 	/*
897 	 * Make sure we don't cross any boundaries.
898 	 */
899 	bmask = ~(dmat->boundary - 1);
900 	if (dmat->boundary > 0) {
901 		baddr = (curaddr + dmat->boundary) & bmask;
902 		if (sgsize > (baddr - curaddr))
903 			sgsize = (baddr - curaddr);
904 	}
905 	if (dmat->ranges) {
906 		struct arm32_dma_range *dr;
907 
908 		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
909 		    curaddr);
910 		if (dr == NULL)
911 			return (0);
912 		/*
913 		 * In a valid DMA range.  Translate the physical
914 		 * memory address to an address in the DMA window.
915 		 */
916 		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
917 
918 	}
919 
920 	seg = *segp;
921 	/*
922 	 * Insert chunk into a segment, coalescing with
923 	 * the previous segment if possible.
924 	 */
925 	if (seg >= 0 &&
926 	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
927 	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
928 	    (dmat->boundary == 0 ||
929 	    (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
930 		segs[seg].ds_len += sgsize;
931 	} else {
932 		if (++seg >= dmat->nsegments)
933 			return (0);
934 		segs[seg].ds_addr = curaddr;
935 		segs[seg].ds_len = sgsize;
936 	}
937 	*segp = seg;
938 	return (sgsize);
939 }
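
/*
 * A worked example of the boundary clipping above (numbers are illustrative):
 * with dmat->boundary = 0x1000 and an incoming chunk at curaddr = 0x3ff40 of
 * sgsize = 0x200, baddr = (0x3ff40 + 0x1000) & ~0xfff = 0x40000, so sgsize is
 * trimmed to 0xc0 and the remainder begins a new segment at 0x40000 on the
 * caller's next pass.  A chunk that starts exactly where the previous segment
 * ends, fits within maxsegsz, and stays in the same boundary window is
 * instead coalesced into that previous segment.
 */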
940 
941 /*
942  * Utility function to load a physical buffer.  segp contains
943  * the starting segment on entrance, and the ending segment on exit.
944  */
945 int
946 _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
947     bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
948 {
949 	bus_addr_t curaddr;
950 	bus_addr_t sl_end = 0;
951 	bus_size_t sgsize;
952 	struct sync_list *sl;
953 	int error;
954 
955 	if (segs == NULL)
956 		segs = map->segments;
957 
958 	counter_u64_add(maploads_total, 1);
959 	counter_u64_add(maploads_physmem, 1);
960 
961 	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
962 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
963 		if (map->pagesneeded != 0) {
964 			counter_u64_add(maploads_bounced, 1);
965 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
966 			if (error)
967 				return (error);
968 		}
969 	}
970 
971 	sl = map->slist + map->sync_count - 1;
972 
973 	while (buflen > 0) {
974 		curaddr = buf;
975 		sgsize = MIN(buflen, dmat->maxsegsz);
976 		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
977 		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
978 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
979 			curaddr = add_bounce_page(dmat, map, 0, curaddr,
980 			    sgsize);
981 		} else {
982 			if (map->sync_count > 0)
983 				sl_end = VM_PAGE_TO_PHYS(sl->pages) +
984 				    sl->dataoffs + sl->datacount;
985 
986 			if (map->sync_count == 0 || curaddr != sl_end) {
987 				if (++map->sync_count > dmat->nsegments)
988 					break;
989 				sl++;
990 				sl->vaddr = 0;
991 				sl->datacount = sgsize;
992 				sl->pages = PHYS_TO_VM_PAGE(curaddr);
993 				sl->dataoffs = curaddr & PAGE_MASK;
994 			} else
995 				sl->datacount += sgsize;
996 		}
997 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
998 		    segp);
999 		if (sgsize == 0)
1000 			break;
1001 		buf += sgsize;
1002 		buflen -= sgsize;
1003 	}
1004 
1005 	/*
1006 	 * Did we fit?
1007 	 */
1008 	if (buflen != 0) {
1009 		_bus_dmamap_unload(dmat, map);
1010 		return (EFBIG); /* XXX better return value here? */
1011 	}
1012 	return (0);
1013 }
1014 
1015 int
1016 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
1017     struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
1018     bus_dma_segment_t *segs, int *segp)
1019 {
1020 
1021 	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
1022 	    segs, segp));
1023 }
1024 
1025 /*
1026  * Utility function to load a linear buffer.  segp contains
1027  * the starting segment on entrance, and the ending segment on exit.
1028  */
1029 int
1030 _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
1031     bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
1032     int *segp)
1033 {
1034 	bus_size_t sgsize;
1035 	bus_addr_t curaddr;
1036 	bus_addr_t sl_pend = 0;
1037 	struct sync_list *sl;
1038 	vm_offset_t kvaddr;
1039 	vm_offset_t vaddr = (vm_offset_t)buf;
1040 	vm_offset_t sl_vend = 0;
1041 	int error = 0;
1042 
1043 	counter_u64_add(maploads_total, 1);
1044 	if (map->flags & DMAMAP_COHERENT)
1045 		counter_u64_add(maploads_coherent, 1);
1046 	if (map->flags & DMAMAP_DMAMEM_ALLOC)
1047 		counter_u64_add(maploads_dmamem, 1);
1048 
1049 	if (segs == NULL)
1050 		segs = map->segments;
1051 	if (flags & BUS_DMA_LOAD_MBUF) {
1052 		counter_u64_add(maploads_mbuf, 1);
1053 		map->flags |= DMAMAP_CACHE_ALIGNED;
1054 	}
1055 
1056 	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
1057 		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
1058 		if (map->pagesneeded != 0) {
1059 			counter_u64_add(maploads_bounced, 1);
1060 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
1061 			if (error)
1062 				return (error);
1063 		}
1064 	}
1065 	CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, "
1066 	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
1067 
1068 	sl = map->slist + map->sync_count - 1;
1069 
1070 	while (buflen > 0) {
1071 		/*
1072 		 * Get the physical address for this segment.
1073 		 */
1074 		if (__predict_true(pmap == kernel_pmap)) {
1075 			curaddr = pmap_kextract(vaddr);
1076 			kvaddr = vaddr;
1077 		} else {
1078 			curaddr = pmap_extract(pmap, vaddr);
1079 			map->flags &= ~DMAMAP_COHERENT;
1080 			kvaddr = 0;
1081 		}
1082 
1083 		/*
1084 		 * Compute the segment size, and adjust counts.
1085 		 */
1086 		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
1087 		if (sgsize > dmat->maxsegsz)
1088 			sgsize = dmat->maxsegsz;
1089 		if (buflen < sgsize)
1090 			sgsize = buflen;
1091 
1092 		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
1093 		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
1094 			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
1095 			    sgsize);
1096 		} else {
1097 			if (map->sync_count > 0) {
1098 				sl_pend = VM_PAGE_TO_PHYS(sl->pages) +
1099 				    sl->dataoffs + sl->datacount;
1100 				sl_vend = sl->vaddr + sl->datacount;
1101 			}
1102 
1103 			if (map->sync_count == 0 ||
1104 			    (kvaddr != 0 && kvaddr != sl_vend) ||
1105 			    (kvaddr == 0 && curaddr != sl_pend)) {
1106 
1107 				if (++map->sync_count > dmat->nsegments)
1108 					goto cleanup;
1109 				sl++;
1110 				sl->vaddr = kvaddr;
1111 				sl->datacount = sgsize;
1112 				sl->pages = PHYS_TO_VM_PAGE(curaddr);
1113 				sl->dataoffs = curaddr & PAGE_MASK;
1114 			} else
1115 				sl->datacount += sgsize;
1116 		}
1117 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1118 		    segp);
1119 		if (sgsize == 0)
1120 			break;
1121 		vaddr += sgsize;
1122 		buflen -= sgsize;
1123 	}
1124 
1125 cleanup:
1126 	/*
1127 	 * Did we fit?
1128 	 */
1129 	if (buflen != 0) {
1130 		_bus_dmamap_unload(dmat, map);
1131 		return (EFBIG); /* XXX better return value here? */
1132 	}
1133 	return (0);
1134 }
1135 
1136 void
1137 __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem,
1138     bus_dmamap_callback_t *callback, void *callback_arg)
1139 {
1140 
1141 	KASSERT(dmat != NULL, ("dmatag is NULL"));
1142 	KASSERT(map != NULL, ("dmamap is NULL"));
1143 	map->mem = *mem;
1144 	map->callback = callback;
1145 	map->callback_arg = callback_arg;
1146 }
1147 
1148 bus_dma_segment_t *
1149 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
1150     bus_dma_segment_t *segs, int nsegs, int error)
1151 {
1152 
1153 	if (segs == NULL)
1154 		segs = map->segments;
1155 	return (segs);
1156 }
1157 
1158 /*
1159  * Release the mapping held by map.
1160  */
1161 void
1162 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
1163 {
1164 	struct bounce_page *bpage;
1165 	struct bounce_zone *bz;
1166 
1167 	if ((bz = dmat->bounce_zone) != NULL) {
1168 		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
1169 			STAILQ_REMOVE_HEAD(&map->bpages, links);
1170 			free_bounce_page(dmat, bpage);
1171 		}
1172 
1173 		bz = dmat->bounce_zone;
1174 		bz->free_bpages += map->pagesreserved;
1175 		bz->reserved_bpages -= map->pagesreserved;
1176 		map->pagesreserved = 0;
1177 		map->pagesneeded = 0;
1178 	}
1179 	map->sync_count = 0;
1180 	map->flags &= ~DMAMAP_MBUF;
1181 }
1182 
1183 static void
1184 bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op,
1185     int bufaligned)
1186 {
1187 	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
1188 	register_t s;
1189 	int partial;
1190 
1191 	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
1192 		cpu_dcache_wb_range(buf, len);
1193 		cpu_l2cache_wb_range(buf, len);
1194 	}
1195 
1196 	/*
1197 	 * If the caller promises the buffer is properly aligned to a cache line
1198 	 * (even if the call parms make it look like it isn't) we can avoid
1199 	 * attempting to preserve the non-DMA part of the cache line in the
1200 	 * POSTREAD case, but we MUST still do a writeback in the PREREAD case.
1201 	 *
1202 	 * This covers the case of mbufs, where we know how they're aligned and
1203 	 * know the CPU doesn't touch the header in front of the DMA data area
1204 	 * during the IO, but it may have touched it right before invoking the
1205 	 * sync, so a PREREAD writeback is required.
1206 	 *
1207 	 * It also handles buffers we created in bus_dmamem_alloc(), which are
1208 	 * always aligned and padded to cache line size even if the IO length
1209 	 * isn't a multiple of cache line size.  In this case the PREREAD
1210 	 * writeback probably isn't required, but it's harmless.
1211 	 */
1212 	partial = (((vm_offset_t)buf) | len) & arm_dcache_align_mask;
1213 
1214 	if (op & BUS_DMASYNC_PREREAD) {
1215 		if (!(op & BUS_DMASYNC_PREWRITE) && !partial) {
1216 			cpu_dcache_inv_range(buf, len);
1217 			cpu_l2cache_inv_range(buf, len);
1218 		} else {
1219 			cpu_dcache_wbinv_range(buf, len);
1220 			cpu_l2cache_wbinv_range(buf, len);
1221 		}
1222 	}
1223 	if (op & BUS_DMASYNC_POSTREAD) {
1224 		if (partial && !bufaligned) {
1225 			s = intr_disable();
1226 			if (buf & arm_dcache_align_mask)
1227 				memcpy(_tmp_cl, (void *)(buf &
1228 				    ~arm_dcache_align_mask),
1229 				    buf & arm_dcache_align_mask);
1230 			if ((buf + len) & arm_dcache_align_mask)
1231 				memcpy(_tmp_clend,
1232 				    (void *)(buf + len),
1233 				    arm_dcache_align -
1234 				    ((buf + len) & arm_dcache_align_mask));
1235 		}
1236 		cpu_dcache_inv_range(buf, len);
1237 		cpu_l2cache_inv_range(buf, len);
1238 		if (partial && !bufaligned) {
1239 			if (buf & arm_dcache_align_mask)
1240 				memcpy((void *)(buf &
1241 				    ~arm_dcache_align_mask), _tmp_cl,
1242 				    buf & arm_dcache_align_mask);
1243 			if ((buf + len) & arm_dcache_align_mask)
1244 				memcpy((void *)(buf + len),
1245 				    _tmp_clend, arm_dcache_align -
1246 				    ((buf + len) & arm_dcache_align_mask));
1247 			intr_restore(s);
1248 		}
1249 	}
1250 }
1251 
1252 static void
1253 bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op,
1254     int bufaligned)
1255 {
1256 	vm_offset_t tempvaddr;
1257 	vm_page_t curpage;
1258 	size_t npages;
1259 
1260 	if (sl->vaddr != 0) {
1261 		bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op, bufaligned);
1262 		return;
1263 	}
1264 
1265 	tempvaddr = 0;
1266 	npages = atop(round_page(sl->dataoffs + sl->datacount));
1267 
1268 	for (curpage = sl->pages; curpage != sl->pages + npages; ++curpage) {
1269 		/*
1270 		 * If the page is mapped to some other VA that hasn't
1271 		 * been supplied to busdma, then pmap_quick_enter_page()
1272 		 * will find all duplicate mappings and mark them
1273 		 * uncacheable.
1274 		 * That will also do any necessary wb/inv.  Otherwise,
1275 		 * if the page is truly unmapped, then we don't actually
1276 		 * need to do cache maintenance.
1277 		 * XXX: May overwrite DMA'ed data in the POSTREAD
1278 		 * case where the CPU has written to a cacheline not
1279 		 * completely covered by the DMA region.
1280 		 */
1281 		KASSERT(VM_PAGE_TO_PHYS(curpage) == VM_PAGE_TO_PHYS(sl->pages) +
1282 		    ptoa(curpage - sl->pages),
1283 		    ("unexpected vm_page_t phys: 0x%08x != 0x%08x",
1284 		    VM_PAGE_TO_PHYS(curpage), VM_PAGE_TO_PHYS(sl->pages) +
1285 		    ptoa(curpage - sl->pages)));
1286 		tempvaddr = pmap_quick_enter_page(curpage);
1287 		pmap_quick_remove_page(tempvaddr);
1288 	}
1289 }
1290 
1291 static void
1292 _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1293 {
1294 	struct bounce_page *bpage;
1295 	vm_offset_t datavaddr, tempvaddr;
1296 
1297 	if ((op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)) == 0)
1298 		return;
1299 
1300 	STAILQ_FOREACH(bpage, &map->bpages, links) {
1301 		tempvaddr = 0;
1302 		datavaddr = bpage->datavaddr;
1303 		if (op & BUS_DMASYNC_PREWRITE) {
1304 			if (datavaddr == 0) {
1305 				tempvaddr =
1306 				    pmap_quick_enter_page(bpage->datapage);
1307 				datavaddr = tempvaddr | bpage->dataoffs;
1308 			}
1309 			bcopy((void *)datavaddr,
1310 			    (void *)bpage->vaddr, bpage->datacount);
1311 			if (tempvaddr != 0)
1312 				pmap_quick_remove_page(tempvaddr);
1313 			cpu_dcache_wb_range(bpage->vaddr, bpage->datacount);
1314 			cpu_l2cache_wb_range(bpage->vaddr, bpage->datacount);
1315 			dmat->bounce_zone->total_bounced++;
1316 		}
1317 		if (op & BUS_DMASYNC_POSTREAD) {
1318 			cpu_dcache_inv_range(bpage->vaddr, bpage->datacount);
1319 			cpu_l2cache_inv_range(bpage->vaddr, bpage->datacount);
1320 			if (datavaddr == 0) {
1321 				tempvaddr =
1322 				    pmap_quick_enter_page(bpage->datapage);
1323 				datavaddr = tempvaddr | bpage->dataoffs;
1324 			}
1325 			bcopy((void *)bpage->vaddr,
1326 			    (void *)datavaddr, bpage->datacount);
1327 			if (tempvaddr != 0)
1328 				pmap_quick_remove_page(tempvaddr);
1329 			dmat->bounce_zone->total_bounced++;
1330 		}
1331 	}
1332 }
1333 
1334 void
1335 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1336 {
1337 	struct sync_list *sl, *end;
1338 	int bufaligned;
1339 
1340 	if (op == BUS_DMASYNC_POSTWRITE)
1341 		return;
1342 	if (map->flags & DMAMAP_COHERENT)
1343 		goto drain;
1344 	if (STAILQ_FIRST(&map->bpages))
1345 		_bus_dmamap_sync_bp(dmat, map, op);
1346 	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
1347 	bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED);
1348 	if (map->sync_count) {
1349 		end = &map->slist[map->sync_count];
1350 		for (sl = &map->slist[0]; sl != end; sl++)
1351 			bus_dmamap_sync_sl(sl, op, bufaligned);
1352 	}
1353 
1354 drain:
1355 
1356 	cpu_drain_writebuf();
1357 }
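
/*
 * Illustrative sketch (hypothetical driver code): the expected ordering of
 * sync calls around one DMA transaction using the routine above.  PREWRITE
 * before starting the I/O pushes dirty cache lines (or bounce copies) toward
 * the device; PREREAD/POSTREAD around a device write keep the CPU from
 * seeing stale cache lines afterwards.
 *
 *	bus_dmamap_sync(sc->dmat, map, BUS_DMASYNC_PREREAD |
 *	    BUS_DMASYNC_PREWRITE);
 *	(start the hardware and wait for the completion interrupt)
 *	bus_dmamap_sync(sc->dmat, map, BUS_DMASYNC_POSTREAD |
 *	    BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->dmat, map);
 */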
1358 
1359 static void
1360 init_bounce_pages(void *dummy __unused)
1361 {
1362 
1363 	total_bpages = 0;
1364 	STAILQ_INIT(&bounce_zone_list);
1365 	STAILQ_INIT(&bounce_map_waitinglist);
1366 	STAILQ_INIT(&bounce_map_callbacklist);
1367 	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1368 }
1369 SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1370 
1371 static struct sysctl_ctx_list *
1372 busdma_sysctl_tree(struct bounce_zone *bz)
1373 {
1374 
1375 	return (&bz->sysctl_tree);
1376 }
1377 
1378 static struct sysctl_oid *
1379 busdma_sysctl_tree_top(struct bounce_zone *bz)
1380 {
1381 
1382 	return (bz->sysctl_tree_top);
1383 }
1384 
1385 static int
1386 alloc_bounce_zone(bus_dma_tag_t dmat)
1387 {
1388 	struct bounce_zone *bz;
1389 
1390 	/* Check to see if we already have a suitable zone */
1391 	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1392 		if ((dmat->alignment <= bz->alignment) &&
1393 		    (dmat->lowaddr >= bz->lowaddr)) {
1394 			dmat->bounce_zone = bz;
1395 			return (0);
1396 		}
1397 	}
1398 
1399 	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
1400 	    M_NOWAIT | M_ZERO)) == NULL)
1401 		return (ENOMEM);
1402 
1403 	STAILQ_INIT(&bz->bounce_page_list);
1404 	bz->free_bpages = 0;
1405 	bz->reserved_bpages = 0;
1406 	bz->active_bpages = 0;
1407 	bz->lowaddr = dmat->lowaddr;
1408 	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
1409 	bz->map_count = 0;
1410 	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1411 	busdma_zonecount++;
1412 	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1413 	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1414 	dmat->bounce_zone = bz;
1415 
1416 	sysctl_ctx_init(&bz->sysctl_tree);
1417 	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1418 	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1419 	    CTLFLAG_RD, 0, "");
1420 	if (bz->sysctl_tree_top == NULL) {
1421 		sysctl_ctx_free(&bz->sysctl_tree);
1422 		return (0);	/* XXX error code? */
1423 	}
1424 
1425 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1426 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1427 	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1428 	    "Total bounce pages");
1429 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1430 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1431 	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1432 	    "Free bounce pages");
1433 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1434 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1435 	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1436 	    "Reserved bounce pages");
1437 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1438 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1439 	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1440 	    "Active bounce pages");
1441 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1442 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1443 	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1444 	    "Total bounce requests (pages bounced)");
1445 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1446 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1447 	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1448 	    "Total bounce requests that were deferred");
1449 	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1450 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1451 	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1452 	SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz),
1453 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1454 	    "alignment", CTLFLAG_RD, &bz->alignment, "");
1455 
1456 	return (0);
1457 }
1458 
1459 static int
1460 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1461 {
1462 	struct bounce_zone *bz;
1463 	int count;
1464 
1465 	bz = dmat->bounce_zone;
1466 	count = 0;
1467 	while (numpages > 0) {
1468 		struct bounce_page *bpage;
1469 
1470 		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
1471 		    M_NOWAIT | M_ZERO);
1472 
1473 		if (bpage == NULL)
1474 			break;
1475 		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
1476 		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
1477 		if (bpage->vaddr == 0) {
1478 			free(bpage, M_BUSDMA);
1479 			break;
1480 		}
1481 		bpage->busaddr = pmap_kextract(bpage->vaddr);
1482 		mtx_lock(&bounce_lock);
1483 		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1484 		total_bpages++;
1485 		bz->total_bpages++;
1486 		bz->free_bpages++;
1487 		mtx_unlock(&bounce_lock);
1488 		count++;
1489 		numpages--;
1490 	}
1491 	return (count);
1492 }
1493 
1494 static int
1495 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1496 {
1497 	struct bounce_zone *bz;
1498 	int pages;
1499 
1500 	mtx_assert(&bounce_lock, MA_OWNED);
1501 	bz = dmat->bounce_zone;
1502 	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1503 	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1504 		return (map->pagesneeded - (map->pagesreserved + pages));
1505 	bz->free_bpages -= pages;
1506 	bz->reserved_bpages += pages;
1507 	map->pagesreserved += pages;
1508 	pages = map->pagesneeded - map->pagesreserved;
1509 
1510 	return (pages);
1511 }
1512 
1513 static bus_addr_t
1514 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1515     bus_addr_t addr, bus_size_t size)
1516 {
1517 	struct bounce_zone *bz;
1518 	struct bounce_page *bpage;
1519 
1520 	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1521 	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
1522 
1523 	bz = dmat->bounce_zone;
1524 	if (map->pagesneeded == 0)
1525 		panic("add_bounce_page: map doesn't need any pages");
1526 	map->pagesneeded--;
1527 
1528 	if (map->pagesreserved == 0)
1529 		panic("add_bounce_page: map doesn't need any pages");
1530 	map->pagesreserved--;
1531 
1532 	mtx_lock(&bounce_lock);
1533 	bpage = STAILQ_FIRST(&bz->bounce_page_list);
1534 	if (bpage == NULL)
1535 		panic("add_bounce_page: free page list is empty");
1536 
1537 	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1538 	bz->reserved_bpages--;
1539 	bz->active_bpages++;
1540 	mtx_unlock(&bounce_lock);
1541 
1542 	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1543 		/* Page offset needs to be preserved. */
1544 		bpage->vaddr |= addr & PAGE_MASK;
1545 		bpage->busaddr |= addr & PAGE_MASK;
1546 	}
1547 	bpage->datavaddr = vaddr;
1548 	bpage->datapage = PHYS_TO_VM_PAGE(addr);
1549 	bpage->dataoffs = addr & PAGE_MASK;
1550 	bpage->datacount = size;
1551 	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1552 	return (bpage->busaddr);
1553 }
1554 
1555 static void
1556 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1557 {
1558 	struct bus_dmamap *map;
1559 	struct bounce_zone *bz;
1560 
1561 	bz = dmat->bounce_zone;
1562 	bpage->datavaddr = 0;
1563 	bpage->datacount = 0;
1564 	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1565 		/*
1566 		 * Reset the bounce page to start at offset 0.  Other uses
1567 		 * of this bounce page may need to store a full page of
1568 		 * data and/or assume it starts on a page boundary.
1569 		 */
1570 		bpage->vaddr &= ~PAGE_MASK;
1571 		bpage->busaddr &= ~PAGE_MASK;
1572 	}
1573 
1574 	mtx_lock(&bounce_lock);
1575 	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1576 	bz->free_bpages++;
1577 	bz->active_bpages--;
1578 	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1579 		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1580 			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1581 			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1582 			    map, links);
1583 			busdma_swi_pending = 1;
1584 			bz->total_deferred++;
1585 			swi_sched(vm_ih, 0);
1586 		}
1587 	}
1588 	mtx_unlock(&bounce_lock);
1589 }
1590 
1591 void
1592 busdma_swi(void)
1593 {
1594 	bus_dma_tag_t dmat;
1595 	struct bus_dmamap *map;
1596 
1597 	mtx_lock(&bounce_lock);
1598 	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1599 		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1600 		mtx_unlock(&bounce_lock);
1601 		dmat = map->dmat;
1602 		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
1603 		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
1604 		    map->callback_arg, BUS_DMA_WAITOK);
1605 		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
1606 		mtx_lock(&bounce_lock);
1607 	}
1608 	mtx_unlock(&bounce_lock);
1609 }
1610