xref: /freebsd/sys/riscv/riscv/busdma_bounce.c (revision e17f5b1d)
1 /*-
2  * Copyright (c) 1997, 1998 Justin T. Gibbs.
3  * Copyright (c) 2015-2016 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Andrew Turner
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Portions of this software were developed by Semihalf
10  * under sponsorship of the FreeBSD Foundation.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions, and the following disclaimer,
17  *    without modification, immediately at the beginning of the file.
18  * 2. The name of the author may not be used to endorse or promote products
19  *    derived from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
40 #include <sys/bus.h>
41 #include <sys/interrupt.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/lock.h>
45 #include <sys/proc.h>
46 #include <sys/memdesc.h>
47 #include <sys/mutex.h>
48 #include <sys/sysctl.h>
49 #include <sys/uio.h>
50 
51 #include <vm/vm.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_kern.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_map.h>
56 
57 #include <machine/atomic.h>
58 #include <machine/bus.h>
59 #include <machine/md_var.h>
60 #include <machine/bus_dma_impl.h>
61 
62 #define MAX_BPAGES 4096
63 
64 enum {
65 	BF_COULD_BOUNCE		= 0x01,
66 	BF_MIN_ALLOC_COMP	= 0x02,
67 	BF_KMEM_ALLOC		= 0x04,
68 	BF_COHERENT		= 0x10,
69 };
70 
71 struct bounce_zone;
72 
73 struct bus_dma_tag {
74 	struct bus_dma_tag_common common;
75 	int			map_count;
76 	int			bounce_flags;
77 	bus_dma_segment_t	*segments;
78 	struct bounce_zone	*bounce_zone;
79 };
80 
81 struct bounce_page {
82 	vm_offset_t	vaddr;		/* kva of bounce buffer */
83 	bus_addr_t	busaddr;	/* Physical address */
84 	vm_offset_t	datavaddr;	/* kva of client data */
85 	vm_page_t	datapage;	/* physical page of client data */
86 	vm_offset_t	dataoffs;	/* page offset of client data */
87 	bus_size_t	datacount;	/* client data count */
88 	STAILQ_ENTRY(bounce_page) links;
89 };
90 
91 int busdma_swi_pending;
92 
93 struct bounce_zone {
94 	STAILQ_ENTRY(bounce_zone) links;
95 	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
96 	int		total_bpages;
97 	int		free_bpages;
98 	int		reserved_bpages;
99 	int		active_bpages;
100 	int		total_bounced;
101 	int		total_deferred;
102 	int		map_count;
103 	bus_size_t	alignment;
104 	bus_addr_t	lowaddr;
105 	char		zoneid[8];
106 	char		lowaddrid[20];
107 	struct sysctl_ctx_list sysctl_tree;
108 	struct sysctl_oid *sysctl_tree_top;
109 };
110 
111 static struct mtx bounce_lock;
112 static int total_bpages;
113 static int busdma_zonecount;
114 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
115 
116 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
117     "Busdma parameters");
118 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
119 	   "Total bounce pages");
120 
121 struct sync_list {
122 	vm_offset_t	vaddr;		/* kva of client data */
123 	bus_addr_t	paddr;		/* physical address */
124 	vm_page_t	pages;		/* starting page of client data */
125 	bus_size_t	datacount;	/* client data count */
126 };
127 
128 struct bus_dmamap {
129 	struct bp_list	       bpages;
130 	int		       pagesneeded;
131 	int		       pagesreserved;
132 	bus_dma_tag_t	       dmat;
133 	struct memdesc	       mem;
134 	bus_dmamap_callback_t *callback;
135 	void		      *callback_arg;
136 	STAILQ_ENTRY(bus_dmamap) links;
137 	u_int			flags;
138 #define	DMAMAP_COULD_BOUNCE	(1 << 0)
139 #define	DMAMAP_FROM_DMAMEM	(1 << 1)
140 	int			sync_count;
141 	struct sync_list	slist[];
142 };
143 
144 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
145 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
146 
147 static void init_bounce_pages(void *dummy);
148 static int alloc_bounce_zone(bus_dma_tag_t dmat);
149 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
150 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
151     int commit);
152 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
153     vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
154 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
155 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
156 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
157     pmap_t pmap, void *buf, bus_size_t buflen, int flags);
158 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
159     vm_paddr_t buf, bus_size_t buflen, int flags);
160 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
161     int flags);
162 
163 /*
164  * Allocate a device specific dma_tag.
165  */
166 static int
167 bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
168     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
169     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
170     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
171     void *lockfuncarg, bus_dma_tag_t *dmat)
172 {
173 	bus_dma_tag_t newtag;
174 	int error;
175 
176 	*dmat = NULL;
177 	error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
178 	    NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
179 	    maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
180 	    sizeof (struct bus_dma_tag), (void **)&newtag);
181 	if (error != 0)
182 		return (error);
183 
184 	newtag->common.impl = &bus_dma_bounce_impl;
185 	newtag->map_count = 0;
186 	newtag->segments = NULL;
187 
188 	if ((flags & BUS_DMA_COHERENT) != 0)
189 		newtag->bounce_flags |= BF_COHERENT;
190 
191 	if (parent != NULL) {
192 		if ((newtag->common.filter != NULL ||
193 		    (parent->bounce_flags & BF_COULD_BOUNCE) != 0))
194 			newtag->bounce_flags |= BF_COULD_BOUNCE;
195 
196 		/* Copy some flags from the parent */
197 		newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT;
198 	}
199 
200 	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
201 	    newtag->common.alignment > 1)
202 		newtag->bounce_flags |= BF_COULD_BOUNCE;
203 
204 	if (((newtag->bounce_flags & BF_COULD_BOUNCE) != 0) &&
205 	    (flags & BUS_DMA_ALLOCNOW) != 0) {
206 		struct bounce_zone *bz;
207 
208 		/* Must bounce */
209 		if ((error = alloc_bounce_zone(newtag)) != 0) {
210 			free(newtag, M_DEVBUF);
211 			return (error);
212 		}
213 		bz = newtag->bounce_zone;
214 
215 		if (ptoa(bz->total_bpages) < maxsize) {
216 			int pages;
217 
218 			pages = atop(round_page(maxsize)) - bz->total_bpages;
219 
220 			/* Add pages to our bounce pool */
221 			if (alloc_bounce_pages(newtag, pages) < pages)
222 				error = ENOMEM;
223 		}
224 		/* Performed initial allocation */
225 		newtag->bounce_flags |= BF_MIN_ALLOC_COMP;
226 	} else
227 		error = 0;
228 
229 	if (error != 0)
230 		free(newtag, M_DEVBUF);
231 	else
232 		*dmat = newtag;
233 	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
234 	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
235 	    error);
236 	return (error);
237 }
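/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * reaches bounce_bus_dma_tag_create() through the bus_dma_tag_create(9) front
 * end, typically with the parent tag from bus_get_dma_tag(9).  The softc and
 * constraint values below are invented for the example.  Argument order is
 * (parent, alignment, boundary, lowaddr, highaddr, filter, filterarg,
 * maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat):
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    DFLTPHYS, 1, DFLTPHYS, 0, NULL, NULL, &sc->dma_tag);
 */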
238 
239 static int
240 bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
241 {
242 	bus_dma_tag_t dmat_copy, parent;
243 	int error;
244 
245 	error = 0;
246 	dmat_copy = dmat;
247 
248 	if (dmat != NULL) {
249 		if (dmat->map_count != 0) {
250 			error = EBUSY;
251 			goto out;
252 		}
253 		while (dmat != NULL) {
254 			parent = (bus_dma_tag_t)dmat->common.parent;
255 			atomic_subtract_int(&dmat->common.ref_count, 1);
256 			if (dmat->common.ref_count == 0) {
257 				if (dmat->segments != NULL)
258 					free(dmat->segments, M_DEVBUF);
259 				free(dmat, M_DEVBUF);
260 				/*
261 				 * This was the last reference, so
262 				 * release our reference on the
263 				 * parent tag.
264 				 */
265 				dmat = parent;
266 			} else
267 				dmat = NULL;
268 		}
269 	}
270 out:
271 	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
272 	return (error);
273 }
274 
275 static bus_dmamap_t
276 alloc_dmamap(bus_dma_tag_t dmat, int flags)
277 {
278 	u_long mapsize;
279 	bus_dmamap_t map;
280 
281 	mapsize = sizeof(*map);
282 	mapsize += sizeof(struct sync_list) * dmat->common.nsegments;
283 	map = malloc(mapsize, M_DEVBUF, flags | M_ZERO);
284 	if (map == NULL)
285 		return (NULL);
286 
287 	/* Initialize the new map */
288 	STAILQ_INIT(&map->bpages);
289 
290 	return (map);
291 }
292 
293 /*
294  * Allocate a handle for mapping from kva/uva/physical
295  * address space into bus device space.
296  */
297 static int
298 bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
299 {
300 	struct bounce_zone *bz;
301 	int error, maxpages, pages;
302 
303 	error = 0;
304 
305 	if (dmat->segments == NULL) {
306 		dmat->segments = (bus_dma_segment_t *)malloc(
307 		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
308 		    M_DEVBUF, M_NOWAIT);
309 		if (dmat->segments == NULL) {
310 			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
311 			    __func__, dmat, ENOMEM);
312 			return (ENOMEM);
313 		}
314 	}
315 
316 	*mapp = alloc_dmamap(dmat, M_NOWAIT);
317 	if (*mapp == NULL) {
318 		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
319 		    __func__, dmat, ENOMEM);
320 		return (ENOMEM);
321 	}
322 
323 	/*
324 	 * Bouncing might be required if the driver asks for an active
325 	 * exclusion region, a data alignment that is stricter than 1, and/or
326 	 * an active address boundary.
327 	 */
328 	if (dmat->bounce_flags & BF_COULD_BOUNCE) {
329 		/* Must bounce */
330 		if (dmat->bounce_zone == NULL) {
331 			if ((error = alloc_bounce_zone(dmat)) != 0) {
332 				free(*mapp, M_DEVBUF);
333 				return (error);
334 			}
335 		}
336 		bz = dmat->bounce_zone;
337 
338 		(*mapp)->flags = DMAMAP_COULD_BOUNCE;
339 
340 		/*
341 		 * Attempt to add pages to our pool on a per-instance
342 		 * basis up to a sane limit.
343 		 */
344 		if (dmat->common.alignment > 1)
345 			maxpages = MAX_BPAGES;
346 		else
347 			maxpages = MIN(MAX_BPAGES, Maxmem -
348 			    atop(dmat->common.lowaddr));
349 		if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0 ||
350 		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
351 			pages = MAX(atop(dmat->common.maxsize), 1);
352 			pages = MIN(maxpages - bz->total_bpages, pages);
353 			pages = MAX(pages, 1);
354 			if (alloc_bounce_pages(dmat, pages) < pages)
355 				error = ENOMEM;
356 			if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP)
357 			    == 0) {
358 				if (error == 0) {
359 					dmat->bounce_flags |=
360 					    BF_MIN_ALLOC_COMP;
361 				}
362 			} else
363 				error = 0;
364 		}
365 		bz->map_count++;
366 	}
367 	if (error == 0)
368 		dmat->map_count++;
369 	else
370 		free(*mapp, M_DEVBUF);
371 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
372 	    __func__, dmat, dmat->common.flags, error);
373 	return (error);
374 }
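/*
 * Illustrative sketch (not part of the original file): drivers that DMA to
 * or from externally supplied buffers (mbufs, bios, user memory) create a
 * map explicitly before loading it; sc->map is an invented name.
 *
 *	error = bus_dmamap_create(sc->dma_tag, 0, &sc->map);
 */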
375 
376 /*
377  * Destroy a handle for mapping from kva/uva/physical
378  * address space into bus device space.
379  */
380 static int
381 bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
382 {
383 
384 	/* Check we are destroying the correct map type */
385 	if ((map->flags & DMAMAP_FROM_DMAMEM) != 0)
386 		panic("bounce_bus_dmamap_destroy: Invalid map freed\n");
387 
388 	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
389 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY);
390 		return (EBUSY);
391 	}
392 	if (dmat->bounce_zone) {
393 		KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
394 		    ("%s: Bounce zone when cannot bounce", __func__));
395 		dmat->bounce_zone->map_count--;
396 	}
397 	free(map, M_DEVBUF);
398 	dmat->map_count--;
399 	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
400 	return (0);
401 }
402 
403 
404 /*
405  * Allocate a piece of memory that can be efficiently mapped into
406  * bus device space based on the constraints lited in the dma tag.
407  * A dmamap to for use with dmamap_load is also allocated.
408  */
409 static int
410 bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
411     bus_dmamap_t *mapp)
412 {
413 	/*
414 	 * XXX ARM64TODO:
415 	 * This bus_dma implementation requires an IO-Coherent architecture.
416 	 * If IO-Coherency is not guaranteed, the BUS_DMA_COHERENT flag has
417 	 * to be implemented using non-cacheable memory.
418 	 */
419 
420 	vm_memattr_t attr;
421 	int mflags;
422 
423 	if (flags & BUS_DMA_NOWAIT)
424 		mflags = M_NOWAIT;
425 	else
426 		mflags = M_WAITOK;
427 
428 	if (dmat->segments == NULL) {
429 		dmat->segments = (bus_dma_segment_t *)malloc(
430 		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
431 		    M_DEVBUF, mflags);
432 		if (dmat->segments == NULL) {
433 			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
434 			    __func__, dmat, dmat->common.flags, ENOMEM);
435 			return (ENOMEM);
436 		}
437 	}
438 	if (flags & BUS_DMA_ZERO)
439 		mflags |= M_ZERO;
440 	if (flags & BUS_DMA_NOCACHE)
441 		attr = VM_MEMATTR_UNCACHEABLE;
442 	else if ((flags & BUS_DMA_COHERENT) != 0 &&
443 	    (dmat->bounce_flags & BF_COHERENT) == 0)
444 		/*
445 		 * If we have a non-coherent tag and are trying to allocate
446 		 * a coherent block of memory, it needs to be uncached.
447 		 */
448 		attr = VM_MEMATTR_UNCACHEABLE;
449 	else
450 		attr = VM_MEMATTR_DEFAULT;
451 
452 	/*
453 	 * Create the map, but don't set the could-bounce flag as
454 	 * this allocation should never bounce.
455 	 */
456 	*mapp = alloc_dmamap(dmat, mflags);
457 	if (*mapp == NULL) {
458 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
459 		    __func__, dmat, dmat->common.flags, ENOMEM);
460 		return (ENOMEM);
461 	}
462 	(*mapp)->flags = DMAMAP_FROM_DMAMEM;
463 
464 	/*
465 	 * Allocate the buffer from the malloc(9) allocator if...
466 	 *  - It's small enough to fit into a single power of two sized bucket.
467 	 *  - The alignment is less than or equal to the maximum size.
468 	 *  - The low address requirement is fulfilled.
469 	 * else allocate non-contiguous pages if...
470 	 *  - The page count that could get allocated doesn't exceed
471 	 *    nsegments, even when the maximum segment size is less
472 	 *    than PAGE_SIZE.
473 	 *  - The alignment constraint isn't larger than a page boundary.
474 	 *  - There are no boundary-crossing constraints.
475 	 * else allocate a block of contiguous pages because one or more of the
476 	 * constraints is something that only the contig allocator can fulfill.
477 	 *
478 	 * NOTE: The (dmat->common.alignment <= dmat->common.maxsize) check
479 	 * below is just a quick hack. The exact alignment guarantees
480 	 * of malloc(9) need to be nailed down, and the code below
481 	 * should be rewritten to take that into account.
482 	 *
483 	 * In the meantime warn the user if malloc gets it wrong.
484 	 */
485 	if ((dmat->common.maxsize <= PAGE_SIZE) &&
486 	   (dmat->common.alignment <= dmat->common.maxsize) &&
487 	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
488 	    attr == VM_MEMATTR_DEFAULT) {
489 		*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
490 	} else if (dmat->common.nsegments >=
491 	    howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
492 	    dmat->common.alignment <= PAGE_SIZE &&
493 	    (dmat->common.boundary % PAGE_SIZE) == 0) {
494 		/* Page-based multi-segment allocations allowed */
495 		*vaddr = (void *)kmem_alloc_attr(dmat->common.maxsize, mflags,
496 		    0ul, dmat->common.lowaddr, attr);
497 		dmat->bounce_flags |= BF_KMEM_ALLOC;
498 	} else {
499 		*vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags,
500 		    0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ?
501 		    dmat->common.alignment : 1ul, dmat->common.boundary, attr);
502 		dmat->bounce_flags |= BF_KMEM_ALLOC;
503 	}
504 	if (*vaddr == NULL) {
505 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
506 		    __func__, dmat, dmat->common.flags, ENOMEM);
507 		free(*mapp, M_DEVBUF);
508 		return (ENOMEM);
509 	} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
510 		printf("bus_dmamem_alloc failed to align memory properly.\n");
511 	}
512 	dmat->map_count++;
513 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
514 	    __func__, dmat, dmat->common.flags, 0);
515 	return (0);
516 }
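/*
 * Illustrative sketch (not part of the original file): once a tag exists, a
 * hypothetical driver allocates a DMA-safe buffer and its map through the
 * bus_dmamem_alloc(9) front end; sc->desc and sc->desc_map are invented.
 *
 *	error = bus_dmamem_alloc(sc->dma_tag, (void **)&sc->desc,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->desc_map);
 *
 * The flags select both the malloc(9) wait semantics (mflags above) and the
 * memory attribute used for the backing pages.
 */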
517 
518 /*
519  * Free a piece of memory and its associated dmamap that were allocated
520  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
521  */
522 static void
523 bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
524 {
525 
526 	/*
527 	 * Check that the map came from bounce_bus_dmamem_alloc; the
528 	 * BF_KMEM_ALLOC flag is clear if malloc() was used and set if
529 	 * kmem_alloc_attr() or kmem_alloc_contig() was used.
530 	 */
531 	if ((map->flags & DMAMAP_FROM_DMAMEM) == 0)
532 		panic("bus_dmamem_free: Invalid map freed\n");
533 	if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
534 		free(vaddr, M_DEVBUF);
535 	else
536 		kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
537 	free(map, M_DEVBUF);
538 	dmat->map_count--;
539 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
540 	    dmat->bounce_flags);
541 }
542 
543 static void
544 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
545     bus_size_t buflen, int flags)
546 {
547 	bus_addr_t curaddr;
548 	bus_size_t sgsize;
549 
550 	if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
551 		/*
552 		 * Count the number of bounce pages
553 		 * needed in order to complete this transfer
554 		 */
555 		curaddr = buf;
556 		while (buflen != 0) {
557 			sgsize = MIN(buflen, dmat->common.maxsegsz);
558 			if (bus_dma_run_filter(&dmat->common, curaddr)) {
559 				sgsize = MIN(sgsize,
560 				    PAGE_SIZE - (curaddr & PAGE_MASK));
561 				map->pagesneeded++;
562 			}
563 			curaddr += sgsize;
564 			buflen -= sgsize;
565 		}
566 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
567 	}
568 }
569 
570 static void
571 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
572     void *buf, bus_size_t buflen, int flags)
573 {
574 	vm_offset_t vaddr;
575 	vm_offset_t vendaddr;
576 	bus_addr_t paddr;
577 	bus_size_t sg_len;
578 
579 	if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
580 		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
581 		    "alignment= %d", dmat->common.lowaddr,
582 		    ptoa((vm_paddr_t)Maxmem),
583 		    dmat->common.boundary, dmat->common.alignment);
584 		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
585 		    map->pagesneeded);
586 		/*
587 		 * Count the number of bounce pages
588 		 * needed in order to complete this transfer
589 		 */
590 		vaddr = (vm_offset_t)buf;
591 		vendaddr = (vm_offset_t)buf + buflen;
592 
593 		while (vaddr < vendaddr) {
594 			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
595 			if (pmap == kernel_pmap)
596 				paddr = pmap_kextract(vaddr);
597 			else
598 				paddr = pmap_extract(pmap, vaddr);
599 			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
600 				sg_len = roundup2(sg_len,
601 				    dmat->common.alignment);
602 				map->pagesneeded++;
603 			}
604 			vaddr += sg_len;
605 		}
606 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
607 	}
608 }
609 
610 static int
611 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
612 {
613 
614 	/* Reserve Necessary Bounce Pages */
615 	mtx_lock(&bounce_lock);
616 	if (flags & BUS_DMA_NOWAIT) {
617 		if (reserve_bounce_pages(dmat, map, 0) != 0) {
618 			mtx_unlock(&bounce_lock);
619 			return (ENOMEM);
620 		}
621 	} else {
622 		if (reserve_bounce_pages(dmat, map, 1) != 0) {
623 			/* Queue us for resources */
624 			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
625 			mtx_unlock(&bounce_lock);
626 			return (EINPROGRESS);
627 		}
628 	}
629 	mtx_unlock(&bounce_lock);
630 
631 	return (0);
632 }
633 
634 /*
635  * Add a single contiguous physical range to the segment list.
636  */
637 static int
638 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
639     bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
640 {
641 	bus_addr_t baddr, bmask;
642 	int seg;
643 
644 	/*
645 	 * Make sure we don't cross any boundaries.
646 	 */
647 	bmask = ~(dmat->common.boundary - 1);
648 	if (dmat->common.boundary > 0) {
649 		baddr = (curaddr + dmat->common.boundary) & bmask;
650 		if (sgsize > (baddr - curaddr))
651 			sgsize = (baddr - curaddr);
652 	}
653 
654 	/*
655 	 * Insert chunk into a segment, coalescing with
656 	 * previous segment if possible.
657 	 */
658 	seg = *segp;
659 	if (seg == -1) {
660 		seg = 0;
661 		segs[seg].ds_addr = curaddr;
662 		segs[seg].ds_len = sgsize;
663 	} else {
664 		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
665 		    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
666 		    (dmat->common.boundary == 0 ||
667 		     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
668 			segs[seg].ds_len += sgsize;
669 		else {
670 			if (++seg >= dmat->common.nsegments)
671 				return (0);
672 			segs[seg].ds_addr = curaddr;
673 			segs[seg].ds_len = sgsize;
674 		}
675 	}
676 	*segp = seg;
677 	return (sgsize);
678 }
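/*
 * Worked example of the boundary trim above (values invented): with a
 * boundary of 0x1000, curaddr = 0x12f00 and sgsize = 0x400, bmask is ~0xfff
 * and baddr = (0x12f00 + 0x1000) & ~0xfff = 0x13000, so the chunk is trimmed
 * to baddr - curaddr = 0x100 and stops at the boundary.  The remaining 0x300
 * bytes start a new segment on the next call, since the boundary check in
 * the coalescing path fails for them.
 */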
679 
680 /*
681  * Utility function to load a physical buffer.  segp contains
682  * the starting segment on entrance, and the ending segment on exit.
683  */
684 static int
685 bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
686     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
687     int *segp)
688 {
689 	struct sync_list *sl;
690 	bus_size_t sgsize;
691 	bus_addr_t curaddr, sl_end;
692 	int error;
693 
694 	if (segs == NULL)
695 		segs = dmat->segments;
696 
697 	if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
698 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
699 		if (map->pagesneeded != 0) {
700 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
701 			if (error)
702 				return (error);
703 		}
704 	}
705 
706 	sl = map->slist + map->sync_count - 1;
707 	sl_end = 0;
708 
709 	while (buflen > 0) {
710 		curaddr = buf;
711 		sgsize = MIN(buflen, dmat->common.maxsegsz);
712 		if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) &&
713 		    map->pagesneeded != 0 &&
714 		    bus_dma_run_filter(&dmat->common, curaddr)) {
715 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
716 			curaddr = add_bounce_page(dmat, map, 0, curaddr,
717 			    sgsize);
718 		} else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
719 			if (map->sync_count > 0)
720 				sl_end = sl->paddr + sl->datacount;
721 
722 			if (map->sync_count == 0 || curaddr != sl_end) {
723 				if (++map->sync_count > dmat->common.nsegments)
724 					break;
725 				sl++;
726 				sl->vaddr = 0;
727 				sl->paddr = curaddr;
728 				sl->datacount = sgsize;
729 				sl->pages = PHYS_TO_VM_PAGE(curaddr);
730 				KASSERT(sl->pages != NULL,
731 				    ("%s: page at PA:0x%08lx is not in "
732 				    "vm_page_array", __func__, curaddr));
733 			} else
734 				sl->datacount += sgsize;
735 		}
736 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
737 		    segp);
738 		if (sgsize == 0)
739 			break;
740 		buf += sgsize;
741 		buflen -= sgsize;
742 	}
743 
744 	/*
745 	 * Did we fit?
746 	 */
747 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
748 }
749 
750 /*
751  * Utility function to load a linear buffer.  segp contains
752  * the starting segment on entrance, and the ending segment on exit.
753  */
754 static int
755 bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
756     bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
757     int *segp)
758 {
759 	struct sync_list *sl;
760 	bus_size_t sgsize, max_sgsize;
761 	bus_addr_t curaddr, sl_pend;
762 	vm_offset_t kvaddr, vaddr, sl_vend;
763 	int error;
764 
765 	if (segs == NULL)
766 		segs = dmat->segments;
767 
768 	if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
769 		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
770 		if (map->pagesneeded != 0) {
771 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
772 			if (error)
773 				return (error);
774 		}
775 	}
776 
777 	sl = map->slist + map->sync_count - 1;
778 	vaddr = (vm_offset_t)buf;
779 	sl_pend = 0;
780 	sl_vend = 0;
781 
782 	while (buflen > 0) {
783 		/*
784 		 * Get the physical address for this segment.
785 		 */
786 		if (pmap == kernel_pmap) {
787 			curaddr = pmap_kextract(vaddr);
788 			kvaddr = vaddr;
789 		} else {
790 			curaddr = pmap_extract(pmap, vaddr);
791 			kvaddr = 0;
792 		}
793 
794 		/*
795 		 * Compute the segment size, and adjust counts.
796 		 */
797 		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
798 		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
799 		if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) &&
800 		    map->pagesneeded != 0 &&
801 		    bus_dma_run_filter(&dmat->common, curaddr)) {
802 			sgsize = roundup2(sgsize, dmat->common.alignment);
803 			sgsize = MIN(sgsize, max_sgsize);
804 			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
805 			    sgsize);
806 		} else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
807 			sgsize = MIN(sgsize, max_sgsize);
808 			if (map->sync_count > 0) {
809 				sl_pend = sl->paddr + sl->datacount;
810 				sl_vend = sl->vaddr + sl->datacount;
811 			}
812 
813 			if (map->sync_count == 0 ||
814 			    (kvaddr != 0 && kvaddr != sl_vend) ||
815 			    (curaddr != sl_pend)) {
816 
817 				if (++map->sync_count > dmat->common.nsegments)
818 					goto cleanup;
819 				sl++;
820 				sl->vaddr = kvaddr;
821 				sl->paddr = curaddr;
822 				if (kvaddr != 0) {
823 					sl->pages = NULL;
824 				} else {
825 					sl->pages = PHYS_TO_VM_PAGE(curaddr);
826 					KASSERT(sl->pages != NULL,
827 					    ("%s: page at PA:0x%08lx is not "
828 					    "in vm_page_array", __func__,
829 					    curaddr));
830 				}
831 				sl->datacount = sgsize;
832 			} else
833 				sl->datacount += sgsize;
834 		} else {
835 			sgsize = MIN(sgsize, max_sgsize);
836 		}
837 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
838 		    segp);
839 		if (sgsize == 0)
840 			break;
841 		vaddr += sgsize;
842 		buflen -= sgsize;
843 	}
844 
845 cleanup:
846 	/*
847 	 * Did we fit?
848 	 */
849 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
850 }
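/*
 * Illustrative sketch (not part of the original file): drivers normally reach
 * this loader through bus_dmamap_load(9) and collect the resulting segments
 * in a callback.  All names below are invented for the example.
 *
 *	static void
 *	example_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->dma_tag, sc->desc_map, sc->desc,
 *	    sizeof(*sc->desc), example_dma_cb, &sc->desc_busaddr,
 *	    BUS_DMA_NOWAIT);
 *
 * Without BUS_DMA_NOWAIT the load may return EINPROGRESS when bounce pages
 * are exhausted; the callback is then deferred to busdma_swi() below.
 */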
851 
852 static void
853 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
854     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
855 {
856 
857 	if ((map->flags & DMAMAP_COULD_BOUNCE) == 0)
858 		return;
859 	map->mem = *mem;
860 	map->dmat = dmat;
861 	map->callback = callback;
862 	map->callback_arg = callback_arg;
863 }
864 
865 static bus_dma_segment_t *
866 bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
867     bus_dma_segment_t *segs, int nsegs, int error)
868 {
869 
870 	if (segs == NULL)
871 		segs = dmat->segments;
872 	return (segs);
873 }
874 
875 /*
876  * Release the mapping held by map.
877  */
878 static void
879 bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
880 {
881 	struct bounce_page *bpage;
882 
883 	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
884 		STAILQ_REMOVE_HEAD(&map->bpages, links);
885 		free_bounce_page(dmat, bpage);
886 	}
887 
888 	map->sync_count = 0;
889 }
890 
891 static void
892 dma_preread_safe(vm_offset_t va, vm_size_t size)
893 {
894 	/*
895 	 * Write back any partial cachelines immediately before and
896 	 * after the DMA region.
897 	 */
898 	if (va & (dcache_line_size - 1))
899 		cpu_dcache_wb_range(va, 1);
900 	if ((va + size) & (dcache_line_size - 1))
901 		cpu_dcache_wb_range(va + size, 1);
902 
903 	cpu_dcache_inv_range(va, size);
904 }
905 
906 static void
907 dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
908 {
909 	uint32_t len, offset;
910 	vm_page_t m;
911 	vm_paddr_t pa;
912 	vm_offset_t va, tempva;
913 	bus_size_t size;
914 
915 	offset = sl->paddr & PAGE_MASK;
916 	m = sl->pages;
917 	size = sl->datacount;
918 	pa = sl->paddr;
919 
920 	for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
921 		tempva = 0;
922 		if (sl->vaddr == 0) {
923 			len = min(PAGE_SIZE - offset, size);
924 			tempva = pmap_quick_enter_page(m);
925 			va = tempva | offset;
926 			KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
927 			    ("unexpected vm_page_t phys: 0x%16lx != 0x%16lx",
928 			    VM_PAGE_TO_PHYS(m) | offset, pa));
929 		} else {
930 			len = sl->datacount;
931 			va = sl->vaddr;
932 		}
933 
934 		switch (op) {
935 		case BUS_DMASYNC_PREWRITE:
936 		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
937 			cpu_dcache_wb_range(va, len);
938 			break;
939 		case BUS_DMASYNC_PREREAD:
940 			/*
941 			 * An mbuf may start in the middle of a cacheline. There
942 			 * will be no cpu writes to the beginning of that line
943 			 * (which contains the mbuf header) while dma is in
944 			 * progress.  Handle that case by doing a writeback of
945 			 * just the first cacheline before invalidating the
946 			 * overall buffer.  Any mbuf in a chain may have this
947 			 * misalignment.  Buffers which are not mbufs bounce if
948 			 * they are not aligned to a cacheline.
949 			 */
950 			dma_preread_safe(va, len);
951 			break;
952 		case BUS_DMASYNC_POSTREAD:
953 		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
954 			cpu_dcache_inv_range(va, len);
955 			break;
956 		default:
957 			panic("unsupported combination of sync operations: "
958 			    "0x%08x\n", op);
959 		}
960 
961 		if (tempva != 0)
962 			pmap_quick_remove_page(tempva);
963 	}
964 }
965 
966 static void
967 bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
968     bus_dmasync_op_t op)
969 {
970 	struct bounce_page *bpage;
971 	struct sync_list *sl, *end;
972 	vm_offset_t datavaddr, tempvaddr;
973 
974 	if (op == BUS_DMASYNC_POSTWRITE)
975 		return;
976 
977 	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
978 		/*
979 		 * Wait for any DMA operations to complete before the bcopy.
980 		 */
981 		fence();
982 	}
983 
984 	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
985 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
986 		    "performing bounce", __func__, dmat, dmat->common.flags,
987 		    op);
988 
989 		if ((op & BUS_DMASYNC_PREWRITE) != 0) {
990 			while (bpage != NULL) {
991 				tempvaddr = 0;
992 				datavaddr = bpage->datavaddr;
993 				if (datavaddr == 0) {
994 					tempvaddr = pmap_quick_enter_page(
995 					    bpage->datapage);
996 					datavaddr = tempvaddr | bpage->dataoffs;
997 				}
998 
999 				bcopy((void *)datavaddr,
1000 				    (void *)bpage->vaddr, bpage->datacount);
1001 				if (tempvaddr != 0)
1002 					pmap_quick_remove_page(tempvaddr);
1003 				if ((dmat->bounce_flags & BF_COHERENT) == 0)
1004 					cpu_dcache_wb_range(bpage->vaddr,
1005 					    bpage->datacount);
1006 				bpage = STAILQ_NEXT(bpage, links);
1007 			}
1008 			dmat->bounce_zone->total_bounced++;
1009 		} else if ((op & BUS_DMASYNC_PREREAD) != 0) {
1010 			while (bpage != NULL) {
1011 				if ((dmat->bounce_flags & BF_COHERENT) == 0)
1012 					cpu_dcache_wbinv_range(bpage->vaddr,
1013 					    bpage->datacount);
1014 				bpage = STAILQ_NEXT(bpage, links);
1015 			}
1016 		}
1017 
1018 		if ((op & BUS_DMASYNC_POSTREAD) != 0) {
1019 			while (bpage != NULL) {
1020 				if ((dmat->bounce_flags & BF_COHERENT) == 0)
1021 					cpu_dcache_inv_range(bpage->vaddr,
1022 					    bpage->datacount);
1023 				tempvaddr = 0;
1024 				datavaddr = bpage->datavaddr;
1025 				if (datavaddr == 0) {
1026 					tempvaddr = pmap_quick_enter_page(
1027 					    bpage->datapage);
1028 					datavaddr = tempvaddr | bpage->dataoffs;
1029 				}
1030 
1031 				bcopy((void *)bpage->vaddr,
1032 				    (void *)datavaddr, bpage->datacount);
1033 
1034 				if (tempvaddr != 0)
1035 					pmap_quick_remove_page(tempvaddr);
1036 				bpage = STAILQ_NEXT(bpage, links);
1037 			}
1038 			dmat->bounce_zone->total_bounced++;
1039 		}
1040 	}
1041 
1042 	/*
1043 	 * Cache maintenance for normal (non-COHERENT non-bounce) buffers.
1044 	 */
1045 	if (map->sync_count != 0) {
1046 		sl = &map->slist[0];
1047 		end = &map->slist[map->sync_count];
1048 		CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
1049 		    "performing sync", __func__, dmat, op);
1050 
1051 		for ( ; sl != end; ++sl)
1052 			dma_dcache_sync(sl, op);
1053 	}
1054 
1055 	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
1056 		/*
1057 		 * Wait for the bcopy to complete before any DMA operations.
1058 		 */
1059 		fence();
1060 	}
1061 }
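/*
 * Illustrative sketch (not part of the original file): a typical driver
 * brackets each transfer with bus_dmamap_sync(9) calls so that the bounce
 * copies and cache maintenance above happen at the right time.
 *
 *	bus_dmamap_sync(sc->dma_tag, sc->map,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	(start the transfer and wait for the completion interrupt)
 *	bus_dmamap_sync(sc->dma_tag, sc->map,
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->dma_tag, sc->map);
 */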
1062 
1063 static void
1064 init_bounce_pages(void *dummy __unused)
1065 {
1066 
1067 	total_bpages = 0;
1068 	STAILQ_INIT(&bounce_zone_list);
1069 	STAILQ_INIT(&bounce_map_waitinglist);
1070 	STAILQ_INIT(&bounce_map_callbacklist);
1071 	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1072 }
1073 SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1074 
1075 static struct sysctl_ctx_list *
1076 busdma_sysctl_tree(struct bounce_zone *bz)
1077 {
1078 
1079 	return (&bz->sysctl_tree);
1080 }
1081 
1082 static struct sysctl_oid *
1083 busdma_sysctl_tree_top(struct bounce_zone *bz)
1084 {
1085 
1086 	return (bz->sysctl_tree_top);
1087 }
1088 
1089 static int
1090 alloc_bounce_zone(bus_dma_tag_t dmat)
1091 {
1092 	struct bounce_zone *bz;
1093 
1094 	/* Check to see if we already have a suitable zone */
1095 	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1096 		if ((dmat->common.alignment <= bz->alignment) &&
1097 		    (dmat->common.lowaddr >= bz->lowaddr)) {
1098 			dmat->bounce_zone = bz;
1099 			return (0);
1100 		}
1101 	}
1102 
1103 	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
1104 	    M_NOWAIT | M_ZERO)) == NULL)
1105 		return (ENOMEM);
1106 
1107 	STAILQ_INIT(&bz->bounce_page_list);
1108 	bz->free_bpages = 0;
1109 	bz->reserved_bpages = 0;
1110 	bz->active_bpages = 0;
1111 	bz->lowaddr = dmat->common.lowaddr;
1112 	bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
1113 	bz->map_count = 0;
1114 	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1115 	busdma_zonecount++;
1116 	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1117 	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1118 	dmat->bounce_zone = bz;
1119 
1120 	sysctl_ctx_init(&bz->sysctl_tree);
1121 	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1122 	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1123 	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
1124 	if (bz->sysctl_tree_top == NULL) {
1125 		sysctl_ctx_free(&bz->sysctl_tree);
1126 		return (0);	/* XXX error code? */
1127 	}
1128 
1129 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1130 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1131 	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1132 	    "Total bounce pages");
1133 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1134 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1135 	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1136 	    "Free bounce pages");
1137 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1138 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1139 	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1140 	    "Reserved bounce pages");
1141 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1142 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1143 	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1144 	    "Active bounce pages");
1145 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1146 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1147 	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1148 	    "Total bounce requests");
1149 	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1150 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1151 	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1152 	    "Total bounce requests that were deferred");
1153 	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1154 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1155 	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1156 	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
1157 	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1158 	    "alignment", CTLFLAG_RD, &bz->alignment, "");
1159 
1160 	return (0);
1161 }
1162 
1163 static int
1164 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1165 {
1166 	struct bounce_zone *bz;
1167 	int count;
1168 
1169 	bz = dmat->bounce_zone;
1170 	count = 0;
1171 	while (numpages > 0) {
1172 		struct bounce_page *bpage;
1173 
1174 		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
1175 						     M_NOWAIT | M_ZERO);
1176 
1177 		if (bpage == NULL)
1178 			break;
1179 		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
1180 		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
1181 		if (bpage->vaddr == 0) {
1182 			free(bpage, M_DEVBUF);
1183 			break;
1184 		}
1185 		bpage->busaddr = pmap_kextract(bpage->vaddr);
1186 		mtx_lock(&bounce_lock);
1187 		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1188 		total_bpages++;
1189 		bz->total_bpages++;
1190 		bz->free_bpages++;
1191 		mtx_unlock(&bounce_lock);
1192 		count++;
1193 		numpages--;
1194 	}
1195 	return (count);
1196 }
1197 
1198 static int
1199 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1200 {
1201 	struct bounce_zone *bz;
1202 	int pages;
1203 
1204 	mtx_assert(&bounce_lock, MA_OWNED);
1205 	bz = dmat->bounce_zone;
1206 	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1207 	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1208 		return (map->pagesneeded - (map->pagesreserved + pages));
1209 	bz->free_bpages -= pages;
1210 	bz->reserved_bpages += pages;
1211 	map->pagesreserved += pages;
1212 	pages = map->pagesneeded - map->pagesreserved;
1213 
1214 	return (pages);
1215 }
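/*
 * Worked example (values invented): with free_bpages = 2, pagesneeded = 4
 * and pagesreserved = 1, pages becomes MIN(2, 3) = 2.  A non-committing call
 * (commit == 0) returns the deficit 4 - (1 + 2) = 1 without touching the
 * counters; a committing call reserves the two pages and returns the
 * remaining shortfall of 1, so the caller queues the map on the waiting list
 * until free_bounce_page() releases more pages.
 */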
1216 
1217 static bus_addr_t
1218 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1219 		bus_addr_t addr, bus_size_t size)
1220 {
1221 	struct bounce_zone *bz;
1222 	struct bounce_page *bpage;
1223 
1224 	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1225 	KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
1226 	    ("add_bounce_page: bad map %p", map));
1227 
1228 	bz = dmat->bounce_zone;
1229 	if (map->pagesneeded == 0)
1230 		panic("add_bounce_page: map doesn't need any pages");
1231 	map->pagesneeded--;
1232 
1233 	if (map->pagesreserved == 0)
1234 		panic("add_bounce_page: map doesn't need any pages");
1235 	map->pagesreserved--;
1236 
1237 	mtx_lock(&bounce_lock);
1238 	bpage = STAILQ_FIRST(&bz->bounce_page_list);
1239 	if (bpage == NULL)
1240 		panic("add_bounce_page: free page list is empty");
1241 
1242 	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1243 	bz->reserved_bpages--;
1244 	bz->active_bpages++;
1245 	mtx_unlock(&bounce_lock);
1246 
1247 	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1248 		/* Page offset needs to be preserved. */
1249 		bpage->vaddr |= addr & PAGE_MASK;
1250 		bpage->busaddr |= addr & PAGE_MASK;
1251 	}
1252 	bpage->datavaddr = vaddr;
1253 	bpage->datapage = PHYS_TO_VM_PAGE(addr);
1254 	bpage->dataoffs = addr & PAGE_MASK;
1255 	bpage->datacount = size;
1256 	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1257 	return (bpage->busaddr);
1258 }
1259 
1260 static void
1261 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1262 {
1263 	struct bus_dmamap *map;
1264 	struct bounce_zone *bz;
1265 
1266 	bz = dmat->bounce_zone;
1267 	bpage->datavaddr = 0;
1268 	bpage->datacount = 0;
1269 	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1270 		/*
1271 		 * Reset the bounce page to start at offset 0.  Other uses
1272 		 * of this bounce page may need to store a full page of
1273 		 * data and/or assume it starts on a page boundary.
1274 		 */
1275 		bpage->vaddr &= ~PAGE_MASK;
1276 		bpage->busaddr &= ~PAGE_MASK;
1277 	}
1278 
1279 	mtx_lock(&bounce_lock);
1280 	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1281 	bz->free_bpages++;
1282 	bz->active_bpages--;
1283 	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1284 		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1285 			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1286 			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1287 			    map, links);
1288 			busdma_swi_pending = 1;
1289 			bz->total_deferred++;
1290 			swi_sched(vm_ih, 0);
1291 		}
1292 	}
1293 	mtx_unlock(&bounce_lock);
1294 }
1295 
1296 void
1297 busdma_swi(void)
1298 {
1299 	bus_dma_tag_t dmat;
1300 	struct bus_dmamap *map;
1301 
1302 	mtx_lock(&bounce_lock);
1303 	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1304 		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1305 		mtx_unlock(&bounce_lock);
1306 		dmat = map->dmat;
1307 		(dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
1308 		bus_dmamap_load_mem(map->dmat, map, &map->mem,
1309 		    map->callback, map->callback_arg, BUS_DMA_WAITOK);
1310 		(dmat->common.lockfunc)(dmat->common.lockfuncarg,
1311 		    BUS_DMA_UNLOCK);
1312 		mtx_lock(&bounce_lock);
1313 	}
1314 	mtx_unlock(&bounce_lock);
1315 }
1316 
1317 struct bus_dma_impl bus_dma_bounce_impl = {
1318 	.tag_create = bounce_bus_dma_tag_create,
1319 	.tag_destroy = bounce_bus_dma_tag_destroy,
1320 	.map_create = bounce_bus_dmamap_create,
1321 	.map_destroy = bounce_bus_dmamap_destroy,
1322 	.mem_alloc = bounce_bus_dmamem_alloc,
1323 	.mem_free = bounce_bus_dmamem_free,
1324 	.load_phys = bounce_bus_dmamap_load_phys,
1325 	.load_buffer = bounce_bus_dmamap_load_buffer,
1326 	.load_ma = bus_dmamap_load_ma_triv,
1327 	.map_waitok = bounce_bus_dmamap_waitok,
1328 	.map_complete = bounce_bus_dmamap_complete,
1329 	.map_unload = bounce_bus_dmamap_unload,
1330 	.map_sync = bounce_bus_dmamap_sync
1331 };
1332