/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES	1024

/*
 * Number of bus_dma_segment_t's cached on the stack by the map-load
 * functions below (16 x N bytes declared on the stack).
 */
#define	BUS_DMA_CACHE_SEGMENTS	8

struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
#ifdef SMP
	struct spinlock	spin;
#else
	int		unused0;
#endif
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN	BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

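/*
 * True when a plain kmalloc() can satisfy bus_dmamem_alloc() for this
 * tag: the allocation fits in one page, needs at most page alignment,
 * and the tag can address all of physical memory, so no bouncing or
 * contiguous low-memory allocation is required.
 */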
#define BUS_DMAMEM_KMALLOC(dmat) \
	((dmat)->maxsize <= PAGE_SIZE && \
	 (dmat)->alignment <= PAGE_SIZE && \
	 (dmat)->lowaddr >= ptoa(Maxmem))

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
#ifdef SMP
	struct spinlock	spin;
#else
	int		unused0;
#endif
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};

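/* Bounce zone lock: a spinlock on SMP builds, a critical section on UP. */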
#ifdef SMP
#define BZ_LOCK(bz)	spin_lock(&(bz)->spin)
#define BZ_UNLOCK(bz)	spin_unlock(&(bz)->spin)
#else
#define BZ_LOCK(bz)	crit_enter()
#define BZ_UNLOCK(bz)	crit_exit()
#endif

static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_token);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

static int busdma_priv_zonecount = -1;

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1; /* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
static struct spinlock bounce_map_list_spin =
	SPINLOCK_INITIALIZER(&bounce_map_list_spin);

static struct bus_dmamap nobounce_dmamap;

static int		alloc_bounce_zone(bus_dma_tag_t);
static int		alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static void		free_bounce_pages_all(bus_dma_tag_t);
static void		free_bounce_zone(bus_dma_tag_t);
static int		reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void		return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t	add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
			    vm_offset_t, bus_size_t);
static void		free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t	get_map_waiting(bus_dma_tag_t);
static void		add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
	   0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
	   0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
	   &bounce_alignment, 0, "Obey alignment constraint");

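/*
 * Return non-zero if the given physical address must be bounced for
 * this tag: the address either lies inside the tag's (or an ancestor
 * tag's) excluded window (lowaddr, highaddr], or bounce_alignment is
 * set and the address violates the tag's alignment, and no filter
 * function overrides the decision.
 */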
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL ||
		     dmat->filter(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static __inline
bus_dma_segment_t *
bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
{
	if (tag->flags & BUS_DMA_PROTECTED)
		return(tag->segments);

	if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		return(cache);
#ifdef SMP
	spin_lock(&tag->spin);
#endif
	return(tag->segments);
}

static __inline
void
bus_dma_tag_unlock(bus_dma_tag_t tag)
{
#ifdef SMP
	if (tag->flags & BUS_DMA_PROTECTED)
		return;

	if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
		spin_unlock(&tag->spin);
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
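/*
 * Usage sketch (hypothetical driver code; MYDEV_BUFSIZE and sc->sc_dtag
 * are illustrative names only).  A tag for single-segment transfers to
 * a device that can only address the low 4GB might be created with:
 *
 *	error = bus_dma_tag_create(NULL, 4, 0,
 *				   BUS_SPACE_MAXADDR_32BIT,
 *				   BUS_SPACE_MAXADDR,
 *				   NULL, NULL,
 *				   MYDEV_BUFSIZE, 1, MYDEV_BUFSIZE,
 *				   0, &sc->sc_dtag);
 */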
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/*
	 * Sanity checks
	 */

	if (alignment == 0)
		alignment = 1;
	if (alignment & (alignment - 1))
		panic("alignment must be a power of 2\n");

	if (boundary != 0) {
		if (boundary & (boundary - 1))
			panic("boundary must be a power of 2\n");
		if (boundary < maxsegsz) {
			kprintf("boundary < maxsegsz:\n");
			print_backtrace(-1);
			maxsegsz = boundary;
		}
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

#ifdef SMP
	spin_init(&newtag->spin);
#endif
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

#ifdef notyet
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			parent->ref_count++;
	}

	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if ((newtag->flags & BUS_DMA_ALLOCALL) == 0 &&
		    ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}
back:
	if (error) {
		free_bounce_zone(newtag);
		kfree(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return error;
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free_bounce_zone(dmat);
				if (dmat->segments != NULL)
					kfree(dmat->segments, M_DEVBUF);
				kfree(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
	return(tag->maxsize);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_ALLOCALL) {
			maxpages = Maxmem - atop(dmat->lowaddr);
		} else if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (!error &&
				    (dmat->flags & BUS_DMA_ALLOCALL) == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (!error) {
		dmat->map_count++;
	} else {
		kfree(*mapp, M_DEVBUF);
		*mapp = NULL;
	}
	return error;
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		if (verify || bootverbose)
			kprintf("boundary check failed\n");
		if (verify)
			print_backtrace(-1); /* XXX panic */
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		if (verify || bootverbose)
			kprintf("alignment check failed\n");
		if (verify)
			print_backtrace(-1); /* XXX panic */
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so we do not allocate a dma map.
 */
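/*
 * Usage sketch (hypothetical driver code): allocation and release are
 * paired, and the map returned by bus_dmamem_alloc() is always NULL:
 *
 *	error = bus_dmamem_alloc(sc->sc_dtag, &sc->sc_buf,
 *				 BUS_DMA_WAITOK | BUS_DMA_ZERO, &map);
 *	...
 *	bus_dmamem_free(sc->sc_dtag, sc->sc_buf, map);
 */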
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * XXX
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			for (size = 1; size <= maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if (BUS_DMAMEM_KMALLOC(dmat))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr);
	else
		return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
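/*
 * Calling convention sketch for multi-buffer loads (this mirrors the
 * loop in bus_dmamap_load_mbuf_segment() below):
 *
 *	first = 1; nsegs = 1; lastaddr = 0;
 *	for each buffer {
 *		error = _bus_dmamap_load_buffer(dmat, map, buf, len,
 *				segs, maxsegs, pmap, flags,
 *				&lastaddr, &nsegs, first);
 *		first = 0;
 *	}
 */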
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments,
			int nsegments,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

#ifdef INVARIANTS
	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
	}

	/* Reserve the necessary bounce pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0)
		error = EFBIG;

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
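/*
 * A minimal callback sketch (hypothetical driver code): the callback
 * receives the finished segment list and typically just records the
 * bus address(es) for the device:
 *
 *	static void
 *	mydev_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *			int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 */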
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustments".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
			segments, dmat->nsegments,
			NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS) {
		KKASSERT((dmat->flags &
			  (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL)) !=
			 (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL));

		if (dmat->flags & BUS_DMA_PROTECTED)
			panic("protected dmamap callback will be deferred\n");

		bus_dma_tag_unlock(dmat);
		return error;
	}
	callback(callback_arg, segments, nsegs, error);
	bus_dma_tag_unlock(dmat);
	return 0;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
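/*
 * Typical TX-path sketch (hypothetical driver code): drivers usually
 * call bus_dmamap_load_mbuf_segment() below and retry once with a
 * defragmented chain when the mbuf has too many fragments:
 *
 *	error = bus_dmamap_load_mbuf_segment(sc->sc_dtag, map, m,
 *			segs, MYDEV_MAXSEGS, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		m = m_defrag(m, MB_DONTWAIT);
 *		if (m != NULL)
 *			... retry the load ...
 *	}
 */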
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
			segments, dmat->nsegments, &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	bus_dma_tag_unlock(dmat);
	return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d\n", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only supports %d segments\n",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported\n"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
			}
			first = 0;
		}
#ifdef INVARIANTS
		if (!error)
			KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
	} else {
		*nsegs = 0;
		error = EINVAL;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	bus_dma_segment_t *segs;
	int nsegs_left;

	if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		segments = cache_segments;
	else
		segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments,
				   M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	segs = segments;
	nsegs_left = dmat->nsegments;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				segs, nsegs_left,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
		if (error == 0) {
			nsegs_left -= nsegs;
			segs += nsegs;
		}
	}

	/*
	 * Minimum one DMA segment, even if 0-length buffer.
	 */
	if (nsegs_left == dmat->nsegments)
		--nsegs_left;

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, dmat->nsegments - nsegs_left,
			 (bus_size_t)uio->uio_resid, error);
	}
	if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
		kfree(segments, M_DEVBUF);
	return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

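/*
 * Synchronize a map's bounce pages with the client buffer.  PREWRITE
 * copies client data into the bounce pages before the device reads
 * them; POSTREAD copies device-written data back out.  The usual
 * driver ordering for a host-to-device transfer (sketch):
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	... start the device reading the buffer, wait for completion ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
 */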
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned\n"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&bounce_zone_tok);

	if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
		/*
		 * For shared bounce zone, check to see
		 * if we already have a suitable zone
		 */
		STAILQ_FOREACH(bz, &bounce_zone_list, links) {
			if (dmat->alignment <= bz->alignment &&
			    dmat->lowaddr >= bz->lowaddr) {
				lwkt_reltoken(&bounce_zone_tok);

				dmat->bounce_zone = bz;
				kfree(new_bz, M_DEVBUF);
				return 0;
			}
		}
	}
	bz = new_bz;

#ifdef SMP
	spin_init(&bz->spin);
#endif
	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = round_page(dmat->alignment);
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);

	if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
		ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
		busdma_zonecount++;
		STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	} else {
		ksnprintf(bz->zoneid, 8, "zone%d", busdma_priv_zonecount);
		busdma_priv_zonecount--;
	}

	lwkt_reltoken(&bounce_zone_tok);

	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return 0;
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 mflags, 0ul,
							 bz->lowaddr,
							 bz->alignment, 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		BZ_LOCK(bz);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		BZ_UNLOCK(bz);

		count++;
		numpages--;
	}
	return count;
}

static void
free_bounce_pages_all(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	BZ_LOCK(bz);

	while ((bpage = STAILQ_FIRST(&bz->bounce_page_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

		KKASSERT(total_bounce_pages > 0);
		total_bounce_pages--;

		KKASSERT(bz->total_bpages > 0);
		bz->total_bpages--;

		KKASSERT(bz->free_bpages > 0);
		bz->free_bpages--;

		BZ_UNLOCK(bz);
		contigfree((void *)bpage->vaddr, PAGE_SIZE, M_DEVBUF);
		kfree(bpage, M_DEVBUF);
		BZ_LOCK(bz);
	}
	if (bz->total_bpages) {
		kprintf("#%d bounce pages are still in use\n",
			bz->total_bpages);
		print_backtrace(-1);
	}

	BZ_UNLOCK(bz);
}

static void
free_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;

	if (bz == NULL)
		return;

	if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0)
		return;

	free_bounce_pages_all(dmat);
	dmat->bounce_zone = NULL;

	if (bz->sysctl_tree != NULL)
		sysctl_ctx_free(&bz->sysctl_ctx);
	kfree(bz, M_DEVBUF);
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}

	bz->free_bpages -= pages;

	bz->reserved_bpages += pages;
	KKASSERT(bz->reserved_bpages <= bz->total_bpages);

	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}

static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int reserved = map->pagesreserved;
	bus_dmamap_t wait_map;

	map->pagesreserved = 0;
	map->pagesneeded = 0;

	if (reserved == 0)
		return;

	BZ_LOCK(bz);

	bz->free_bpages += reserved;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->reserved_bpages >= reserved);
	bz->reserved_bpages -= reserved;

	wait_map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (wait_map != NULL)
		add_map_callback(wait_map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
	if (map != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			bz->total_deferred++;
		} else {
			map = NULL;
		}
	}
	return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
	spin_lock(&bounce_map_list_spin);
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	setsoftvm();
	spin_unlock(&bounce_map_list_spin);
}

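/*
 * Software interrupt handler for deferred maps: retry each queued
 * bus_dmamap_load() now that bounce pages are available; the callback
 * stored in the map is invoked from the retried load.
 */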
void
busdma_swi(void)
{
	bus_dmamap_t map;

	spin_lock(&bounce_map_list_spin);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		spin_unlock(&bounce_map_list_spin);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		spin_lock(&bounce_map_list_spin);
	}
	spin_unlock(&bounce_map_list_spin);
}