xref: /freebsd/sys/x86/x86/busdma_bounce.c (revision 61e21613)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/domainset.h>
32 #include <sys/malloc.h>
33 #include <sys/bus.h>
34 #include <sys/interrupt.h>
35 #include <sys/kernel.h>
36 #include <sys/ktr.h>
37 #include <sys/lock.h>
38 #include <sys/proc.h>
39 #include <sys/memdesc.h>
40 #include <sys/msan.h>
41 #include <sys/mutex.h>
42 #include <sys/sysctl.h>
43 #include <sys/uio.h>
44 
45 #include <vm/vm.h>
46 #include <vm/vm_extern.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_page.h>
49 #include <vm/vm_map.h>
50 
51 #include <machine/atomic.h>
52 #include <machine/bus.h>
53 #include <machine/md_var.h>
54 #include <machine/specialreg.h>
55 #include <x86/include/busdma_impl.h>
56 
57 #ifdef __i386__
58 #define MAX_BPAGES (Maxmem > atop(0x100000000ULL) ? 8192 : 512)
59 #else
60 #define MAX_BPAGES 8192
61 #endif
62 
63 enum {
64 	BUS_DMA_COULD_BOUNCE	= 0x01,
65 	BUS_DMA_MIN_ALLOC_COMP	= 0x02,
66 	BUS_DMA_KMEM_ALLOC	= 0x04,
67 	BUS_DMA_FORCE_MAP	= 0x08,
68 };
69 
70 struct bounce_page;
71 struct bounce_zone;
72 
73 struct bus_dma_tag {
74 	struct bus_dma_tag_common common;
75 	int			map_count;
76 	int			bounce_flags;
77 	bus_dma_segment_t	*segments;
78 	struct bounce_zone	*bounce_zone;
79 };
80 
81 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
82     "Busdma parameters");
83 
84 struct bus_dmamap {
85 	STAILQ_HEAD(, bounce_page) bpages;
86 	int		       pagesneeded;
87 	int		       pagesreserved;
88 	bus_dma_tag_t	       dmat;
89 	struct memdesc	       mem;
90 	bus_dmamap_callback_t *callback;
91 	void		      *callback_arg;
92 	__sbintime_t	       queued_time;
93 	STAILQ_ENTRY(bus_dmamap) links;
94 #ifdef KMSAN
95 	struct memdesc	       kmsan_mem;
96 #endif
97 };
98 
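/*
 * Placeholder map used when a tag never needs to bounce; loads passing a
 * NULL map are pointed at this so the bounce-page accounting is skipped.
 */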
99 static struct bus_dmamap nobounce_dmamap;
100 
101 static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
102     bus_size_t buflen, int *pagesneeded);
103 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
104     pmap_t pmap, void *buf, bus_size_t buflen, int flags);
105 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
106     vm_paddr_t buf, bus_size_t buflen, int flags);
107 
108 static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
109 
110 #define	dmat_alignment(dmat)	((dmat)->common.alignment)
111 #define	dmat_domain(dmat)	((dmat)->common.domain)
112 #define	dmat_flags(dmat)	((dmat)->common.flags)
113 #define	dmat_highaddr(dmat)	((dmat)->common.highaddr)
114 #define	dmat_lowaddr(dmat)	((dmat)->common.lowaddr)
115 #define	dmat_lockfunc(dmat)	((dmat)->common.lockfunc)
116 #define	dmat_lockfuncarg(dmat)	((dmat)->common.lockfuncarg)
117 
118 #include "../../kern/subr_busdma_bounce.c"
119 
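/*
 * Make sure the tag has a bounce zone holding enough bounce pages to
 * cover at least one maxsize transfer.
 */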
120 static int
121 bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
122 {
123 	struct bounce_zone *bz;
124 	int error;
125 
126 	/* Must bounce */
127 	if ((error = alloc_bounce_zone(dmat)) != 0)
128 		return (error);
129 	bz = dmat->bounce_zone;
130 
131 	if (ptoa(bz->total_bpages) < dmat->common.maxsize) {
132 		int pages;
133 
134 		pages = atop(dmat->common.maxsize) - bz->total_bpages;
135 
136 		/* Add pages to our bounce pool */
137 		if (alloc_bounce_pages(dmat, pages) < pages)
138 			return (ENOMEM);
139 	}
140 	/* Performed initial allocation */
141 	dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;
142 
143 	return (0);
144 }
145 
146 /*
147  * Allocate a device-specific dma_tag.
148  */
149 static int
150 bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
151     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
152     bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags,
153     bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat)
154 {
155 	bus_dma_tag_t newtag;
156 	int error;
157 
158 	*dmat = NULL;
159 	error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
160 	    NULL, alignment, boundary, lowaddr, highaddr, maxsize, nsegments,
161 	    maxsegsz, flags, lockfunc, lockfuncarg, sizeof(struct bus_dma_tag),
162 	    (void **)&newtag);
163 	if (error != 0)
164 		return (error);
165 
166 	newtag->common.impl = &bus_dma_bounce_impl;
167 	newtag->map_count = 0;
168 	newtag->segments = NULL;
169 
170 #ifdef KMSAN
171 	/*
172 	 * When KMSAN is configured, we need a map to store a memory descriptor
173 	 * which can be used for validation.
174 	 */
175 	newtag->bounce_flags |= BUS_DMA_FORCE_MAP;
176 #endif
177 
178 	if (parent != NULL &&
179 	    (parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)
180 		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
181 
182 	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
183 	    newtag->common.alignment > 1)
184 		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
185 
186 	if ((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
187 	    (flags & BUS_DMA_ALLOCNOW) != 0)
188 		error = bounce_bus_dma_zone_setup(newtag);
189 	else
190 		error = 0;
191 
192 	if (error != 0)
193 		free(newtag, M_DEVBUF);
194 	else
195 		*dmat = newtag;
196 	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
197 	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
198 	    error);
199 	return (error);
200 }
201 
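/*
 * Return true if a transfer of the given physical range can be completed
 * without bounce pages.
 */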
202 static bool
203 bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
204 {
205 
206 	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0)
207 		return (true);
208 	return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
209 }
210 
211 /*
212  * Update the domain for the tag.  We may need to reallocate the zone and
213  * bounce pages.
214  */
215 static int
216 bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
217 {
218 
219 	KASSERT(dmat->map_count == 0,
220 	    ("bounce_bus_dma_tag_set_domain:  Domain set after use.\n"));
221 	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 ||
222 	    dmat->bounce_zone == NULL)
223 		return (0);
224 	dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP;
225 	return (bounce_bus_dma_zone_setup(dmat));
226 }
227 
228 static int
229 bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
230 {
231 	int error = 0;
232 
233 	if (dmat != NULL) {
234 		if (dmat->map_count != 0) {
235 			error = EBUSY;
236 			goto out;
237 		}
238 		if (dmat->segments != NULL)
239 			free(dmat->segments, M_DEVBUF);
240 		free(dmat, M_DEVBUF);
241 	}
242 out:
243 	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error);
244 	return (error);
245 }
246 
247 /*
248  * Allocate a handle for mapping from kva/uva/physical
249  * address space into bus device space.
250  */
251 static int
252 bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
253 {
254 	struct bounce_zone *bz;
255 	int error, maxpages, pages;
256 
257 	error = 0;
258 
259 	if (dmat->segments == NULL) {
260 		dmat->segments = malloc_domainset(
261 		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
262 		    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT);
263 		if (dmat->segments == NULL) {
264 			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
265 			    __func__, dmat, ENOMEM);
266 			return (ENOMEM);
267 		}
268 	}
269 
270 	if (dmat->bounce_flags & (BUS_DMA_COULD_BOUNCE | BUS_DMA_FORCE_MAP)) {
271 		*mapp = malloc_domainset(sizeof(**mapp), M_DEVBUF,
272 		    DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
273 		if (*mapp == NULL) {
274 			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
275 			    __func__, dmat, ENOMEM);
276 			return (ENOMEM);
277 		}
278 		STAILQ_INIT(&(*mapp)->bpages);
279 	} else {
280 		*mapp = NULL;
281 	}
282 
283 	/*
284 	 * Bouncing might be required if the driver asks for an active
285 	 * exclusion region, a data alignment that is stricter than 1, and/or
286 	 * an active address boundary.
287 	 */
288 	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
289 		/* Must bounce */
290 		if (dmat->bounce_zone == NULL &&
291 		    (error = alloc_bounce_zone(dmat)) != 0)
292 			goto out;
293 		bz = dmat->bounce_zone;
294 
295 		/*
296 		 * Attempt to add pages to our pool on a per-instance
297 		 * basis up to a sane limit.
298 		 */
299 		if (dmat->common.alignment > 1)
300 			maxpages = MAX_BPAGES;
301 		else
302 			maxpages = MIN(MAX_BPAGES, Maxmem -
303 			    atop(dmat->common.lowaddr));
304 		if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
305 		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
306 			pages = MAX(atop(dmat->common.maxsize), 1);
307 			pages = MIN(dmat->common.nsegments, pages);
308 			pages = MIN(maxpages - bz->total_bpages, pages);
309 			pages = MAX(pages, 1);
310 			if (alloc_bounce_pages(dmat, pages) < pages)
311 				error = ENOMEM;
312 			if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
313 			    == 0) {
314 				if (error == 0) {
315 					dmat->bounce_flags |=
316 					    BUS_DMA_MIN_ALLOC_COMP;
317 				}
318 			} else
319 				error = 0;
320 		}
321 		bz->map_count++;
322 	}
323 
324 out:
325 	if (error == 0) {
326 		dmat->map_count++;
327 	} else {
328 		free(*mapp, M_DEVBUF);
329 		*mapp = NULL;
330 	}
331 
332 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
333 	    __func__, dmat, dmat->common.flags, error);
334 	return (error);
335 }
336 
337 /*
338  * Destroy a handle for mapping from kva/uva/physical
339  * address space into bus device space.
340  */
341 static int
342 bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
343 {
344 
345 	if (map != NULL && map != &nobounce_dmamap) {
346 		if (STAILQ_FIRST(&map->bpages) != NULL) {
347 			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
348 			    __func__, dmat, EBUSY);
349 			return (EBUSY);
350 		}
351 		if (dmat->bounce_zone)
352 			dmat->bounce_zone->map_count--;
353 		free(map, M_DEVBUF);
354 	}
355 	dmat->map_count--;
356 	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
357 	return (0);
358 }
359 
360 /*
361  * Allocate a piece of memory that can be efficiently mapped into
362  * bus device space based on the constraints listed in the dma tag.
363  * A dmamap for use with dmamap_load is also allocated.
364  */
365 static int
366 bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
367     bus_dmamap_t *mapp)
368 {
369 	vm_memattr_t attr;
370 	int mflags;
371 
372 	if (flags & BUS_DMA_NOWAIT)
373 		mflags = M_NOWAIT;
374 	else
375 		mflags = M_WAITOK;
376 
377 	/* If we succeed, no mapping/bouncing will be required */
378 	*mapp = NULL;
379 
380 	if (dmat->segments == NULL) {
381 		dmat->segments = (bus_dma_segment_t *)malloc_domainset(
382 		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
383 		    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags);
384 		if (dmat->segments == NULL) {
385 			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
386 			    __func__, dmat, dmat->common.flags, ENOMEM);
387 			return (ENOMEM);
388 		}
389 	}
390 	if (flags & BUS_DMA_ZERO)
391 		mflags |= M_ZERO;
392 	if (flags & BUS_DMA_NOCACHE)
393 		attr = VM_MEMATTR_UNCACHEABLE;
394 	else
395 		attr = VM_MEMATTR_DEFAULT;
396 
397 	/*
398 	 * Allocate the buffer from the malloc(9) allocator if...
399 	 *  - It's small enough to fit into a single page.
400 	 *  - Its alignment requirement is also smaller than the page size.
401 	 *  - The low address requirement is fulfilled.
402 	 *  - Default cache attributes are requested (WB).
403 	 * else allocate non-contiguous pages if...
404  *  - The page count that could get allocated doesn't exceed
405  *    nsegments, even when the maximum segment size is less
406  *    than PAGE_SIZE.
407 	 *  - The alignment constraint isn't larger than a page boundary.
408 	 *  - There are no boundary-crossing constraints.
409 	 * else allocate a block of contiguous pages because one or more of the
410 	 * constraints is something that only the contig allocator can fulfill.
411 	 *
412 	 * Warn the user if malloc gets it wrong.
413 	 */
414 	if (dmat->common.maxsize <= PAGE_SIZE &&
415 	    dmat->common.alignment <= PAGE_SIZE &&
416 	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
417 	    attr == VM_MEMATTR_DEFAULT) {
418 		*vaddr = malloc_domainset_aligned(dmat->common.maxsize,
419 		    dmat->common.alignment, M_DEVBUF,
420 		    DOMAINSET_PREF(dmat->common.domain), mflags);
421 		KASSERT(*vaddr == NULL || ((uintptr_t)*vaddr & PAGE_MASK) +
422 		    dmat->common.maxsize <= PAGE_SIZE,
423 		    ("bounce_bus_dmamem_alloc: multi-page alloc %p maxsize "
424 		    "%#jx align %#jx", *vaddr, (uintmax_t)dmat->common.maxsize,
425 		    (uintmax_t)dmat->common.alignment));
426 	} else if (dmat->common.nsegments >=
427 	    howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
428 	    PAGE_SIZE)) &&
429 	    dmat->common.alignment <= PAGE_SIZE &&
430 	    (dmat->common.boundary % PAGE_SIZE) == 0) {
431 		/* Page-based multi-segment allocations allowed */
432 		*vaddr = kmem_alloc_attr_domainset(
433 		    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
434 		    mflags, 0ul, dmat->common.lowaddr, attr);
435 		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
436 	} else {
437 		*vaddr = kmem_alloc_contig_domainset(
438 		    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
439 		    mflags, 0ul, dmat->common.lowaddr,
440 		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
441 		    dmat->common.boundary, attr);
442 		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
443 	}
444 	if (*vaddr == NULL) {
445 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
446 		    __func__, dmat, dmat->common.flags, ENOMEM);
447 		return (ENOMEM);
448 	} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
449 		printf("bus_dmamem_alloc failed to align memory properly.\n");
450 	}
451 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
452 	    __func__, dmat, dmat->common.flags, 0);
453 	return (0);
454 }
455 
456 /*
457  * Free a piece of memory and its associated dmamap that were allocated
458  * via bus_dmamem_alloc.  Use free() or kmem_free() to match the allocator.
459  */
460 static void
461 bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
462 {
463 	/*
464 	 * dmamem does not need to be bounced, so the map should be
465 	 * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
466 	 * was used and set if kmem_alloc_contig() was used.
467 	 */
468 	if (map != NULL)
469 		panic("bus_dmamem_free: Invalid map freed\n");
470 	if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
471 		free(vaddr, M_DEVBUF);
472 	else
473 		kmem_free(vaddr, dmat->common.maxsize);
474 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
475 	    dmat->bounce_flags);
476 }
477 
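/*
 * Count the bounce pages required to load the given physical range.
 * With pagesneeded == NULL, just report whether any bouncing is needed.
 */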
478 static bool
479 _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
480     int *pagesneeded)
481 {
482 	vm_paddr_t curaddr;
483 	bus_size_t sgsize;
484 	int count;
485 
486 	/*
487 	 * Count the number of bounce pages needed in order to
488 	 * complete this transfer
489 	 */
490 	count = 0;
491 	curaddr = buf;
492 	while (buflen != 0) {
493 		sgsize = MIN(buflen, dmat->common.maxsegsz);
494 		if (addr_needs_bounce(dmat, curaddr)) {
495 			sgsize = MIN(sgsize,
496 			    PAGE_SIZE - (curaddr & PAGE_MASK));
497 			if (pagesneeded == NULL)
498 				return (true);
499 			count++;
500 		}
501 		curaddr += sgsize;
502 		buflen -= sgsize;
503 	}
504 
505 	if (pagesneeded != NULL)
506 		*pagesneeded = count;
507 	return (count != 0);
508 }
509 
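/*
 * Record in the map how many bounce pages a physical buffer needs,
 * unless a count has already been taken.
 */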
510 static void
511 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
512     bus_size_t buflen, int flags)
513 {
514 
515 	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
516 		_bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
517 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
518 	}
519 }
520 
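/*
 * As above, but for a virtually addressed buffer walked page by page
 * through the given pmap.
 */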
521 static void
522 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
523     void *buf, bus_size_t buflen, int flags)
524 {
525 	vm_offset_t vaddr;
526 	vm_offset_t vendaddr;
527 	vm_paddr_t paddr;
528 	bus_size_t sg_len;
529 
530 	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
531 		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
532 		    "alignment= %d", dmat->common.lowaddr,
533 		    ptoa((vm_paddr_t)Maxmem),
534 		    dmat->common.boundary, dmat->common.alignment);
535 		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
536 		    map, &nobounce_dmamap, map->pagesneeded);
537 		/*
538 		 * Count the number of bounce pages
539 		 * needed in order to complete this transfer
540 		 */
541 		vaddr = (vm_offset_t)buf;
542 		vendaddr = (vm_offset_t)buf + buflen;
543 
544 		while (vaddr < vendaddr) {
545 			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
546 			if (pmap == kernel_pmap)
547 				paddr = pmap_kextract(vaddr);
548 			else
549 				paddr = pmap_extract(pmap, vaddr);
550 			if (addr_needs_bounce(dmat, paddr)) {
551 				sg_len = roundup2(sg_len,
552 				    dmat->common.alignment);
553 				map->pagesneeded++;
554 			}
555 			vaddr += sg_len;
556 		}
557 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
558 	}
559 }
560 
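/*
 * As above, but for a buffer described by an array of vm_page_t,
 * starting ma_offs bytes into the first page.
 */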
561 static void
562 _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
563     int ma_offs, bus_size_t buflen, int flags)
564 {
565 	bus_size_t sg_len, max_sgsize;
566 	int page_index;
567 	vm_paddr_t paddr;
568 
569 	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
570 		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
571 		    "alignment= %d", dmat->common.lowaddr,
572 		    ptoa((vm_paddr_t)Maxmem),
573 		    dmat->common.boundary, dmat->common.alignment);
574 		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
575 		    map, &nobounce_dmamap, map->pagesneeded);
576 
577 		/*
578 		 * Count the number of bounce pages
579 		 * needed in order to complete this transfer
580 		 */
581 		page_index = 0;
582 		while (buflen > 0) {
583 			paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
584 			sg_len = PAGE_SIZE - ma_offs;
585 			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
586 			sg_len = MIN(sg_len, max_sgsize);
587 			if (addr_needs_bounce(dmat, paddr)) {
588 				sg_len = roundup2(sg_len,
589 				    dmat->common.alignment);
590 				sg_len = MIN(sg_len, max_sgsize);
591 				KASSERT(vm_addr_align_ok(sg_len,
592 				    dmat->common.alignment),
593 				    ("Segment size is not aligned"));
594 				map->pagesneeded++;
595 			}
596 			if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
597 				page_index++;
598 			ma_offs = (ma_offs + sg_len) & PAGE_MASK;
599 			KASSERT(buflen >= sg_len,
600 			    ("Segment length overruns original buffer"));
601 			buflen -= sg_len;
602 		}
603 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
604 	}
605 }
606 
607 /*
608  * Add a single contiguous physical range to the segment list.
609  */
610 static bus_size_t
611 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
612     bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
613 {
614 	int seg;
615 
616 	KASSERT(curaddr <= BUS_SPACE_MAXADDR,
617 	    ("ds_addr %#jx > BUS_SPACE_MAXADDR %#jx; dmat %p fl %#x low %#jx "
618 	    "hi %#jx",
619 	    (uintmax_t)curaddr, (uintmax_t)BUS_SPACE_MAXADDR,
620 	    dmat, dmat->bounce_flags, (uintmax_t)dmat->common.lowaddr,
621 	    (uintmax_t)dmat->common.highaddr));
622 
623 	/*
624 	 * Make sure we don't cross any boundaries.
625 	 */
626 	if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
627 		sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
628 
629 	/*
630 	 * Insert chunk into a segment, coalescing with
631 	 * previous segment if possible.
632 	 */
633 	seg = *segp;
634 	if (seg == -1) {
635 		seg = 0;
636 		segs[seg].ds_addr = curaddr;
637 		segs[seg].ds_len = sgsize;
638 	} else {
639 		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
640 		    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
641 		    vm_addr_bound_ok(segs[seg].ds_addr,
642 		    segs[seg].ds_len + sgsize, dmat->common.boundary))
643 			segs[seg].ds_len += sgsize;
644 		else {
645 			if (++seg >= dmat->common.nsegments)
646 				return (0);
647 			segs[seg].ds_addr = curaddr;
648 			segs[seg].ds_len = sgsize;
649 		}
650 	}
651 	*segp = seg;
652 	return (sgsize);
653 }
654 
655 /*
656  * Utility function to load a physical buffer.  segp contains
657  * the starting segment on entrance, and the ending segment on exit.
658  */
659 static int
660 bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
661     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
662     int *segp)
663 {
664 	bus_size_t sgsize;
665 	vm_paddr_t curaddr;
666 	int error;
667 
668 	if (map == NULL)
669 		map = &nobounce_dmamap;
670 
671 	if (segs == NULL)
672 		segs = dmat->segments;
673 
674 	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
675 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
676 		if (map->pagesneeded != 0) {
677 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
678 			if (error)
679 				return (error);
680 		}
681 	}
682 
683 	while (buflen > 0) {
684 		curaddr = buf;
685 		sgsize = MIN(buflen, dmat->common.maxsegsz);
686 		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
687 		    map->pagesneeded != 0 &&
688 		    addr_needs_bounce(dmat, curaddr)) {
689 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
690 			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
691 			    sgsize);
692 		}
693 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
694 		    segp);
695 		if (sgsize == 0)
696 			break;
697 		buf += sgsize;
698 		buflen -= sgsize;
699 	}
700 
701 	/*
702 	 * Did we fit?
703 	 */
704 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
705 }
706 
707 /*
708  * Utility function to load a linear buffer.  segp contains
709  * the starting segment on entrance, and the ending segment on exit.
710  */
711 static int
712 bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
713     bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
714     int *segp)
715 {
716 	bus_size_t sgsize, max_sgsize;
717 	vm_paddr_t curaddr;
718 	vm_offset_t kvaddr, vaddr;
719 	int error;
720 
721 	if (map == NULL)
722 		map = &nobounce_dmamap;
723 
724 	if (segs == NULL)
725 		segs = dmat->segments;
726 
727 	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
728 		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
729 		if (map->pagesneeded != 0) {
730 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
731 			if (error)
732 				return (error);
733 		}
734 	}
735 
736 	vaddr = (vm_offset_t)buf;
737 	while (buflen > 0) {
738 		/*
739 		 * Get the physical address for this segment.
740 		 */
741 		if (pmap == kernel_pmap) {
742 			curaddr = pmap_kextract(vaddr);
743 			kvaddr = vaddr;
744 		} else {
745 			curaddr = pmap_extract(pmap, vaddr);
746 			kvaddr = 0;
747 		}
748 
749 		/*
750 		 * Compute the segment size, and adjust counts.
751 		 */
752 		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
753 		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
754 		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
755 		    map->pagesneeded != 0 &&
756 		    addr_needs_bounce(dmat, curaddr)) {
757 			sgsize = roundup2(sgsize, dmat->common.alignment);
758 			sgsize = MIN(sgsize, max_sgsize);
759 			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
760 			    sgsize);
761 		} else {
762 			sgsize = MIN(sgsize, max_sgsize);
763 		}
764 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
765 		    segp);
766 		if (sgsize == 0)
767 			break;
768 		vaddr += sgsize;
769 		buflen -= sgsize;
770 	}
771 
772 	/*
773 	 * Did we fit?
774 	 */
775 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
776 }
777 
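/*
 * Utility function to load a buffer described by an array of vm_page_t.
 * Falls back to bus_dmamap_load_ma_triv() when BUS_DMA_KEEP_PG_OFFSET
 * requires page offsets to be preserved.
 */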
778 static int
779 bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
780     struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
781     bus_dma_segment_t *segs, int *segp)
782 {
783 	vm_paddr_t paddr, next_paddr;
784 	int error, page_index;
785 	bus_size_t sgsize, max_sgsize;
786 
787 	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
788 		/*
789 		 * If we have to keep the offset of each page this function
790 		 * is not suitable, switch back to bus_dmamap_load_ma_triv
791 		 * which is going to do the right thing in this case.
792 		 */
793 		error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
794 		    flags, segs, segp);
795 		return (error);
796 	}
797 
798 	if (map == NULL)
799 		map = &nobounce_dmamap;
800 
801 	if (segs == NULL)
802 		segs = dmat->segments;
803 
804 	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
805 		_bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
806 		if (map->pagesneeded != 0) {
807 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
808 			if (error)
809 				return (error);
810 		}
811 	}
812 
813 	page_index = 0;
814 	while (buflen > 0) {
815 		/*
816 		 * Compute the segment size, and adjust counts.
817 		 */
818 		paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
819 		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
820 		sgsize = PAGE_SIZE - ma_offs;
821 		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
822 		    map->pagesneeded != 0 &&
823 		    addr_needs_bounce(dmat, paddr)) {
824 			sgsize = roundup2(sgsize, dmat->common.alignment);
825 			sgsize = MIN(sgsize, max_sgsize);
826 			KASSERT(vm_addr_align_ok(sgsize,
827 			    dmat->common.alignment),
828 			    ("Segment size is not aligned"));
829 			/*
830 			 * Check if two pages of the user provided buffer
831 			 * are used.
832 			 */
833 			if ((ma_offs + sgsize) > PAGE_SIZE)
834 				next_paddr =
835 				    VM_PAGE_TO_PHYS(ma[page_index + 1]);
836 			else
837 				next_paddr = 0;
838 			paddr = add_bounce_page(dmat, map, 0, paddr,
839 			    next_paddr, sgsize);
840 		} else {
841 			sgsize = MIN(sgsize, max_sgsize);
842 		}
843 		sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
844 		    segp);
845 		if (sgsize == 0)
846 			break;
847 		KASSERT(buflen >= sgsize,
848 		    ("Segment length overruns original buffer"));
849 		buflen -= sgsize;
850 		if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
851 			page_index++;
852 		ma_offs = (ma_offs + sgsize) & PAGE_MASK;
853 	}
854 
855 	/*
856 	 * Did we fit?
857 	 */
858 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
859 }
860 
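/*
 * Stash the transfer description and callback so a load deferred for
 * lack of bounce pages can be finished later.
 */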
861 static void
862 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
863     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
864 {
865 
866 	if (map == NULL)
867 		return;
868 	map->mem = *mem;
869 	map->dmat = dmat;
870 	map->callback = callback;
871 	map->callback_arg = callback_arg;
872 }
873 
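/*
 * Return the segment array for a completed load, defaulting to the
 * tag's private segment storage.
 */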
874 static bus_dma_segment_t *
875 bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
876     bus_dma_segment_t *segs, int nsegs, int error)
877 {
878 
879 	if (segs == NULL)
880 		segs = dmat->segments;
881 	return (segs);
882 }
883 
884 /*
885  * Release the mapping held by map.
886  */
887 static void
888 bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
889 {
890 	if (map == NULL)
891 		return;
892 
893 	free_bounce_pages(dmat, map);
894 }
895 
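/*
 * Copy data between the client buffer and its bounce pages: PREWRITE
 * copies out to the bounce pages, POSTREAD copies back.
 */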
896 static void
897 bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
898     bus_dmasync_op_t op)
899 {
900 	struct bounce_page *bpage;
901 	vm_offset_t datavaddr, tempvaddr;
902 	bus_size_t datacount1, datacount2;
903 
904 	if (map == NULL)
905 		goto out;
906 	if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
907 		goto out;
908 
909 	/*
910 	 * Handle data bouncing.  We might also want to add support for
911 	 * invalidating the caches on broken hardware.
912 	 */
913 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
914 	    "performing bounce", __func__, dmat, dmat->common.flags, op);
915 
916 	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
917 		while (bpage != NULL) {
918 			tempvaddr = 0;
919 			datavaddr = bpage->datavaddr;
920 			datacount1 = bpage->datacount;
921 			if (datavaddr == 0) {
922 				tempvaddr =
923 				    pmap_quick_enter_page(bpage->datapage[0]);
924 				datavaddr = tempvaddr | bpage->dataoffs;
925 				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
926 				    datacount1);
927 			}
928 
929 			bcopy((void *)datavaddr,
930 			    (void *)bpage->vaddr, datacount1);
931 
932 			if (tempvaddr != 0)
933 				pmap_quick_remove_page(tempvaddr);
934 
935 			if (bpage->datapage[1] == 0) {
936 				KASSERT(datacount1 == bpage->datacount,
937 		("Mismatch between data size and provided memory space"));
938 				goto next_w;
939 			}
940 
941 			/*
942			 * We are dealing with an unmapped buffer that spans
943			 * two pages.
944 			 */
945 			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
946 			datacount2 = bpage->datacount - datacount1;
947 			bcopy((void *)datavaddr,
948 			    (void *)(bpage->vaddr + datacount1), datacount2);
949 			pmap_quick_remove_page(datavaddr);
950 
951 next_w:
952 			bpage = STAILQ_NEXT(bpage, links);
953 		}
954 		dmat->bounce_zone->total_bounced++;
955 	}
956 
957 	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
958 		while (bpage != NULL) {
959 			tempvaddr = 0;
960 			datavaddr = bpage->datavaddr;
961 			datacount1 = bpage->datacount;
962 			if (datavaddr == 0) {
963 				tempvaddr =
964 				    pmap_quick_enter_page(bpage->datapage[0]);
965 				datavaddr = tempvaddr | bpage->dataoffs;
966 				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
967 				    datacount1);
968 			}
969 
970 			bcopy((void *)bpage->vaddr, (void *)datavaddr,
971 			    datacount1);
972 
973 			if (tempvaddr != 0)
974 				pmap_quick_remove_page(tempvaddr);
975 
976 			if (bpage->datapage[1] == 0) {
977 				KASSERT(datacount1 == bpage->datacount,
978 		("Mismatch between data size and provided memory space"));
979 				goto next_r;
980 			}
981 
982 			/*
983			 * We are dealing with an unmapped buffer that spans
984			 * two pages.
985 			 */
986 			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
987 			datacount2 = bpage->datacount - datacount1;
988 			bcopy((void *)(bpage->vaddr + datacount1),
989 			    (void *)datavaddr, datacount2);
990 			pmap_quick_remove_page(datavaddr);
991 
992 next_r:
993 			bpage = STAILQ_NEXT(bpage, links);
994 		}
995 		dmat->bounce_zone->total_bounced++;
996 	}
997 out:
998 	atomic_thread_fence_rel();
999 	if (map != NULL)
1000 		kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
1001 }
1002 
1003 #ifdef KMSAN
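/*
 * Remember the loaded memory descriptor so bounce_bus_dmamap_sync() can
 * update KMSAN shadow state for the buffer.
 */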
1004 static void
1005 bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem)
1006 {
1007 	if (map == NULL)
1008 		return;
1009 	memcpy(&map->kmsan_mem, mem, sizeof(map->kmsan_mem));
1010 }
1011 #endif
1012 
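/*
 * Method table exporting this bounce implementation to the common x86
 * busdma framework.
 */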
1013 struct bus_dma_impl bus_dma_bounce_impl = {
1014 	.tag_create = bounce_bus_dma_tag_create,
1015 	.tag_destroy = bounce_bus_dma_tag_destroy,
1016 	.tag_set_domain = bounce_bus_dma_tag_set_domain,
1017 	.id_mapped = bounce_bus_dma_id_mapped,
1018 	.map_create = bounce_bus_dmamap_create,
1019 	.map_destroy = bounce_bus_dmamap_destroy,
1020 	.mem_alloc = bounce_bus_dmamem_alloc,
1021 	.mem_free = bounce_bus_dmamem_free,
1022 	.load_phys = bounce_bus_dmamap_load_phys,
1023 	.load_buffer = bounce_bus_dmamap_load_buffer,
1024 	.load_ma = bounce_bus_dmamap_load_ma,
1025 	.map_waitok = bounce_bus_dmamap_waitok,
1026 	.map_complete = bounce_bus_dmamap_complete,
1027 	.map_unload = bounce_bus_dmamap_unload,
1028 	.map_sync = bounce_bus_dmamap_sync,
1029 #ifdef KMSAN
1030 	.load_kmsan = bounce_bus_dmamap_load_kmsan,
1031 #endif
1032 };
1033