/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>

#ifdef __i386__
#define MAX_BPAGES (Maxmem > atop(0x100000000ULL) ? 8192 : 512)
#else
#define MAX_BPAGES 8192
#endif
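
/*
 * Worked example (illustrative, assuming 4 KB pages): ptoa(8192) caps the
 * bounce pool at 32 MB.  On i386 the cap drops to 512 pages (2 MB) unless
 * more than 4 GB of physical memory (Maxmem > atop(0x100000000ULL)) is
 * present.
 */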

enum {
	BUS_DMA_COULD_BOUNCE	= 0x01,
	BUS_DMA_MIN_ALLOC_COMP	= 0x02,
	BUS_DMA_KMEM_ALLOC	= 0x04,
	BUS_DMA_FORCE_MAP	= 0x08,
};

struct bounce_page;
struct bounce_zone;

struct bus_dma_tag {
	struct bus_dma_tag_common common;
	int			map_count;
	int			bounce_flags;
	bus_dma_segment_t	*segments;
	struct bounce_zone	*bounce_zone;
};

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");

struct bus_dmamap {
	STAILQ_HEAD(, bounce_page) bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	struct memdesc	       mem;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
#ifdef KMSAN
	struct memdesc	       kmsan_mem;
#endif
};

static struct bus_dmamap nobounce_dmamap;

static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
    bus_size_t buflen, int *pagesneeded);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);

static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");

#define	dmat_alignment(dmat)	((dmat)->common.alignment)
#define	dmat_domain(dmat)	((dmat)->common.domain)
#define	dmat_flags(dmat)	((dmat)->common.flags)
#define	dmat_lowaddr(dmat)	((dmat)->common.lowaddr)
#define	dmat_lockfunc(dmat)	((dmat)->common.lockfunc)
#define	dmat_lockfuncarg(dmat)	((dmat)->common.lockfuncarg)

#include "../../kern/subr_busdma_bounce.c"

static int
bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;
	int error;

	/* Must bounce */
	if ((error = alloc_bounce_zone(dmat)) != 0)
		return (error);
	bz = dmat->bounce_zone;

	if (ptoa(bz->total_bpages) < dmat->common.maxsize) {
		int pages;

		pages = atop(dmat->common.maxsize) - bz->total_bpages;

		/* Add pages to our bounce pool */
		if (alloc_bounce_pages(dmat, pages) < pages)
			return (ENOMEM);
	}
	/* Performed initial allocation */
	dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;

	return (0);
}
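
/*
 * Worked example (illustrative): for a tag with a maxsize of 64 KB and an
 * empty zone, atop(65536) is 16 pages with 4 KB pages, so the setup above
 * grows the pool by 16 bounce pages and returns ENOMEM if fewer could be
 * allocated.
 */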

/*
 * Allocate a device-specific dma_tag.
 */
static int
bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error;

	*dmat = NULL;
	error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
	    NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
	    maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
	    sizeof (struct bus_dma_tag), (void **)&newtag);
	if (error != 0)
		return (error);

	newtag->common.impl = &bus_dma_bounce_impl;
	newtag->map_count = 0;
	newtag->segments = NULL;

#ifdef KMSAN
	/*
	 * When KMSAN is configured, we need a map to store a memory descriptor
	 * which can be used for validation.
	 */
	newtag->bounce_flags |= BUS_DMA_FORCE_MAP;
#endif

	if (parent != NULL && (newtag->common.filter != NULL ||
	    (parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0))
		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
	    newtag->common.alignment > 1)
		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

	if ((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
	    (flags & BUS_DMA_ALLOCNOW) != 0)
		error = bounce_bus_dma_zone_setup(newtag);
	else
		error = 0;

	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
	    error);
	return (error);
}

static bool
bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0)
		return (true);
	return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
}

/*
 * Update the domain for the tag.  We may need to reallocate the zone and
 * bounce pages.
 */
static int
bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{

	KASSERT(dmat->map_count == 0,
	    ("bounce_bus_dma_tag_set_domain: Domain set after use.\n"));
	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 ||
	    dmat->bounce_zone == NULL)
		return (0);
	dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP;
	return (bounce_bus_dma_zone_setup(dmat));
}

static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif
	bus_dma_tag_t parent;
	int error;

	error = 0;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}
		while (dmat != NULL) {
			parent = (bus_dma_tag_t)dmat->common.parent;
			atomic_subtract_int(&dmat->common.ref_count, 1);
			if (dmat->common.ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}
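
/*
 * Usage sketch (illustrative, not part of this file): a driver limited to
 * 32-bit addressing reaches this implementation through bus_dma_tag_create(9).
 * The "sc" softc and its mutex are hypothetical.
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	// parent
 *	    1, 0,			// alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	// lowaddr: bounce anything above 4 GB
 *	    BUS_SPACE_MAXADDR,		// highaddr
 *	    NULL, NULL,			// filter, filterarg
 *	    MAXBSIZE,			// maxsize
 *	    1,				// nsegments
 *	    MAXBSIZE,			// maxsegsz
 *	    0,				// flags
 *	    busdma_lock_mutex, &sc->mtx,
 *	    &sc->dmat);
 *
 * On a machine with more than 4 GB of RAM, lowaddr < ptoa(Maxmem) holds and
 * tag_create above marks the tag BUS_DMA_COULD_BOUNCE.
 */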

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	struct bounce_zone *bz;
	int error, maxpages, pages;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = malloc_domainset(
		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
		    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	if (dmat->bounce_flags & (BUS_DMA_COULD_BOUNCE | BUS_DMA_FORCE_MAP)) {
		*mapp = malloc_domainset(sizeof(**mapp), M_DEVBUF,
		    DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
		STAILQ_INIT(&(*mapp)->bpages);
	} else {
		*mapp = NULL;
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		/* Must bounce */
		if (dmat->bounce_zone == NULL &&
		    (error = alloc_bounce_zone(dmat)) != 0)
			goto out;
		bz = dmat->bounce_zone;

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->common.alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem -
			    atop(dmat->common.lowaddr));
		if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			pages = MAX(atop(dmat->common.maxsize), 1);
			pages = MIN(dmat->common.nsegments, pages);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;
			if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
			    == 0) {
				if (error == 0) {
					dmat->bounce_flags |=
					    BUS_DMA_MIN_ALLOC_COMP;
				}
			} else
				error = 0;
		}
		bz->map_count++;
	}

out:
	if (error == 0) {
		dmat->map_count++;
	} else {
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->common.flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}
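
/*
 * Usage sketch (illustrative): per-buffer maps pair create with destroy, and
 * destroy returns EBUSY while bounce pages are still queued on the map, so
 * callers unload first.  "sc" is hypothetical.
 *
 *	error = bus_dmamap_create(sc->dmat, 0, &sc->map);
 *	...
 *	bus_dmamap_unload(sc->dmat, sc->map);
 *	error = bus_dmamap_destroy(sc->dmat, sc->map);
 */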

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
static int
bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc_domainset(
		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
		    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->common.flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * Allocate the buffer from the malloc(9) allocator if...
	 *  - It's small enough to fit into a single page.
	 *  - Its alignment requirement is also smaller than the page size.
	 *  - The low address requirement is fulfilled.
	 *  - Default cache attributes are requested (WB).
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed
	 *    nsegments even when the maximum segment size is less
	 *    than PAGE_SIZE.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 *
	 * Warn the user if malloc gets it wrong.
	 */
	if (dmat->common.maxsize <= PAGE_SIZE &&
	    dmat->common.alignment <= PAGE_SIZE &&
	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc_domainset_aligned(dmat->common.maxsize,
		    dmat->common.alignment, M_DEVBUF,
		    DOMAINSET_PREF(dmat->common.domain), mflags);
		KASSERT(*vaddr == NULL || ((uintptr_t)*vaddr & PAGE_MASK) +
		    dmat->common.maxsize <= PAGE_SIZE,
		    ("bounce_bus_dmamem_alloc: multi-page alloc %p maxsize "
		    "%#jx align %#jx", *vaddr, (uintmax_t)dmat->common.maxsize,
		    (uintmax_t)dmat->common.alignment));
	} else if (dmat->common.nsegments >=
	    howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
	    PAGE_SIZE)) &&
	    dmat->common.alignment <= PAGE_SIZE &&
	    (dmat->common.boundary % PAGE_SIZE) == 0) {
		/* Page-based multi-segment allocations allowed */
		*vaddr = kmem_alloc_attr_domainset(
		    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
		    mflags, 0ul, dmat->common.lowaddr, attr);
		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
	} else {
		*vaddr = kmem_alloc_contig_domainset(
		    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
		    mflags, 0ul, dmat->common.lowaddr,
		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
		    dmat->common.boundary, attr);
		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->common.flags, ENOMEM);
		return (ENOMEM);
	} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->common.flags, 0);
	return (0);
}
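
/*
 * Usage sketch (illustrative): a static DMA area such as a descriptor ring
 * is typically obtained and released as below.  The map returned in the
 * last argument is always NULL on this platform, as noted above.
 *
 *	void *ring;
 *	bus_dmamap_t map;
 *
 *	error = bus_dmamem_alloc(sc->ring_dmat, &ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &map);
 *	...
 *	bus_dmamem_free(sc->ring_dmat, ring, map);
 */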

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
static void
bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
	 * was used and set if kmem_alloc_contig() was used.
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
		free(vaddr, M_DEVBUF);
	else
		kmem_free(vaddr, dmat->common.maxsize);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
	    dmat->bounce_flags);
}

static bool
_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
    int *pagesneeded)
{
	vm_paddr_t curaddr;
	bus_size_t sgsize;
	int count;

	/*
	 * Count the number of bounce pages needed in order to
	 * complete this transfer
	 */
	count = 0;
	curaddr = buf;
	while (buflen != 0) {
		sgsize = MIN(buflen, dmat->common.maxsegsz);
		if (bus_dma_run_filter(&dmat->common, curaddr)) {
			sgsize = MIN(sgsize,
			    PAGE_SIZE - (curaddr & PAGE_MASK));
			if (pagesneeded == NULL)
				return (true);
			count++;
		}
		curaddr += sgsize;
		buflen -= sgsize;
	}

	if (pagesneeded != NULL)
		*pagesneeded = count;
	return (count != 0);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		_bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	vm_paddr_t paddr;
	bus_size_t sg_len;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->common.lowaddr,
		    ptoa((vm_paddr_t)Maxmem),
		    dmat->common.boundary, dmat->common.alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
				sg_len = roundup2(sg_len,
				    dmat->common.alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}
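
/*
 * Worked example (illustrative, alignment 1): a 6 KB kernel buffer starting
 * at page offset 0x800 touches parts of three 4 KB pages.  If every physical
 * page fails the filter, the loop above charges one bounce page per page
 * touched, so map->pagesneeded becomes 3.
 */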

static void
_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
    int ma_offs, bus_size_t buflen, int flags)
{
	bus_size_t sg_len, max_sgsize;
	int page_index;
	vm_paddr_t paddr;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->common.lowaddr,
		    ptoa((vm_paddr_t)Maxmem),
		    dmat->common.boundary, dmat->common.alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		page_index = 0;
		while (buflen > 0) {
			paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
			sg_len = PAGE_SIZE - ma_offs;
			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
			sg_len = MIN(sg_len, max_sgsize);
			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
				sg_len = roundup2(sg_len,
				    dmat->common.alignment);
				sg_len = MIN(sg_len, max_sgsize);
				KASSERT(vm_addr_align_ok(sg_len,
				    dmat->common.alignment),
				    ("Segment size is not aligned"));
				map->pagesneeded++;
			}
			if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
				page_index++;
			ma_offs = (ma_offs + sg_len) & PAGE_MASK;
			KASSERT(buflen >= sg_len,
			    ("Segment length overruns original buffer"));
			buflen -= sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	int seg;

	KASSERT(curaddr <= BUS_SPACE_MAXADDR,
	    ("ds_addr %#jx > BUS_SPACE_MAXADDR %#jx; dmat %p fl %#x low %#jx "
	    "hi %#jx",
	    (uintmax_t)curaddr, (uintmax_t)BUS_SPACE_MAXADDR,
	    dmat, dmat->bounce_flags, (uintmax_t)dmat->common.lowaddr,
	    (uintmax_t)dmat->common.highaddr));

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
		sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
		    vm_addr_bound_ok(segs[seg].ds_addr,
		    segs[seg].ds_len + sgsize, dmat->common.boundary))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->common.nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
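
/*
 * Worked example (illustrative): with a 4 KB boundary, a chunk starting at
 * curaddr 0x1f00 with sgsize 0x400 would cross 0x2000, so the clamp above
 * trims it to roundup2(0x1f00, 0x1000) - 0x1f00 = 0x100 bytes; the caller
 * then submits the remainder as a new segment.
 */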

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	vm_paddr_t curaddr;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->common.maxsegsz);
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    bus_dma_run_filter(&dmat->common, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize, max_sgsize;
	vm_paddr_t curaddr;
	vm_offset_t kvaddr, vaddr;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;
	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    bus_dma_run_filter(&dmat->common, curaddr)) {
			sgsize = roundup2(sgsize, dmat->common.alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
			    sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
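
/*
 * Usage sketch (illustrative): drivers reach load_buffer above through
 * bus_dmamap_load(9); the callback receives the (possibly bounced) segment
 * list.  The "mydev" names below are hypothetical.
 *
 *	static void
 *	mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->busaddr = segs[0].ds_addr;	// device-visible address
 *	}
 *
 *	error = bus_dmamap_load(sc->dmat, sc->map, buf, buflen,
 *	    mydev_load_cb, sc, BUS_DMA_NOWAIT);
 */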

static int
bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr, next_paddr;
	int error, page_index;
	bus_size_t sgsize, max_sgsize;

	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * If we have to keep the offset of each page this function
		 * is not suitable; switch back to bus_dmamap_load_ma_triv,
		 * which is going to do the right thing in this case.
		 */
		error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
		    flags, segs, segp);
		return (error);
	}

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	page_index = 0;
	while (buflen > 0) {
		/*
		 * Compute the segment size, and adjust counts.
		 */
		paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
		sgsize = PAGE_SIZE - ma_offs;
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    bus_dma_run_filter(&dmat->common, paddr)) {
			sgsize = roundup2(sgsize, dmat->common.alignment);
			sgsize = MIN(sgsize, max_sgsize);
			KASSERT(vm_addr_align_ok(sgsize,
			    dmat->common.alignment),
			    ("Segment size is not aligned"));
			/*
			 * Check if two pages of the user provided buffer
			 * are used.
			 */
			if ((ma_offs + sgsize) > PAGE_SIZE)
				next_paddr =
				    VM_PAGE_TO_PHYS(ma[page_index + 1]);
			else
				next_paddr = 0;
			paddr = add_bounce_page(dmat, map, 0, paddr,
			    next_paddr, sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		KASSERT(buflen >= sgsize,
		    ("Segment length overruns original buffer"));
		buflen -= sgsize;
		if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
			page_index++;
		ma_offs = (ma_offs + sgsize) & PAGE_MASK;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

static void
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

	if (map == NULL)
		return;
	map->mem = *mem;
	map->dmat = dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

static bus_dma_segment_t *
bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}
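
/*
 * Note (illustrative): waitok/complete above support deferred loads.  A load
 * without BUS_DMA_NOWAIT that cannot reserve bounce pages returns EINPROGRESS
 * instead of failing; the callback saved by waitok runs later, once pages
 * free up, so callers treat EINPROGRESS as success-pending:
 *
 *	error = bus_dmamap_load(sc->dmat, sc->map, buf, buflen, cb, sc, 0);
 *	if (error != 0 && error != EINPROGRESS)
 *		goto fail;
 */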

/*
 * Release the mapping held by map.
 */
static void
bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return;

	free_bounce_pages(dmat, map);
}

static void
bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;
	bus_size_t datacount1, datacount2;

	if (map == NULL)
		goto out;
	if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
		goto out;

	/*
	 * Handle data bouncing.  We might also want to add support for
	 * invalidating the caches on broken hardware.
	 */
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
	    "performing bounce", __func__, dmat, dmat->common.flags, op);

	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
		while (bpage != NULL) {
			tempvaddr = 0;
			datavaddr = bpage->datavaddr;
			datacount1 = bpage->datacount;
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage[0]);
				datavaddr = tempvaddr | bpage->dataoffs;
				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
				    datacount1);
			}

			bcopy((void *)datavaddr,
			    (void *)bpage->vaddr, datacount1);

			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);

			if (bpage->datapage[1] == 0) {
				KASSERT(datacount1 == bpage->datacount,
				    ("Mismatch between data size and provided "
				    "memory space"));
				goto next_w;
			}

			/*
			 * We are dealing with an unmapped buffer that spans
			 * two pages.
			 */
			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
			datacount2 = bpage->datacount - datacount1;
			bcopy((void *)datavaddr,
			    (void *)(bpage->vaddr + datacount1), datacount2);
			pmap_quick_remove_page(datavaddr);

next_w:
			bpage = STAILQ_NEXT(bpage, links);
		}
		dmat->bounce_zone->total_bounced++;
	}

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		while (bpage != NULL) {
			tempvaddr = 0;
			datavaddr = bpage->datavaddr;
			datacount1 = bpage->datacount;
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage[0]);
				datavaddr = tempvaddr | bpage->dataoffs;
				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
				    datacount1);
			}

			bcopy((void *)bpage->vaddr, (void *)datavaddr,
			    datacount1);

			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);

			if (bpage->datapage[1] == 0) {
				KASSERT(datacount1 == bpage->datacount,
				    ("Mismatch between data size and provided "
				    "memory space"));
				goto next_r;
			}

			/*
			 * We are dealing with an unmapped buffer that spans
			 * two pages.
			 */
			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
			datacount2 = bpage->datacount - datacount1;
			bcopy((void *)(bpage->vaddr + datacount1),
			    (void *)datavaddr, datacount2);
			pmap_quick_remove_page(datavaddr);

next_r:
			bpage = STAILQ_NEXT(bpage, links);
		}
		dmat->bounce_zone->total_bounced++;
	}
out:
	atomic_thread_fence_rel();
	if (map != NULL)
		kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
}
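
/*
 * Usage sketch (illustrative): sync brackets each DMA transaction.  For a
 * device read from host memory (a host-to-device transfer):
 *
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREWRITE);
 *	// start DMA, wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTWRITE);
 *
 * PREWRITE copies data into the bounce pages above; for device writes to
 * host memory, POSTREAD copies bounced data back out.
 */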

#ifdef KMSAN
static void
bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem)
{
	if (map == NULL)
		return;
	memcpy(&map->kmsan_mem, mem, sizeof(map->kmsan_mem));
}
#endif

struct bus_dma_impl bus_dma_bounce_impl = {
	.tag_create = bounce_bus_dma_tag_create,
	.tag_destroy = bounce_bus_dma_tag_destroy,
	.tag_set_domain = bounce_bus_dma_tag_set_domain,
	.id_mapped = bounce_bus_dma_id_mapped,
	.map_create = bounce_bus_dmamap_create,
	.map_destroy = bounce_bus_dmamap_destroy,
	.mem_alloc = bounce_bus_dmamem_alloc,
	.mem_free = bounce_bus_dmamem_free,
	.load_phys = bounce_bus_dmamap_load_phys,
	.load_buffer = bounce_bus_dmamap_load_buffer,
	.load_ma = bounce_bus_dmamap_load_ma,
	.map_waitok = bounce_bus_dmamap_waitok,
	.map_complete = bounce_bus_dmamap_complete,
	.map_unload = bounce_bus_dmamap_unload,
	.map_sync = bounce_bus_dmamap_sync,
#ifdef KMSAN
	.load_kmsan = bounce_bus_dmamap_load_kmsan,
#endif
};
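
/*
 * Note (illustrative, assuming the common x86 busdma layer outside this
 * file): this method table is how the MI bus_dma(9) entry points reach the
 * bounce implementation.  tag_create above stores &bus_dma_bounce_impl in
 * common.impl, and the shared wrappers then dispatch roughly as:
 *
 *	struct bus_dma_tag_common *tc = (struct bus_dma_tag_common *)dmat;
 *	error = tc->impl->load_buffer(dmat, map, buf, buflen, pmap,
 *	    flags, segs, segp);
 */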