/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>

#ifdef __i386__
#define MAX_BPAGES (Maxmem > atop(0x100000000ULL) ? 8192 : 512)
#else
#define MAX_BPAGES 8192
#endif
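
/*
 * Worked example of the MAX_BPAGES computation above (a sketch, not part of
 * the original logic): atop(0x100000000ULL) is the page count of the first
 * 4 GiB (1048576 pages with 4 KiB pages), so an i386 kernel managing more
 * than 4 GiB of RAM caps the bounce pool at 8192 pages (32 MiB), while a
 * smaller machine is capped at 512 pages (2 MiB).
 */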

enum {
        BUS_DMA_COULD_BOUNCE    = 0x01,
        BUS_DMA_MIN_ALLOC_COMP  = 0x02,
        BUS_DMA_KMEM_ALLOC      = 0x04,
        BUS_DMA_FORCE_MAP       = 0x08,
};

struct bounce_page;
struct bounce_zone;

struct bus_dma_tag {
        struct bus_dma_tag_common common;
        int                     map_count;
        int                     bounce_flags;
        bus_dma_segment_t       *segments;
        struct bounce_zone      *bounce_zone;
};

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");

struct bus_dmamap {
        STAILQ_HEAD(, bounce_page) bpages;
        int                     pagesneeded;
        int                     pagesreserved;
        bus_dma_tag_t           dmat;
        struct memdesc          mem;
        bus_dmamap_callback_t   *callback;
        void                    *callback_arg;
        __sbintime_t            queued_time;
        STAILQ_ENTRY(bus_dmamap) links;
#ifdef KMSAN
        struct memdesc          kmsan_mem;
#endif
};

static struct bus_dmamap nobounce_dmamap;

static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
    bus_size_t buflen, int *pagesneeded);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);

static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");

#define dmat_alignment(dmat)    ((dmat)->common.alignment)
#define dmat_domain(dmat)       ((dmat)->common.domain)
#define dmat_flags(dmat)        ((dmat)->common.flags)
#define dmat_highaddr(dmat)     ((dmat)->common.highaddr)
#define dmat_lowaddr(dmat)      ((dmat)->common.lowaddr)
#define dmat_lockfunc(dmat)     ((dmat)->common.lockfunc)
#define dmat_lockfuncarg(dmat)  ((dmat)->common.lockfuncarg)

#include "../../kern/subr_busdma_bounce.c"

/*
 * On i386 kernels without 'options PAE' we need to also bounce any
 * physical addresses above 4G.
 *
 * NB: vm_paddr_t is required here since bus_addr_t is only 32 bits in
 * i386 kernels without 'options PAE'.
 */
static __inline bool
must_bounce(bus_dma_tag_t dmat, vm_paddr_t paddr)
{
#if defined(__i386__) && !defined(PAE)
        if (paddr > BUS_SPACE_MAXADDR)
                return (true);
#endif
        return (addr_needs_bounce(dmat, paddr));
}

static int
bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;
        int error;

        /* Must bounce */
        if ((error = alloc_bounce_zone(dmat)) != 0)
                return (error);
        bz = dmat->bounce_zone;

        if (ptoa(bz->total_bpages) < dmat->common.maxsize) {
                int pages;

                pages = atop(dmat->common.maxsize) - bz->total_bpages;

                /* Add pages to our bounce pool */
                if (alloc_bounce_pages(dmat, pages) < pages)
                        return (ENOMEM);
        }
        /* Performed initial allocation */
        dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;

        return (0);
}
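
/*
 * Illustration of the sizing arithmetic above (numbers are hypothetical):
 * a tag with a maxsize of 64 KiB needs atop(65536) = 16 bounce pages to
 * cover a worst-case transfer.  If the zone already holds 10 pages, the
 * code above tops it up with the remaining 6; if alloc_bounce_pages() can
 * deliver fewer than that, setup fails with ENOMEM rather than leaving a
 * zone that can never bounce a full-sized transfer.
 */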

/*
 * Allocate a device-specific dma_tag.
 */
static int
bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags,
    bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error;

        *dmat = NULL;
        error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
            NULL, alignment, boundary, lowaddr, highaddr, maxsize, nsegments,
            maxsegsz, flags, lockfunc, lockfuncarg, sizeof(struct bus_dma_tag),
            (void **)&newtag);
        if (error != 0)
                return (error);

        newtag->common.impl = &bus_dma_bounce_impl;
        newtag->map_count = 0;
        newtag->segments = NULL;

#ifdef KMSAN
        /*
         * When KMSAN is configured, we need a map to store a memory
         * descriptor which can be used for validation.
         */
        newtag->bounce_flags |= BUS_DMA_FORCE_MAP;
#endif

        if (parent != NULL &&
            (parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)
                newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

        if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
            newtag->common.alignment > 1)
                newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

        if ((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
            (flags & BUS_DMA_ALLOCNOW) != 0)
                error = bounce_bus_dma_zone_setup(newtag);
        else
                error = 0;

        if (error != 0)
                free(newtag, M_DEVBUF);
        else
                *dmat = newtag;
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
            error);
        return (error);
}

static bool
bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{

        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0)
                return (true);
        return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
}

/*
 * Update the domain for the tag.  We may need to reallocate the zone and
 * bounce pages.
 */
static int
bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{

        KASSERT(dmat->map_count == 0,
            ("bounce_bus_dma_tag_set_domain: Domain set after use.\n"));
        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 ||
            dmat->bounce_zone == NULL)
                return (0);
        dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP;
        return (bounce_bus_dma_zone_setup(dmat));
}

static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        int error = 0;

        if (dmat != NULL) {
                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }
                if (dmat->segments != NULL)
                        free(dmat->segments, M_DEVBUF);
                free(dmat, M_DEVBUF);
        }
out:
        CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error);
        return (error);
}
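
/*
 * A minimal sketch of how a driver reaches the tag-create path above via
 * the bus_dma_tag_create(9) front end.  The device, softc fields and
 * constraint values here are hypothetical:
 *
 *      error = bus_dma_tag_create(
 *          bus_get_dma_tag(dev),        (parent: inherit bus constraints)
 *          1, 0,                        (alignment, boundary)
 *          BUS_SPACE_MAXADDR_32BIT,     (lowaddr: bounce anything above 4G)
 *          BUS_SPACE_MAXADDR,           (highaddr)
 *          NULL, NULL,                  (filter, filterarg: must be NULL)
 *          MAXBSIZE, 1, MAXBSIZE,       (maxsize, nsegments, maxsegsz)
 *          0, NULL, NULL,               (flags, lockfunc, lockfuncarg)
 *          &sc->sc_dmat);
 *
 * On a machine with memory above lowaddr, the code above would mark such a
 * tag BUS_DMA_COULD_BOUNCE.
 */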

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        struct bounce_zone *bz;
        int error, maxpages, pages;

        error = 0;

        if (dmat->segments == NULL) {
                dmat->segments = malloc_domainset(
                    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
                    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT);
                if (dmat->segments == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }
        }

        if (dmat->bounce_flags & (BUS_DMA_COULD_BOUNCE | BUS_DMA_FORCE_MAP)) {
                *mapp = malloc_domainset(sizeof(**mapp), M_DEVBUF,
                    DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
                if (*mapp == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }
                STAILQ_INIT(&(*mapp)->bpages);
        } else {
                *mapp = NULL;
        }

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
                /* Must bounce */
                if (dmat->bounce_zone == NULL &&
                    (error = alloc_bounce_zone(dmat)) != 0)
                        goto out;
                bz = dmat->bounce_zone;

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->common.alignment > 1)
                        maxpages = MAX_BPAGES;
                else
                        maxpages = MIN(MAX_BPAGES, Maxmem -
                            atop(dmat->common.lowaddr));
                if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
                    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
                        pages = MAX(atop(dmat->common.maxsize), 1);
                        pages = MIN(dmat->common.nsegments, pages);
                        pages = MIN(maxpages - bz->total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;
                        if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
                            == 0) {
                                if (error == 0) {
                                        dmat->bounce_flags |=
                                            BUS_DMA_MIN_ALLOC_COMP;
                                }
                        } else
                                error = 0;
                }
                bz->map_count++;
        }

out:
        if (error == 0) {
                dmat->map_count++;
        } else {
                free(*mapp, M_DEVBUF);
                *mapp = NULL;
        }

        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->common.flags, error);
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        if (map != NULL && map != &nobounce_dmamap) {
                if (STAILQ_FIRST(&map->bpages) != NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, EBUSY);
                        return (EBUSY);
                }
                if (dmat->bounce_zone)
                        dmat->bounce_zone->map_count--;
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}
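
/*
 * Sketch of the matching driver-side calls (names hypothetical).  A map
 * must be unloaded before it is destroyed, or the EBUSY check above fires
 * while bounce pages are still queued on it:
 *
 *      error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
 *      ...
 *      bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 *      bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
 */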

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
static int
bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
        vm_memattr_t attr;
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc_domainset(
                    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
                    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags);
                if (dmat->segments == NULL) {
                        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                            __func__, dmat, dmat->common.flags, ENOMEM);
                        return (ENOMEM);
                }
        }
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;
        if (flags & BUS_DMA_NOCACHE)
                attr = VM_MEMATTR_UNCACHEABLE;
        else
                attr = VM_MEMATTR_DEFAULT;

        /*
         * Allocate the buffer from the malloc(9) allocator if...
         *  - It's small enough to fit into a single page.
         *  - Its alignment requirement is also smaller than the page size.
         *  - The low address requirement is fulfilled.
         *  - Default cache attributes are requested (WB).
         * else allocate non-contiguous pages if...
         *  - The page count that could get allocated doesn't exceed
         *    nsegments also when the maximum segment size is less
         *    than PAGE_SIZE.
         *  - The alignment constraint isn't larger than a page boundary.
         *  - There are no boundary-crossing constraints.
         * else allocate a block of contiguous pages because one or more of the
         * constraints is something that only the contig allocator can fulfill.
         *
         * Warn the user if malloc gets it wrong.
         */
        if (dmat->common.maxsize <= PAGE_SIZE &&
            dmat->common.alignment <= PAGE_SIZE &&
            dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
            attr == VM_MEMATTR_DEFAULT) {
                *vaddr = malloc_domainset_aligned(dmat->common.maxsize,
                    dmat->common.alignment, M_DEVBUF,
                    DOMAINSET_PREF(dmat->common.domain), mflags);
                KASSERT(*vaddr == NULL || ((uintptr_t)*vaddr & PAGE_MASK) +
                    dmat->common.maxsize <= PAGE_SIZE,
                    ("bounce_bus_dmamem_alloc: multi-page alloc %p maxsize "
                    "%#jx align %#jx", *vaddr, (uintmax_t)dmat->common.maxsize,
                    (uintmax_t)dmat->common.alignment));
        } else if (dmat->common.nsegments >=
            howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
            PAGE_SIZE)) &&
            dmat->common.alignment <= PAGE_SIZE &&
            (dmat->common.boundary % PAGE_SIZE) == 0) {
                /* Page-based multi-segment allocations allowed */
                *vaddr = kmem_alloc_attr_domainset(
                    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
                    mflags, 0ul, dmat->common.lowaddr, attr);
                dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
        } else {
                *vaddr = kmem_alloc_contig_domainset(
                    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
                    mflags, 0ul, dmat->common.lowaddr,
                    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
                    dmat->common.boundary, attr);
                dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
        }
        if (*vaddr == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->common.flags, ENOMEM);
                return (ENOMEM);
        } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
                printf("bus_dmamem_alloc failed to align memory properly.\n");
        }
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->common.flags, 0);
        return (0);
}
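
/*
 * A minimal usage sketch (driver names hypothetical): allocating a zeroed
 * descriptor ring that satisfies the tag's constraints, then releasing it.
 * Memory allocated this way never needs bouncing, which is why *mapp is
 * returned as NULL above:
 *
 *      error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ring,
 *          BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->sc_map);
 *      ...
 *      bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_map);
 */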

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
static void
bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
         * was used and set if kmem_alloc_contig() was used.
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
                free(vaddr, M_DEVBUF);
        else
                kmem_free(vaddr, dmat->common.maxsize);
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
            dmat->bounce_flags);
}

static bool
_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
    int *pagesneeded)
{
        vm_paddr_t curaddr;
        bus_size_t sgsize;
        int count;

        /*
         * Count the number of bounce pages needed in order to
         * complete this transfer
         */
        count = 0;
        curaddr = buf;
        while (buflen != 0) {
                sgsize = MIN(buflen, dmat->common.maxsegsz);
                if (must_bounce(dmat, curaddr)) {
                        sgsize = MIN(sgsize,
                            PAGE_SIZE - (curaddr & PAGE_MASK));
                        if (pagesneeded == NULL)
                                return (true);
                        count++;
                }
                curaddr += sgsize;
                buflen -= sgsize;
        }

        if (pagesneeded != NULL)
                *pagesneeded = count;
        return (count != 0);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{

        if (map != &nobounce_dmamap && map->pagesneeded == 0) {
                _bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        vm_paddr_t paddr;
        bus_size_t sg_len;

        if (map != &nobounce_dmamap && map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->common.lowaddr,
                    ptoa((vm_paddr_t)Maxmem),
                    dmat->common.boundary, dmat->common.alignment);
                CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                    map, &nobounce_dmamap, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
                        sg_len = MIN(sg_len, dmat->common.maxsegsz);
                        if (pmap == kernel_pmap)
                                paddr = pmap_kextract(vaddr);
                        else
                                paddr = pmap_extract(pmap, vaddr);
                        if (must_bounce(dmat, paddr)) {
                                sg_len = roundup2(sg_len,
                                    dmat->common.alignment);
                                map->pagesneeded++;
                        }
                        vaddr += sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}
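
/*
 * Worked example for the counting logic above (values hypothetical,
 * assuming alignment 1 and maxsegsz >= PAGE_SIZE): an 8 KiB buffer that
 * starts at offset 0x800 into a page which must bounce is split into
 * 2 KiB + 4 KiB + 2 KiB chunks along page borders, so three bounce pages
 * are reserved even though the buffer itself holds only two pages of data.
 */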

static void
_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
    int ma_offs, bus_size_t buflen, int flags)
{
        bus_size_t sg_len, max_sgsize;
        int page_index;
        vm_paddr_t paddr;

        if (map != &nobounce_dmamap && map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->common.lowaddr,
                    ptoa((vm_paddr_t)Maxmem),
                    dmat->common.boundary, dmat->common.alignment);
                CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                    map, &nobounce_dmamap, map->pagesneeded);

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                page_index = 0;
                while (buflen > 0) {
                        paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
                        sg_len = PAGE_SIZE - ma_offs;
                        max_sgsize = MIN(buflen, dmat->common.maxsegsz);
                        sg_len = MIN(sg_len, max_sgsize);
                        if (must_bounce(dmat, paddr)) {
                                sg_len = roundup2(sg_len,
                                    dmat->common.alignment);
                                sg_len = MIN(sg_len, max_sgsize);
                                KASSERT(vm_addr_align_ok(sg_len,
                                    dmat->common.alignment),
                                    ("Segment size is not aligned"));
                                map->pagesneeded++;
                        }
                        if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
                                page_index++;
                        ma_offs = (ma_offs + sg_len) & PAGE_MASK;
                        KASSERT(buflen >= sg_len,
                            ("Segment length overruns original buffer"));
                        buflen -= sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        KASSERT(curaddr <= BUS_SPACE_MAXADDR,
            ("ds_addr %#jx > BUS_SPACE_MAXADDR %#jx; dmat %p fl %#x low %#jx "
            "hi %#jx",
            (uintmax_t)curaddr, (uintmax_t)BUS_SPACE_MAXADDR,
            dmat, dmat->bounce_flags, (uintmax_t)dmat->common.lowaddr,
            (uintmax_t)dmat->common.highaddr));

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
                sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat->common.boundary))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->common.nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}
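
/*
 * Worked example of the boundary clipping above (values hypothetical):
 * with boundary = 0x10000, curaddr = 0xfff0 and sgsize = 0x40, the chunk
 * would cross the 64 KiB line, so sgsize is clipped to
 * roundup2(0xfff0, 0x10000) - 0xfff0 = 0x10 bytes and the caller submits
 * the remainder as a new chunk starting at 0x10000.
 */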

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
        bus_size_t sgsize;
        vm_paddr_t curaddr;
        int error;

        if (map == NULL)
                map = &nobounce_dmamap;

        if (segs == NULL)
                segs = dmat->segments;

        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        while (buflen > 0) {
                curaddr = buf;
                sgsize = MIN(buflen, dmat->common.maxsegsz);
                if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
                    map->pagesneeded != 0 &&
                    must_bounce(dmat, curaddr)) {
                        sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
                        curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
                            sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                buf += sgsize;
                buflen -= sgsize;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
        bus_size_t sgsize, max_sgsize;
        vm_paddr_t curaddr;
        vm_offset_t kvaddr, vaddr;
        int error;

        if (map == NULL)
                map = &nobounce_dmamap;

        if (segs == NULL)
                segs = dmat->segments;

        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        vaddr = (vm_offset_t)buf;
        while (buflen > 0) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap == kernel_pmap) {
                        curaddr = pmap_kextract(vaddr);
                        kvaddr = vaddr;
                } else {
                        curaddr = pmap_extract(pmap, vaddr);
                        kvaddr = 0;
                }

                /*
                 * Compute the segment size, and adjust counts.
                 */
                max_sgsize = MIN(buflen, dmat->common.maxsegsz);
                sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
                if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
                    map->pagesneeded != 0 &&
                    must_bounce(dmat, curaddr)) {
                        sgsize = roundup2(sgsize, dmat->common.alignment);
                        sgsize = MIN(sgsize, max_sgsize);
                        curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
                            sgsize);
                } else {
                        sgsize = MIN(sgsize, max_sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
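
/*
 * Sketch of the public entry point that ends up in the load paths above
 * (driver names hypothetical).  The callback receives the segment list
 * that _bus_dmamap_addseg() built:
 *
 *      static void
 *      sc_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *      {
 *              if (error == 0)
 *                      ((struct sc_softc *)arg)->sc_busaddr =
 *                          segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len,
 *          sc_load_cb, sc, BUS_DMA_NOWAIT);
 *
 * With BUS_DMA_NOWAIT, a shortage of bounce pages fails the load
 * immediately instead of queueing the request for a deferred callback.
 */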

static int
bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
        vm_paddr_t paddr, next_paddr;
        int error, page_index;
        bus_size_t sgsize, max_sgsize;

        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
                /*
                 * If we have to keep the offset of each page this function
                 * is not suitable, switch back to bus_dmamap_load_ma_triv
                 * which is going to do the right thing in this case.
                 */
                error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
                    flags, segs, segp);
                return (error);
        }

        if (map == NULL)
                map = &nobounce_dmamap;

        if (segs == NULL)
                segs = dmat->segments;

        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        page_index = 0;
        while (buflen > 0) {
                /*
                 * Compute the segment size, and adjust counts.
                 */
                paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
                max_sgsize = MIN(buflen, dmat->common.maxsegsz);
                sgsize = PAGE_SIZE - ma_offs;
                if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
                    map->pagesneeded != 0 &&
                    must_bounce(dmat, paddr)) {
                        sgsize = roundup2(sgsize, dmat->common.alignment);
                        sgsize = MIN(sgsize, max_sgsize);
                        KASSERT(vm_addr_align_ok(sgsize,
                            dmat->common.alignment),
                            ("Segment size is not aligned"));
                        /*
                         * Check if two pages of the user provided buffer
                         * are used.
                         */
                        if ((ma_offs + sgsize) > PAGE_SIZE)
                                next_paddr =
                                    VM_PAGE_TO_PHYS(ma[page_index + 1]);
                        else
                                next_paddr = 0;
                        paddr = add_bounce_page(dmat, map, 0, paddr,
                            next_paddr, sgsize);
                } else {
                        sgsize = MIN(sgsize, max_sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                KASSERT(buflen >= sgsize,
                    ("Segment length overruns original buffer"));
                buflen -= sgsize;
                if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
                        page_index++;
                ma_offs = (ma_offs + sgsize) & PAGE_MASK;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

static void
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

        if (map == NULL)
                return;
        map->mem = *mem;
        map->dmat = dmat;
        map->callback = callback;
        map->callback_arg = callback_arg;
}

static bus_dma_segment_t *
bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

        if (segs == NULL)
                segs = dmat->segments;
        return (segs);
}
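
/*
 * The waitok/complete hooks above back the deferred-load path: when bounce
 * pages run out and the caller did not pass BUS_DMA_NOWAIT, the request is
 * queued with its callback and replayed once pages are freed.  A sketch of
 * the driver-visible contract (names hypothetical):
 *
 *      error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len,
 *          sc_load_cb, sc, 0);
 *      if (error == EINPROGRESS) {
 *              (sc_load_cb will run later, once the deferred request
 *              can be satisfied from freed bounce pages)
 *      }
 */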

/*
 * Release the mapping held by map.
 */
static void
bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map == NULL)
                return;

        free_bounce_pages(dmat, map);
}

static void
bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
        struct bounce_page *bpage;
        vm_offset_t datavaddr, tempvaddr;
        bus_size_t datacount1, datacount2;

        if (map == NULL)
                goto out;
        if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
                goto out;

        /*
         * Handle data bouncing.  We might also want to add support for
         * invalidating the caches on broken hardware.
         */
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
            "performing bounce", __func__, dmat, dmat->common.flags, op);

        if ((op & BUS_DMASYNC_PREWRITE) != 0) {
                while (bpage != NULL) {
                        tempvaddr = 0;
                        datavaddr = bpage->datavaddr;
                        datacount1 = bpage->datacount;
                        if (datavaddr == 0) {
                                tempvaddr =
                                    pmap_quick_enter_page(bpage->datapage[0]);
                                datavaddr = tempvaddr | bpage->dataoffs;
                                datacount1 = min(PAGE_SIZE - bpage->dataoffs,
                                    datacount1);
                        }

                        bcopy((void *)datavaddr,
                            (void *)bpage->vaddr, datacount1);

                        if (tempvaddr != 0)
                                pmap_quick_remove_page(tempvaddr);

                        if (bpage->datapage[1] == 0) {
                                KASSERT(datacount1 == bpage->datacount,
                ("Mismatch between data size and provided memory space"));
                                goto next_w;
                        }

                        /*
                         * We are dealing with an unmapped buffer that spans
                         * two pages.
                         */
                        datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
                        datacount2 = bpage->datacount - datacount1;
                        bcopy((void *)datavaddr,
                            (void *)(bpage->vaddr + datacount1), datacount2);
                        pmap_quick_remove_page(datavaddr);

next_w:
                        bpage = STAILQ_NEXT(bpage, links);
                }
                dmat->bounce_zone->total_bounced++;
        }

        if ((op & BUS_DMASYNC_POSTREAD) != 0) {
                while (bpage != NULL) {
                        tempvaddr = 0;
                        datavaddr = bpage->datavaddr;
                        datacount1 = bpage->datacount;
                        if (datavaddr == 0) {
                                tempvaddr =
                                    pmap_quick_enter_page(bpage->datapage[0]);
                                datavaddr = tempvaddr | bpage->dataoffs;
                                datacount1 = min(PAGE_SIZE - bpage->dataoffs,
                                    datacount1);
                        }

                        bcopy((void *)bpage->vaddr, (void *)datavaddr,
                            datacount1);

                        if (tempvaddr != 0)
                                pmap_quick_remove_page(tempvaddr);

                        if (bpage->datapage[1] == 0) {
                                KASSERT(datacount1 == bpage->datacount,
                ("Mismatch between data size and provided memory space"));
                                goto next_r;
                        }

                        /*
                         * We are dealing with an unmapped buffer that spans
                         * two pages.
                         */
                        datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
                        datacount2 = bpage->datacount - datacount1;
                        bcopy((void *)(bpage->vaddr + datacount1),
                            (void *)datavaddr, datacount2);
                        pmap_quick_remove_page(datavaddr);

next_r:
                        bpage = STAILQ_NEXT(bpage, links);
                }
                dmat->bounce_zone->total_bounced++;
        }
out:
        atomic_thread_fence_rel();
        if (map != NULL)
                kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
}
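
/*
 * Typical driver-side ordering around the sync hook above (names
 * hypothetical): PREWRITE copies data into the bounce pages before the
 * device reads them, POSTREAD copies device-written data back out:
 *
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *      (start the host-to-device DMA, wait for completion)
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTWRITE);
 *
 * A device-to-host transfer uses BUS_DMASYNC_PREREAD before starting and
 * BUS_DMASYNC_POSTREAD after the completion interrupt.
 */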

#ifdef KMSAN
static void
bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem)
{
        if (map == NULL)
                return;
        memcpy(&map->kmsan_mem, mem, sizeof(map->kmsan_mem));
}
#endif

struct bus_dma_impl bus_dma_bounce_impl = {
        .tag_create = bounce_bus_dma_tag_create,
        .tag_destroy = bounce_bus_dma_tag_destroy,
        .tag_set_domain = bounce_bus_dma_tag_set_domain,
        .id_mapped = bounce_bus_dma_id_mapped,
        .map_create = bounce_bus_dmamap_create,
        .map_destroy = bounce_bus_dmamap_destroy,
        .mem_alloc = bounce_bus_dmamem_alloc,
        .mem_free = bounce_bus_dmamem_free,
        .load_phys = bounce_bus_dmamap_load_phys,
        .load_buffer = bounce_bus_dmamap_load_buffer,
        .load_ma = bounce_bus_dmamap_load_ma,
        .map_waitok = bounce_bus_dmamap_waitok,
        .map_complete = bounce_bus_dmamap_complete,
        .map_unload = bounce_bus_dmamap_unload,
        .map_sync = bounce_bus_dmamap_sync,
#ifdef KMSAN
        .load_kmsan = bounce_bus_dmamap_load_kmsan,
#endif
};