/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES	1024

/*
 * 16 bytes x N segments are declared on the stack, so keep N small.
 */
#define BUS_DMA_CACHE_SEGMENTS	8

struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
	struct spinlock	spin;
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN	BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

#define BUS_DMAMEM_KMALLOC(dmat) \
	((dmat)->maxsize <= PAGE_SIZE && \
	 (dmat)->alignment <= PAGE_SIZE && \
	 (dmat)->lowaddr >= ptoa(Maxmem))

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	struct spinlock	spin;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};

#define BZ_LOCK(bz)	spin_lock(&(bz)->spin)
#define BZ_UNLOCK(bz)	spin_unlock(&(bz)->spin)

static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_token);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

static int busdma_priv_zonecount = -1;

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1;	/* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
static struct spinlock bounce_map_list_spin =
	SPINLOCK_INITIALIZER(&bounce_map_list_spin);

static struct bus_dmamap nobounce_dmamap;

static int		alloc_bounce_zone(bus_dma_tag_t);
static int		alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static void		free_bounce_pages_all(bus_dma_tag_t);
static void		free_bounce_zone(bus_dma_tag_t);
static int		reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void		return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t	add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
			    vm_offset_t, bus_size_t);
static void		free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t	get_map_waiting(bus_dma_tag_t);
static void		add_map_callback(bus_dmamap_t);
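
/*
 * Illustrative numbers only (not derived from any particular device):
 * for a tag with lowaddr = 16MB, alignment = 4KB, highaddr at its
 * maximum, no filter, and bounce_alignment enabled, run_filter() below
 * would flag paddr 0x1800000 (above lowaddr) and paddr 0x123400 (not
 * 4KB aligned) for bouncing, while an aligned page below 16MB such as
 * 0x400000 passes through untouched.
 */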

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
    0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
    0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
    &bounce_alignment, 0, "Obey alignment constraint");

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL ||
			dmat->filter(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static __inline
bus_dma_segment_t *
bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
{
	if (tag->flags & BUS_DMA_PROTECTED)
		return(tag->segments);

	if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		return(cache);
	spin_lock(&tag->spin);
	return(tag->segments);
}

static __inline
void
bus_dma_tag_unlock(bus_dma_tag_t tag)
{
	if (tag->flags & BUS_DMA_PROTECTED)
		return;

	if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
		spin_unlock(&tag->spin);
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/*
	 * Sanity checks
	 */
	if (alignment == 0)
		alignment = 1;
	if (alignment & (alignment - 1))
		panic("alignment must be power of 2");

	if (boundary != 0) {
		if (boundary & (boundary - 1))
			panic("boundary must be power of 2");
		if (boundary < maxsegsz) {
			kprintf("boundary < maxsegsz:\n");
			print_backtrace(-1);
			maxsegsz = boundary;
		}
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

	spin_init(&newtag->spin);
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

#ifdef notyet
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			parent->ref_count++;
	}

	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if ((newtag->flags & BUS_DMA_ALLOCALL) == 0 &&
		    ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}
back:
	if (error) {
		free_bounce_zone(newtag);
		kfree(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return error;
}
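
/*
 * A minimal usage sketch (hypothetical driver values, not taken from
 * this file): create a tag for a device that can only address the
 * low 4GB and transfer up to 64KB in at most 16 segments.
 *
 *	bus_dma_tag_t tag;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, 65536, 16, 65536, BUS_DMA_ALLOCNOW, &tag);
 *	if (error == 0) {
 *		... use the tag, then bus_dma_tag_destroy(tag) ...
 *	}
 */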

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free_bounce_zone(dmat);
				if (dmat->segments != NULL)
					kfree(dmat->segments, M_DEVBUF);
				kfree(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
	return(tag->maxsize);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_ALLOCALL) {
			maxpages = Maxmem - atop(dmat->lowaddr);
		} else if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (!error &&
				    (dmat->flags & BUS_DMA_ALLOCALL) == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (!error) {
		dmat->map_count++;
	} else {
		kfree(*mapp, M_DEVBUF);
		*mapp = NULL;
	}
	return error;
}
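
/*
 * Illustrative pairing of the map calls (hypothetical driver code):
 * one map per outstanding request, destroyed only after it has been
 * unloaded.
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(tag, 0, &map) == 0) {
 *		... bus_dmamap_load(), I/O, bus_dmamap_unload() ...
 *		bus_dmamap_destroy(tag, map);
 *	}
 */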

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		if (verify)
			panic("boundary check failed\n");
		if (bootverbose)
			kprintf("boundary check failed\n");
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		if (verify)
			panic("alignment check failed\n");
		if (bootverbose)
			kprintf("alignment check failed\n");
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/* XXX must alloc with correct mem attribute here */
	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * XXX
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			kfree(*vaddr, M_DEVBUF);
			*vaddr = kmalloc(maxsize, M_DEVBUF,
			    mflags | M_POWEROF2);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);

	if (attr != VM_MEMATTR_DEFAULT) {
		pmap_change_attr((vm_offset_t)(*vaddr),
		    dmat->maxsize / PAGE_SIZE, attr);
	}
	return (0);
}
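
/*
 * Illustrative sketch (hypothetical driver code): allocating a zeroed
 * descriptor ring with bus_dmamem_alloc() above.  The returned map is
 * always NULL for dmamem allocations, and is passed back unchanged to
 * bus_dmamem_free().
 *
 *	void *ring;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(tag, &ring, BUS_DMA_ZERO, &map) == 0) {
 *		... use ring ...
 *		bus_dmamem_free(tag, ring, map);
 *	}
 */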

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed");
	if (BUS_DMAMEM_KMALLOC(dmat))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr);
	else
		return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastpaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments,
			int nsegments,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

#ifdef INVARIANTS
	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - (vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0)
		error = EFBIG;

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}
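
/*
 * A minimal sketch of the invocation protocol above (illustrative
 * only): pass first=1 on the first call and first=0 afterwards, while
 * lastpaddr and nsegs carry segment state from call to call.
 * bus_dmamap_load_mbuf_segment() below follows exactly this pattern.
 *
 *	vm_paddr_t lastpaddr = 0;
 *	int nsegs = 1, first = 1, error = 0;
 *
 *	for each buffer (buf, len) {
 *		error = _bus_dmamap_load_buffer(dmat, map, buf, len,
 *		    segments, dmat->nsegments, NULL, flags,
 *		    &lastpaddr, &nsegs, first);
 *		if (error)
 *			break;
 *		first = 0;
 *	}
 */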

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are
		 * fixed, we should get rid of these internal flag
		 * "adjustments".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
					segments, dmat->nsegments,
					NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS) {
		KKASSERT((dmat->flags &
			  (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL)) !=
			 (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL));

		if (dmat->flags & BUS_DMA_PROTECTED)
			panic("protected dmamap callback will be deferred");

		bus_dma_tag_unlock(dmat);
		return error;
	}
	callback(callback_arg, segments, nsegs, error);
	bus_dma_tag_unlock(dmat);
	return 0;
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
	    segments, dmat->nsegments, &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	bus_dma_tag_unlock(dmat);
	return error;
}
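
/*
 * Illustrative sketch (hypothetical NIC transmit path, MYDEV_MAXSCATTER
 * is a made-up driver constant): drivers may call
 * bus_dmamap_load_mbuf_segment() below directly with their own segment
 * array and handle an over-fragmented chain (EFBIG) by defragmenting
 * and retrying, or by dropping the packet.
 *
 *	bus_dma_segment_t segs[MYDEV_MAXSCATTER];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_segment(tag, map, m,
 *	    segs, MYDEV_MAXSCATTER, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		... defragment the mbuf chain and retry, or drop ...
 *	}
 */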

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only supports %d segments",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
			}
			first = 0;
		}
#ifdef INVARIANTS
		if (!error)
			KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
	} else {
		*nsegs = 0;
		error = EINVAL;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	bus_dma_segment_t *segs;
	int nsegs_left;

	if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS) {
		segments = cache_segments;
	} else {
		segments = kmalloc(sizeof(bus_dma_segment_t) *
		    dmat->nsegments, M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	segs = segments;
	nsegs_left = dmat->nsegments;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				segs, nsegs_left,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
		if (error == 0) {
			nsegs_left -= nsegs;
			segs += nsegs;
		}
	}

	/*
	 * Minimum one DMA segment, even if 0-length buffer.
	 */
	if (nsegs_left == dmat->nsegments)
		--nsegs_left;

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, dmat->nsegments - nsegs_left,
			 (bus_size_t)uio->uio_resid, error);
	}
	if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
		kfree(segments, M_DEVBUF);
	return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
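
/*
 * A minimal ordering sketch for the sync operations above (illustrative
 * only, using the bus_dmamap_sync() wrapper): the bounce copy-out must
 * happen before the device reads the buffer, and the copy-in after the
 * device has written it.
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	... start device-read (e.g. transmit) DMA ...
 *
 *	... device-write (e.g. receive) DMA completes ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 */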

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&bounce_zone_tok);

	if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
		/*
		 * For shared bounce zones, check to see
		 * if we already have a suitable zone
		 */
		STAILQ_FOREACH(bz, &bounce_zone_list, links) {
			if (dmat->alignment <= bz->alignment &&
			    dmat->lowaddr >= bz->lowaddr) {
				lwkt_reltoken(&bounce_zone_tok);

				dmat->bounce_zone = bz;
				kfree(new_bz, M_DEVBUF);
				return 0;
			}
		}
	}
	bz = new_bz;

	spin_init(&bz->spin);
	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = round_page(dmat->alignment);
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);

	if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
		ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
		busdma_zonecount++;
		STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	} else {
		ksnprintf(bz->zoneid, 8, "zone%d", busdma_priv_zonecount);
		busdma_priv_zonecount--;
	}

	lwkt_reltoken(&bounce_zone_tok);

	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return 0;
}
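
/*
 * Sharing example (illustrative numbers): a tag with lowaddr = 4GB and
 * alignment = 4KB may reuse a shared zone created with lowaddr = 16MB
 * and alignment = 4KB, since pages allocated below 16MB also satisfy
 * the looser 4GB limit.  The converse does not hold, which is what the
 * "dmat->lowaddr >= bz->lowaddr" test above enforces.
 */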

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 mflags, 0ul,
							 bz->lowaddr,
							 bz->alignment, 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		BZ_LOCK(bz);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		BZ_UNLOCK(bz);

		count++;
		numpages--;
	}
	return count;
}

static void
free_bounce_pages_all(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	BZ_LOCK(bz);

	while ((bpage = STAILQ_FIRST(&bz->bounce_page_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

		KKASSERT(total_bounce_pages > 0);
		total_bounce_pages--;

		KKASSERT(bz->total_bpages > 0);
		bz->total_bpages--;

		KKASSERT(bz->free_bpages > 0);
		bz->free_bpages--;

		/* Drop the spinlock across the blocking frees */
		BZ_UNLOCK(bz);
		contigfree((void *)bpage->vaddr, PAGE_SIZE, M_DEVBUF);
		kfree(bpage, M_DEVBUF);
		BZ_LOCK(bz);
	}
	if (bz->total_bpages) {
		kprintf("#%d bounce pages are still in use\n",
			bz->total_bpages);
		print_backtrace(-1);
	}

	BZ_UNLOCK(bz);
}

static void
free_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;

	if (bz == NULL)
		return;

	/* Only private bounce zones are torn down with their tag */
	if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0)
		return;

	free_bounce_pages_all(dmat);
	dmat->bounce_zone = NULL;

	if (bz->sysctl_tree != NULL)
		sysctl_ctx_free(&bz->sysctl_ctx);
	kfree(bz, M_DEVBUF);
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}

	bz->free_bpages -= pages;

	bz->reserved_bpages += pages;
	KKASSERT(bz->reserved_bpages <= bz->total_bpages);

	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}

static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int reserved = map->pagesreserved;
	bus_dmamap_t wait_map;

	map->pagesreserved = 0;
	map->pagesneeded = 0;

	if (reserved == 0)
		return;

	BZ_LOCK(bz);

	bz->free_bpages += reserved;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->reserved_bpages >= reserved);
	bz->reserved_bpages -= reserved;

	wait_map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (wait_map != NULL)
		add_map_callback(wait_map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}
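
/*
 * Offset-preservation example (illustrative): with BUS_DMA_KEEP_PG_OFFSET
 * set, client data at a kva ending in 0x234 is staged at bounce page
 * offset 0x234, so the device sees the same in-page offset as in the
 * original buffer.  Without the flag, bounced data always starts at
 * offset 0 of the bounce page.
 */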

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
	if (map != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			bz->total_deferred++;
		} else {
			map = NULL;
		}
	}
	return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
	spin_lock(&bounce_map_list_spin);
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	setsoftvm();
	spin_unlock(&bounce_map_list_spin);
}

void
busdma_swi(void)
{
	bus_dmamap_t map;

	spin_lock(&bounce_map_list_spin);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		spin_unlock(&bounce_map_list_spin);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		spin_lock(&bounce_map_list_spin);
	}
	spin_unlock(&bounce_map_list_spin);
}