/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES 1024

/*
 * 16 x N declared on stack.
 */
#define BUS_DMA_CACHE_SEGMENTS  8
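/*
 * Loads against tags with nsegments <= BUS_DMA_CACHE_SEGMENTS use a
 * bus_dma_segment_t array of this size declared on the caller's stack
 * (see bus_dma_tag_lock() below), so small loads never contend on the
 * tag spinlock.
 */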
struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
        bus_dma_tag_t   parent;
        bus_size_t      alignment;
        bus_size_t      boundary;
        bus_addr_t      lowaddr;
        bus_addr_t      highaddr;
        bus_dma_filter_t *filter;
        void            *filterarg;
        bus_size_t      maxsize;
        u_int           nsegments;
        bus_size_t      maxsegsz;
        int             flags;
        int             ref_count;
        int             map_count;
        bus_dma_segment_t *segments;
        struct bounce_zone *bounce_zone;
#ifdef SMP
        struct spinlock spin;
#else
        int             unused0;
#endif
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN    BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR  BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE    (BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

#define BUS_DMAMEM_KMALLOC(dmat) \
        ((dmat)->maxsize <= PAGE_SIZE && \
         (dmat)->alignment <= PAGE_SIZE && \
         (dmat)->lowaddr >= ptoa(Maxmem))
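/*
 * BUS_DMAMEM_KMALLOC is true when a plain kmalloc() can satisfy
 * bus_dmamem_alloc(): the request fits in one page, needs no more than
 * page alignment, and every physical page in the system already sits
 * below the tag's lowaddr cutoff, so no bouncing can ever be required.
 */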
struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
#ifdef SMP
        struct spinlock spin;
#else
        int             unused0;
#endif
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        int             reserve_failed;
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_ctx;
        struct sysctl_oid *sysctl_tree;
};

#ifdef SMP
#define BZ_LOCK(bz)     spin_lock(&(bz)->spin)
#define BZ_UNLOCK(bz)   spin_unlock(&(bz)->spin)
#else
#define BZ_LOCK(bz)     crit_enter()
#define BZ_UNLOCK(bz)   crit_exit()
#endif

static struct lwkt_token bounce_zone_tok =
        LWKT_TOKEN_INITIALIZER(bounce_zone_token);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
        STAILQ_HEAD_INITIALIZER(bounce_zone_list);

static int busdma_priv_zonecount = -1;

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1; /* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
        struct bp_list  bpages;
        int             pagesneeded;
        int             pagesreserved;
        bus_dma_tag_t   dmat;
        void            *buf;           /* unmapped buffer pointer */
        bus_size_t      buflen;         /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void            *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
        STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
static struct spinlock bounce_map_list_spin =
        SPINLOCK_INITIALIZER(&bounce_map_list_spin);

static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_zone(bus_dma_tag_t);
static int alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static void free_bounce_pages_all(bus_dma_tag_t);
static void free_bounce_zone(bus_dma_tag_t);
static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
                vm_offset_t, bus_size_t);
static void free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t get_map_waiting(bus_dma_tag_t);
static void add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
    0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
    0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
    &bounce_alignment, 0, "Obey alignment constraint");

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
                     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
                    && (dmat->filter == NULL ||
                        dmat->filter(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

static __inline
bus_dma_segment_t *
bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
{
        if (tag->flags & BUS_DMA_PROTECTED)
                return(tag->segments);

        if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
                return(cache);
#ifdef SMP
        spin_lock(&tag->spin);
#endif
        return(tag->segments);
}

static __inline
void
bus_dma_tag_unlock(bus_dma_tag_t tag)
{
#ifdef SMP
        if (tag->flags & BUS_DMA_PROTECTED)
                return;

        if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
                spin_unlock(&tag->spin);
#endif
}
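/*
 * bus_dma_tag_lock()/bus_dma_tag_unlock() select the segment array used
 * by a load operation: BUS_DMA_PROTECTED tags are serialized externally
 * and use tag->segments without locking, small tags use the caller's
 * on-stack cache, and only large unprotected tags take the tag spinlock.
 */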
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /*
         * Sanity checks
         */

        if (alignment == 0)
                alignment = 1;
        if (alignment & (alignment - 1))
                panic("alignment must be power of 2");

        if (boundary != 0) {
                if (boundary & (boundary - 1))
                        panic("boundary must be power of 2");
                if (boundary < maxsegsz) {
                        kprintf("boundary < maxsegsz:\n");
                        print_backtrace(-1);
                        maxsegsz = boundary;
                }
        }

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

#ifdef SMP
        spin_init(&newtag->spin);
#endif
        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        newtag->segments = NULL;
        newtag->bounce_zone = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

                if (newtag->boundary == 0) {
                        newtag->boundary = parent->boundary;
                } else if (parent->boundary != 0) {
                        newtag->boundary = MIN(parent->boundary,
                                               newtag->boundary);
                }

#ifdef notyet
                newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        parent->ref_count++;
        }

        if (newtag->lowaddr < ptoa(Maxmem))
                newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
        if (bounce_alignment && newtag->alignment > 1 &&
            !(newtag->flags & BUS_DMA_ALIGNED))
                newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

        if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                error = alloc_bounce_zone(newtag);
                if (error)
                        goto back;
                bz = newtag->bounce_zone;

                if ((newtag->flags & BUS_DMA_ALLOCALL) == 0 &&
                    ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        if (flags & BUS_DMA_ONEBPAGE) {
                                pages = 1;
                        } else {
                                pages = atop(round_page(maxsize)) -
                                        bz->total_bpages;
                                pages = MAX(pages, 1);
                        }

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages, flags) < pages)
                                error = ENOMEM;

                        /* Performed initial allocation */
                        newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
                }
        }
back:
        if (error) {
                free_bounce_zone(newtag);
                kfree(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return error;
}
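/*
 * Example (sketch, not compiled here): a typical driver attach creates
 * a 32-bit capable tag along these lines; "sc->sc_dtag" is a
 * hypothetical softc field.
 *
 *      error = bus_dma_tag_create(NULL, 1, 0,
 *          BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *          NULL, NULL, MAXPHYS, 1, MAXPHYS,
 *          0, &sc->sc_dtag);
 *      if (error)
 *              return (error);
 */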
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {
                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
                                free_bounce_zone(dmat);
                                if (dmat->segments != NULL)
                                        kfree(dmat->segments, M_DEVBUF);
                                kfree(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
        return(tag->maxsize);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                         dmat->nsegments, M_DEVBUF, M_INTWAIT);
        }

        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                struct bounce_zone *bz;
                int maxpages;

                /* Must bounce */

                if (dmat->bounce_zone == NULL) {
                        error = alloc_bounce_zone(dmat);
                        if (error)
                                return error;
                }
                bz = dmat->bounce_zone;

                *mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->flags & BUS_DMA_ALLOCALL) {
                        maxpages = Maxmem - atop(dmat->lowaddr);
                } else if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
                        maxpages = max_bounce_pages;
                } else {
                        maxpages = MIN(max_bounce_pages,
                                       Maxmem - atop(dmat->lowaddr));
                }
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
                    (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
                        int pages;

                        if (flags & BUS_DMA_ONEBPAGE) {
                                pages = 1;
                        } else {
                                pages = atop(round_page(dmat->maxsize));
                                pages = MIN(maxpages - bz->total_bpages, pages);
                                pages = MAX(pages, 1);
                        }
                        if (alloc_bounce_pages(dmat, pages, flags) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (!error &&
                                    (dmat->flags & BUS_DMA_ALLOCALL) == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (!error) {
                dmat->map_count++;
        } else {
                kfree(*mapp, M_DEVBUF);
                *mapp = NULL;
        }
        return error;
}
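/*
 * Note: each bus_dmamap_create() call may grow the tag's bounce pool,
 * so the pool scales with the number of concurrently created maps until
 * it reaches the per-zone "maxpages" ceiling computed above.
 */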
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                kfree(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}

static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
        bus_size_t maxsize = 0;
        uintptr_t vaddr = (uintptr_t)vaddr0;

        if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
                if (verify)
                        panic("boundary check failed\n");
                if (bootverbose)
                        kprintf("boundary check failed\n");
                maxsize = dmat->maxsize;
        }
        if (vaddr & (dmat->alignment - 1)) {
                if (verify)
                        panic("alignment check failed\n");
                if (bootverbose)
                        kprintf("alignment check failed\n");
                if (dmat->maxsize < dmat->alignment)
                        maxsize = dmat->alignment;
                else
                        maxsize = dmat->maxsize;
        }
        return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        int mflags;

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                         dmat->nsegments, M_DEVBUF, M_INTWAIT);
        }

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        if (BUS_DMAMEM_KMALLOC(dmat)) {
                bus_size_t maxsize;

                *vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

                /*
                 * XXX
                 * Check whether the allocation
                 * - crossed a page boundary
                 * - was not aligned
                 * Retry with power-of-2 alignment in the above cases.
                 */
                maxsize = check_kmalloc(dmat, *vaddr, 0);
                if (maxsize) {
                        kfree(*vaddr, M_DEVBUF);
                        *vaddr = kmalloc(maxsize, M_DEVBUF,
                            mflags | M_POWEROF2);
                        check_kmalloc(dmat, *vaddr, 1);
                }
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this
                 *     facility and handles multi-seg allocations.
                 *     Nobody is doing multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed");
        if (BUS_DMAMEM_KMALLOC(dmat))
                kfree(vaddr, M_DEVBUF);
        else
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
        if (pmap)
                return pmap_extract(pmap, vaddr);
        else
                return pmap_kextract(vaddr);
}
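/*
 * Example (sketch, not compiled here): allocating a zeroed descriptor
 * ring against the tag created earlier; the "sc" fields are
 * hypothetical.
 *
 *      error = bus_dmamem_alloc(sc->sc_dtag, &sc->sc_ring,
 *          BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->sc_ring_map);
 *      ...
 *      bus_dmamem_free(sc->sc_dtag, sc->sc_ring, sc->sc_ring_map);
 */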
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        void *buf, bus_size_t buflen,
                        bus_dma_segment_t *segments,
                        int nsegments,
                        pmap_t pmap,
                        int flags,
                        vm_paddr_t *lastpaddrp,
                        int *segp,
                        int first)
{
        vm_offset_t vaddr;
        vm_paddr_t paddr, nextpaddr;
        bus_dma_segment_t *sg;
        bus_addr_t bmask;
        int seg, error = 0;

        if (map == NULL)
                map = &nobounce_dmamap;

#ifdef INVARIANTS
        if (dmat->flags & BUS_DMA_ALIGNED)
                KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
            map != &nobounce_dmamap && map->pagesneeded == 0) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = _bus_dma_extract(pmap, vaddr);
                        if (run_filter(dmat, paddr) != 0)
                                map->pagesneeded++;
                        vaddr += (PAGE_SIZE - (vaddr & PAGE_MASK));
                }
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                struct bounce_zone *bz;

                bz = dmat->bounce_zone;
                BZ_LOCK(bz);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                BZ_UNLOCK(bz);
                                error = ENOMEM;
                                goto free_bounce;
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;

                                STAILQ_INSERT_TAIL(
                                    &dmat->bounce_zone->bounce_map_waitinglist,
                                    map, links);
                                BZ_UNLOCK(bz);

                                return (EINPROGRESS);
                        }
                }
                BZ_UNLOCK(bz);
        }

        KKASSERT(*segp >= 1 && *segp <= nsegments);
        seg = *segp;
        sg = &segments[seg - 1];

        vaddr = (vm_offset_t)buf;
        nextpaddr = *lastpaddrp;
        bmask = ~(dmat->boundary - 1);  /* note: will be 0 if boundary is 0 */

        /* force at least one segment */
        do {
                bus_size_t size;

                /*
                 * Per-page main loop
                 */
                paddr = _bus_dma_extract(pmap, vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;
                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        /*
                         * note: this paddr has the same in-page offset
                         * as vaddr and thus the paddr above, so the
                         * size does not have to be recalculated
                         */
                        paddr = add_bounce_page(dmat, map, vaddr, size);
                }

                /*
                 * Fill in the bus_dma_segment
                 */
                if (first) {
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                        first = 0;
                } else if (paddr == nextpaddr) {
                        sg->ds_len += size;
                } else {
                        sg++;
                        seg++;
                        if (seg > nsegments)
                                break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                nextpaddr = paddr + size;
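
                /*
                 * The boundary test below relies on bmask: a segment
                 * crosses a boundary exactly when its first and last
                 * byte addresses differ in the bits selected by bmask
                 * (with boundary == 0, bmask is 0 and never triggers).
                 */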
                /*
                 * Handle maxsegsz and boundary issues with a nested loop
                 */
                for (;;) {
                        bus_size_t tmpsize;

                        /*
                         * Limit to the boundary and maximum segment size
                         */
                        if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
                                tmpsize = dmat->boundary -
                                          (sg->ds_addr & ~bmask);
                                if (tmpsize > dmat->maxsegsz)
                                        tmpsize = dmat->maxsegsz;
                                KKASSERT(tmpsize < sg->ds_len);
                        } else if (sg->ds_len > dmat->maxsegsz) {
                                tmpsize = dmat->maxsegsz;
                        } else {
                                break;
                        }

                        /*
                         * Futz, split the data into a new segment.
                         */
                        if (seg >= nsegments)
                                goto fail;
                        sg[1].ds_len = sg[0].ds_len - tmpsize;
                        sg[1].ds_addr = sg[0].ds_addr + tmpsize;
                        sg[0].ds_len = tmpsize;
                        sg++;
                        seg++;
                }

                /*
                 * Adjust for loop
                 */
                buflen -= size;
                vaddr += size;
        } while (buflen > 0);
fail:
        if (buflen != 0)
                error = EFBIG;

        *segp = seg;
        *lastpaddrp = nextpaddr;

free_bounce:
        if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
            map != &nobounce_dmamap) {
                _bus_dmamap_unload(dmat, map);
                return_bounce_pages(dmat, map);
        }
        return error;
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        vm_paddr_t lastaddr = 0;
        int error, nsegs = 1;

        if (map != NULL) {
                /*
                 * XXX
                 * Follow old semantics.  Once all of the callers are
                 * fixed, we should get rid of these internal flag
                 * "adjustments".
                 */
                flags &= ~BUS_DMA_NOWAIT;
                flags |= BUS_DMA_WAITOK;

                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        segments = bus_dma_tag_lock(dmat, cache_segments);
        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
                                        segments, dmat->nsegments,
                                        NULL, flags, &lastaddr, &nsegs, 1);
        if (error == EINPROGRESS) {
                KKASSERT((dmat->flags &
                          (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL)) !=
                         (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL));

                if (dmat->flags & BUS_DMA_PROTECTED)
                        panic("protected dmamap callback will be deferred");

                bus_dma_tag_unlock(dmat);
                return error;
        }
        callback(callback_arg, segments, nsegs, error);
        bus_dma_tag_unlock(dmat);
        return 0;
}
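/*
 * Example (sketch, not compiled here): a typical load callback just
 * copies the segment array into driver state; all names are
 * hypothetical.
 *
 *      static void
 *      foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *          int error)
 *      {
 *              struct foo_softc *sc = arg;
 *
 *              if (error)
 *                      return;
 *              sc->sc_physaddr = segs[0].ds_addr;
 *      }
 */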
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        int nsegs, error;

        /*
         * XXX
         * Follow old semantics.  Once all of the callers are fixed,
         * we should get rid of these internal flag "adjustments".
         */
        flags &= ~BUS_DMA_WAITOK;
        flags |= BUS_DMA_NOWAIT;

        segments = bus_dma_tag_lock(dmat, cache_segments);
        error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
                        segments, dmat->nsegments, &nsegs, flags);
        if (error) {
                /* force "no valid mappings" in callback */
                callback(callback_arg, segments, 0,
                         0, error);
        } else {
                callback(callback_arg, segments, nsegs,
                         m0->m_pkthdr.len, error);
        }
        bus_dma_tag_unlock(dmat);
        return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
                             struct mbuf *m0,
                             bus_dma_segment_t *segs, int maxsegs,
                             int *nsegs, int flags)
{
        int error;

        M_ASSERTPKTHDR(m0);

        KASSERT(maxsegs >= 1, ("invalid maxsegs %d", maxsegs));
        KASSERT(maxsegs <= dmat->nsegments,
                ("%d too many segments, dmat only supports %d segments",
                 maxsegs, dmat->nsegments));
        KASSERT(flags & BUS_DMA_NOWAIT,
                ("only BUS_DMA_NOWAIT is supported"));

        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                vm_paddr_t lastaddr = 0;
                struct mbuf *m;

                *nsegs = 1;
                error = 0;
                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len == 0)
                                continue;

                        error = _bus_dmamap_load_buffer(dmat, map,
                                        m->m_data, m->m_len,
                                        segs, maxsegs,
                                        NULL, flags, &lastaddr,
                                        nsegs, first);
                        if (error == ENOMEM && !first) {
                                /*
                                 * Out of bounce pages due to too many
                                 * fragments in the mbuf chain; return
                                 * EFBIG instead.
                                 */
                                error = EFBIG;
                        }
                        first = 0;
                }
#ifdef INVARIANTS
                if (!error)
                        KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
        } else {
                *nsegs = 0;
                error = EINVAL;
        }
        KKASSERT(error != EINPROGRESS);
        return error;
}
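/*
 * Example (sketch, not compiled here): a NIC transmit path loading an
 * mbuf chain directly into a segment array; all names are hypothetical.
 *
 *      error = bus_dmamap_load_mbuf_segment(sc->sc_dtag, txd->tx_map,
 *          m, segs, FOO_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT);
 *      if (error == EFBIG)
 *              ...defragment the chain and retry once...
 */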
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        vm_paddr_t lastaddr;
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        pmap_t pmap;
        bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
        bus_dma_segment_t *segments;
        bus_dma_segment_t *segs;
        int nsegs_left;

        if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS)
                segments = cache_segments;
        else
                segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments,
                                   M_DEVBUF, M_WAITOK | M_ZERO);

        /*
         * XXX
         * Follow old semantics.  Once all of the callers are fixed,
         * we should get rid of these internal flag "adjustments".
         */
        flags &= ~BUS_DMA_WAITOK;
        flags |= BUS_DMA_NOWAIT;

        resid = (bus_size_t)uio->uio_resid;
        iov = uio->uio_iov;

        segs = segments;
        nsegs_left = dmat->nsegments;

        if (uio->uio_segflg == UIO_USERSPACE) {
                struct thread *td;

                td = uio->uio_td;
                KASSERT(td != NULL && td->td_proc != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        } else {
                pmap = NULL;
        }

        error = 0;
        nsegs = 1;
        first = 1;
        lastaddr = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
                                segs, nsegs_left,
                                pmap, flags, &lastaddr, &nsegs, first);
                first = 0;

                resid -= minlen;
                if (error == 0) {
                        nsegs_left -= nsegs;
                        segs += nsegs;
                }
        }

        /*
         * Minimum one DMA segment, even if 0-length buffer.
         */
        if (nsegs_left == dmat->nsegments)
                --nsegs_left;

        if (error) {
                /* force "no valid mappings" in callback */
                callback(callback_arg, segments, 0,
                         0, error);
        } else {
                callback(callback_arg, segments, dmat->nsegments - nsegs_left,
                         (bus_size_t)uio->uio_resid, error);
        }
        if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
                kfree(segments, M_DEVBUF);
        return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                switch (op) {
                case BUS_DMASYNC_PREWRITE:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                        break;

                case BUS_DMASYNC_POSTREAD:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                        break;

                case BUS_DMASYNC_PREREAD:
                case BUS_DMASYNC_POSTWRITE:
                        /* No-ops */
                        break;
                }
        }
}
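/*
 * Example (sketch, not compiled here): the canonical sync ordering for
 * a host-to-device transfer; "dtag"/"dmap" are hypothetical.
 *
 *      bus_dmamap_load(dtag, dmap, buf, len, foo_dma_callback, sc, 0);
 *      bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_PREWRITE);
 *      ...start the DMA, wait for the completion interrupt...
 *      bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTWRITE);
 *      bus_dmamap_unload(dtag, dmap);
 */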
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz, *new_bz;

        KASSERT(dmat->bounce_zone == NULL,
                ("bounce zone was already assigned"));

        new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

        lwkt_gettoken(&bounce_zone_tok);

        if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
                /*
                 * For shared bounce zone, check to see
                 * if we already have a suitable zone
                 */
                STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                        if (dmat->alignment <= bz->alignment &&
                            dmat->lowaddr >= bz->lowaddr) {
                                lwkt_reltoken(&bounce_zone_tok);

                                dmat->bounce_zone = bz;
                                kfree(new_bz, M_DEVBUF);
                                return 0;
                        }
                }
        }
        bz = new_bz;

#ifdef SMP
        spin_init(&bz->spin);
#endif
        STAILQ_INIT(&bz->bounce_page_list);
        STAILQ_INIT(&bz->bounce_map_waitinglist);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = round_page(dmat->alignment);
        ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);

        if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) {
                ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
                busdma_zonecount++;
                STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        } else {
                ksnprintf(bz->zoneid, 8, "zone%d", busdma_priv_zonecount);
                busdma_priv_zonecount--;
        }

        lwkt_reltoken(&bounce_zone_tok);

        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_ctx);
        bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD, 0, "");
        if (bz->sysctl_tree == NULL) {
                sysctl_ctx_free(&bz->sysctl_ctx);
                return 0;       /* XXX error code? */
        }

        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
            "Total bounce page reservations that failed");
        SYSCTL_ADD_STRING(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_INT(&bz->sysctl_ctx,
            SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

        return 0;
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int count = 0, mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         mflags, 0ul,
                                                         bz->lowaddr,
                                                         bz->alignment, 0);
                if (bpage->vaddr == 0) {
                        kfree(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);

                BZ_LOCK(bz);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bounce_pages++;
                bz->total_bpages++;
                bz->free_bpages++;
                BZ_UNLOCK(bz);

                count++;
                numpages--;
        }
        return count;
}
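/*
 * Each bounce page is allocated with contigmalloc() bounded by the
 * zone's lowaddr and alignment, so by construction the replacement
 * page satisfies the address and alignment constraints that made the
 * original page bounce.
 */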
static void
free_bounce_pages_all(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        struct bounce_page *bpage;

        BZ_LOCK(bz);

        while ((bpage = STAILQ_FIRST(&bz->bounce_page_list)) != NULL) {
                STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

                KKASSERT(total_bounce_pages > 0);
                total_bounce_pages--;

                KKASSERT(bz->total_bpages > 0);
                bz->total_bpages--;

                KKASSERT(bz->free_bpages > 0);
                bz->free_bpages--;

                BZ_UNLOCK(bz);
                contigfree((void *)bpage->vaddr, PAGE_SIZE, M_DEVBUF);
                kfree(bpage, M_DEVBUF);
                BZ_LOCK(bz);
        }
        if (bz->total_bpages) {
                kprintf("#%d bounce pages are still in use\n",
                        bz->total_bpages);
                print_backtrace(-1);
        }

        BZ_UNLOCK(bz);
}

static void
free_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz = dmat->bounce_zone;

        if (bz == NULL)
                return;

        if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0)
                return;

        free_bounce_pages_all(dmat);
        dmat->bounce_zone = NULL;

        if (bz->sysctl_tree != NULL)
                sysctl_ctx_free(&bz->sysctl_ctx);
        kfree(bz, M_DEVBUF);
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int pages;

        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
                bz->reserve_failed++;
                return (map->pagesneeded - (map->pagesreserved + pages));
        }

        bz->free_bpages -= pages;

        bz->reserved_bpages += pages;
        KKASSERT(bz->reserved_bpages <= bz->total_bpages);

        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return pages;
}

static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        int reserved = map->pagesreserved;
        bus_dmamap_t wait_map;

        map->pagesreserved = 0;
        map->pagesneeded = 0;

        if (reserved == 0)
                return;

        BZ_LOCK(bz);

        bz->free_bpages += reserved;
        KKASSERT(bz->free_bpages <= bz->total_bpages);

        KKASSERT(bz->reserved_bpages >= reserved);
        bz->reserved_bpages -= reserved;

        wait_map = get_map_waiting(dmat);

        BZ_UNLOCK(bz);

        if (wait_map != NULL)
                add_map_callback(wait_map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        struct bounce_page *bpage;

        KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
        map->pagesneeded--;

        KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
        map->pagesreserved--;

        BZ_LOCK(bz);

        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        KASSERT(bpage != NULL, ("free page list is empty"));
        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

        KKASSERT(bz->reserved_bpages > 0);
        bz->reserved_bpages--;

        bz->active_bpages++;
        KKASSERT(bz->active_bpages <= bz->total_bpages);

        BZ_UNLOCK(bz);

        if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
                /* Page offset needs to be preserved. */
                bpage->vaddr |= vaddr & PAGE_MASK;
                bpage->busaddr |= vaddr & PAGE_MASK;
        }

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
        return bpage->busaddr;
}
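/*
 * Note: when BUS_DMA_KEEP_PG_OFFSET stamps the client's in-page offset
 * into vaddr/busaddr above, free_bounce_page() below is responsible for
 * masking it back out before the page is reused.
 */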
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        bus_dmamap_t map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
                /*
                 * Reset the bounce page to start at offset 0.  Other uses
                 * of this bounce page may need to store a full page of
                 * data and/or assume it starts on a page boundary.
                 */
                bpage->vaddr &= ~PAGE_MASK;
                bpage->busaddr &= ~PAGE_MASK;
        }

        BZ_LOCK(bz);

        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

        bz->free_bpages++;
        KKASSERT(bz->free_bpages <= bz->total_bpages);

        KKASSERT(bz->active_bpages > 0);
        bz->active_bpages--;

        map = get_map_waiting(dmat);

        BZ_UNLOCK(bz);

        if (map != NULL)
                add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz = dmat->bounce_zone;
        bus_dmamap_t map;

        map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
        if (map != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
                        bz->total_deferred++;
                } else {
                        map = NULL;
                }
        }
        return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
        spin_lock(&bounce_map_list_spin);
        STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
        busdma_swi_pending = 1;
        setsoftvm();
        spin_unlock(&bounce_map_list_spin);
}

void
busdma_swi(void)
{
        bus_dmamap_t map;

        spin_lock(&bounce_map_list_spin);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                spin_unlock(&bounce_map_list_spin);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                spin_lock(&bounce_map_list_spin);
        }
        spin_unlock(&bounce_map_list_spin);
}
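/*
 * Deferred-load flow, for reference: when _bus_dmamap_load_buffer()
 * cannot reserve enough bounce pages it queues the map on the zone's
 * waiting list and returns EINPROGRESS.  As pages come back,
 * free_bounce_page()/return_bounce_pages() pull the first waiting map
 * off via get_map_waiting(), add_map_callback() schedules a VM software
 * interrupt, and busdma_swi() finally replays the load and invokes the
 * client callback.
 */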