/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>

#define MAX_BPAGES	1024

/*
 * 16 x N declared on stack.
 */
#define BUS_DMA_CACHE_SEGMENTS	8

struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
	struct spinlock	spin;
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN	BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

#define BUS_DMAMEM_KMALLOC(dmat) \
	((dmat)->maxsize <= PAGE_SIZE && \
	 (dmat)->alignment <= PAGE_SIZE && \
	 (dmat)->lowaddr >= ptoa(Maxmem))

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	struct spinlock	spin;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};

#define BZ_LOCK(bz)	spin_lock(&(bz)->spin)
#define BZ_UNLOCK(bz)	spin_unlock(&(bz)->spin)

static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_token);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

static int busdma_priv_zonecount = -1;

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1; /* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

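/*
 * Both tunables above can be set from loader.conf before boot, e.g.
 * (illustrative values only, not recommendations):
 *
 *	hw.busdma.max_bpages="2048"
 *	hw.busdma.bounce_alignment="0"
 */
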
struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);
static struct spinlock bounce_map_list_spin =
	SPINLOCK_INITIALIZER(&bounce_map_list_spin, "bounce_map_list_spin");

static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_zone(bus_dma_tag_t);
static int alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static void free_bounce_pages_all(bus_dma_tag_t);
static void free_bounce_zone(bus_dma_tag_t);
static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
	    vm_offset_t, bus_size_t *);
static void free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t get_map_waiting(bus_dma_tag_t);
static void add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
	   0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
	   0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
	   &bounce_alignment, 0, "Obey alignment constraint");

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL ||
		     dmat->filter(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

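/*
 * Worked example for run_filter() (illustrative numbers): for a tag
 * with lowaddr = 16MB, a large highaddr, alignment = 1 and no filter,
 * a physical address of 20MB falls in (lowaddr, highaddr] and must be
 * bounced.  With bounce_alignment enabled and alignment = 4096, an
 * address such as 0x1003 also bounces, because (0x1003 & 0xfff) != 0.
 */
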
static __inline
bus_dma_segment_t *
bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
{
	if (tag->flags & BUS_DMA_PROTECTED)
		return(tag->segments);

	if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		return(cache);
	spin_lock(&tag->spin);
	return(tag->segments);
}

static __inline
void
bus_dma_tag_unlock(bus_dma_tag_t tag)
{
	if (tag->flags & BUS_DMA_PROTECTED)
		return;

	if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
		spin_unlock(&tag->spin);
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/*
	 * Sanity checks
	 */

	if (alignment == 0)
		alignment = 1;
	if (alignment & (alignment - 1))
		panic("alignment must be power of 2");

	if (boundary != 0) {
		if (boundary & (boundary - 1))
			panic("boundary must be power of 2");
		if (boundary < maxsegsz) {
			kprintf("boundary < maxsegsz:\n");
			print_backtrace(-1);
			maxsegsz = boundary;
		}
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

	spin_init(&newtag->spin, "busdmacreate");
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

#ifdef notyet
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			parent->ref_count++;
	}

	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if ((newtag->flags & BUS_DMA_ALLOCALL) == 0 &&
		    ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}
back:
	if (error) {
		free_bounce_zone(newtag);
		kfree(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return error;
}

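/*
 * Usage sketch for bus_dma_tag_create() (hypothetical driver code,
 * not part of this file; "sc" and its fields are illustrative names):
 *
 *	error = bus_dma_tag_create(NULL,		// parent
 *	    1, 0,					// alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,			// lowaddr
 *	    BUS_SPACE_MAXADDR,				// highaddr
 *	    NULL, NULL,					// filter, filterarg
 *	    MAXBSIZE, 1,				// maxsize, nsegments
 *	    BUS_SPACE_MAXSIZE_32BIT,			// maxsegsz
 *	    0,						// flags
 *	    &sc->sc_dmat);
 *
 * A device that cannot reach all of physical memory (lowaddr below
 * ptoa(Maxmem)) picks up BUS_DMA_BOUNCE_LOWADDR above and will use
 * bounce pages as needed.
 */
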
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free_bounce_zone(dmat);
				if (dmat->segments != NULL)
					kfree(dmat->segments, M_DEVBUF);
				kfree(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
	return(tag->maxsize);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_ALLOCALL) {
			maxpages = Maxmem - atop(dmat->lowaddr);
		} else if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (!error &&
				    (dmat->flags & BUS_DMA_ALLOCALL) == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (!error) {
		dmat->map_count++;
	} else {
		kfree(*mapp, M_DEVBUF);
		*mapp = NULL;
	}
	return error;
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

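/*
 * Usage sketch (hypothetical driver code): a map only carries state
 * when the tag could bounce; otherwise *mapp comes back NULL, which
 * the load functions accept.
 *
 *	error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_ONEBPAGE,
 *	    &sc->sc_map);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
 */
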
static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		if (verify)
			panic("boundary check failed\n");
		if (bootverbose)
			kprintf("boundary check failed\n");
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		if (verify)
			panic("alignment check failed\n");
		if (bootverbose)
			kprintf("alignment check failed\n");
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/* XXX must alloc with correct mem attribute here */
	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * XXX
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			kfree(*vaddr, M_DEVBUF);
			*vaddr = kmalloc(maxsize, M_DEVBUF,
					 mflags | M_POWEROF2);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
				      0ul, dmat->lowaddr,
				      dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);

	if (attr != VM_MEMATTR_DEFAULT) {
		pmap_change_attr((vm_offset_t)(*vaddr),
				 dmat->maxsize / PAGE_SIZE, attr);
	}
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed");
	if (BUS_DMAMEM_KMALLOC(dmat))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

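/*
 * Usage sketch (hypothetical driver code): allocating a zeroed,
 * DMA-safe descriptor ring.  The returned map is always NULL, so it
 * is simply handed back to bus_dmamem_free().
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &map);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, sc->sc_ring, map);
 */
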
static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr, NULL);
	else
		return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastpaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments,
			int nsegments,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

#ifdef INVARIANTS
	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - (vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * NOTE: paddr may have a different in-page offset,
			 *	 unless BUS_DMA_KEEP_PG_OFFSET is set.
			 */
			paddr = add_bounce_page(dmat, map, vaddr, &size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0)
		error = EFBIG;

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}

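/*
 * Worked example for the nested splitting loop above (illustrative
 * numbers, not taken from real hardware): with boundary = 0x1000
 * (bmask = ~0xfff) and maxsegsz = 0x800, a segment with ds_addr =
 * 0xf00 that has grown to nextpaddr = 0x1100 crosses the boundary:
 * ((0x10ff ^ 0xf00) & ~0xfff) != 0, so tmpsize = 0x1000 - 0xf00 =
 * 0x100 and the segment is split into sg[0] = {0xf00, 0x100} and
 * sg[1] = {0x1000, 0x100}.
 */
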
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustments".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
					segments, dmat->nsegments,
					NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS) {
		KKASSERT((dmat->flags &
			  (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL)) !=
			 (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL));

		if (dmat->flags & BUS_DMA_PROTECTED)
			panic("protected dmamap callback will be deferred");

		bus_dma_tag_unlock(dmat);
		return error;
	}
	callback(callback_arg, segments, nsegs, error);
	bus_dma_tag_unlock(dmat);
	return 0;
}

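/*
 * Usage sketch (hypothetical driver code): the segment array is only
 * valid for the duration of the callback, so the callback copies out
 * what it needs.
 *
 *	static void
 *	my_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
 *	    my_callback, &busaddr, BUS_DMA_WAITOK);
 *	if (error == EINPROGRESS) {
 *		// Out of bounce pages; my_callback will run later from
 *		// busdma_swi() once pages are returned.
 *	}
 */
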
int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
		    bus_dmamap_callback_t *callback, void *callback_arg,
		    int flags)
{
	const struct ccb_scsiio *csio;

	KASSERT(ccb->ccb_h.func_code == XPT_SCSI_IO ||
		ccb->ccb_h.func_code == XPT_CONT_TARGET_IO,
		("invalid ccb func_code %u", ccb->ccb_h.func_code));
	csio = &ccb->csio;

	return (bus_dmamap_load(dmat, map, csio->data_ptr, csio->dxfer_len,
				callback, callback_arg, flags));
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
					     segments, dmat->nsegments,
					     &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	bus_dma_tag_unlock(dmat);
	return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only supports %d segments",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
				break;
			}
			first = 0;
		}
#ifdef INVARIANTS
		if (!error)
			KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
	} else {
		*nsegs = 0;
		error = EINVAL;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}

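/*
 * Usage sketch (hypothetical network driver code): mapping an mbuf
 * chain for transmit and falling back to defragmentation when the
 * chain has too many fragments.
 *
 *	error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, sc->sc_map,
 *	    m, segs, sc->sc_maxsegs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		m = m_defrag(m, M_NOWAIT);
 *		// NULL check, then retry the load with the new chain.
 *	}
 */
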
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	bus_dma_segment_t *segs;
	int nsegs_left;

	if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		segments = cache_segments;
	else
		segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments,
				   M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	segs = segments;
	nsegs_left = dmat->nsegments;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				segs, nsegs_left,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
		if (error == 0) {
			nsegs_left -= nsegs;
			segs += nsegs;
		}
	}

	/*
	 * Minimum one DMA segment, even if 0-length buffer.
	 */
	if (nsegs_left == dmat->nsegments)
		--nsegs_left;

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, dmat->nsegments - nsegs_left,
			 (bus_size_t)uio->uio_resid, error);
	}
	if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
		kfree(segments, M_DEVBUF);
	return error;
}

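/*
 * Usage sketch (hypothetical code, e.g. from a physio-style path):
 * the callback2 variant also receives the mapped byte count.
 *
 *	static void
 *	my_callback2(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    bus_size_t mapsize, int error)
 *	{
 *		...
 *	}
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_map, uio,
 *	    my_callback2, sc, BUS_DMA_NOWAIT);
 */
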
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			cpu_sfence();
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			cpu_lfence();
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
		/* BUS_DMASYNC_PREREAD - no operation on intel */
		/* BUS_DMASYNC_POSTWRITE - no operation on intel */
	}
}

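/*
 * Usage sketch (hypothetical driver code): the sync calls are what
 * actually copy data through the bounce pages, so they must bracket
 * every DMA transfer.
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *	// start the device transfer, wait for completion
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 */
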
"zone%d", busdma_priv_zonecount); 1145 busdma_priv_zonecount--; 1146 } 1147 1148 lwkt_reltoken(&bounce_zone_tok); 1149 1150 dmat->bounce_zone = bz; 1151 1152 sysctl_ctx_init(&bz->sysctl_ctx); 1153 bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx, 1154 SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1155 CTLFLAG_RD, 0, ""); 1156 if (bz->sysctl_tree == NULL) { 1157 sysctl_ctx_free(&bz->sysctl_ctx); 1158 return 0; /* XXX error code? */ 1159 } 1160 1161 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1162 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1163 "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1164 "Total bounce pages"); 1165 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1166 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1167 "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1168 "Free bounce pages"); 1169 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1170 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1171 "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1172 "Reserved bounce pages"); 1173 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1174 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1175 "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1176 "Active bounce pages"); 1177 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1178 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1179 "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1180 "Total bounce requests"); 1181 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1182 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1183 "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1184 "Total bounce requests that were deferred"); 1185 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1186 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1187 "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0, 1188 "Total bounce page reservations that were failed"); 1189 SYSCTL_ADD_STRING(&bz->sysctl_ctx, 1190 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1191 "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1192 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1193 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1194 "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1195 1196 return 0; 1197 } 1198 1199 static int 1200 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags) 1201 { 1202 struct bounce_zone *bz = dmat->bounce_zone; 1203 int count = 0, mflags; 1204 1205 if (flags & BUS_DMA_NOWAIT) 1206 mflags = M_NOWAIT; 1207 else 1208 mflags = M_WAITOK; 1209 1210 while (numpages > 0) { 1211 struct bounce_page *bpage; 1212 1213 bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO); 1214 1215 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1216 mflags, 0ul, 1217 bz->lowaddr, 1218 bz->alignment, 0); 1219 if (bpage->vaddr == 0) { 1220 kfree(bpage, M_DEVBUF); 1221 break; 1222 } 1223 bpage->busaddr = pmap_kextract(bpage->vaddr); 1224 1225 BZ_LOCK(bz); 1226 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1227 total_bounce_pages++; 1228 bz->total_bpages++; 1229 bz->free_bpages++; 1230 BZ_UNLOCK(bz); 1231 1232 count++; 1233 numpages--; 1234 } 1235 return count; 1236 } 1237 1238 static void 1239 free_bounce_pages_all(bus_dma_tag_t dmat) 1240 { 1241 struct bounce_zone *bz = dmat->bounce_zone; 1242 struct bounce_page *bpage; 1243 1244 BZ_LOCK(bz); 1245 1246 while ((bpage = STAILQ_FIRST(&bz->bounce_page_list)) != NULL) { 1247 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1248 1249 KKASSERT(total_bounce_pages > 0); 1250 total_bounce_pages--; 1251 1252 KKASSERT(bz->total_bpages > 0); 1253 bz->total_bpages--; 1254 1255 KKASSERT(bz->free_bpages > 0); 1256 bz->free_bpages--; 1257 1258 BZ_UNLOCK(bz); 1259 contigfree((void *)bpage->vaddr, PAGE_SIZE, M_DEVBUF); 1260 
static void
free_bounce_pages_all(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	BZ_LOCK(bz);

	while ((bpage = STAILQ_FIRST(&bz->bounce_page_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

		KKASSERT(total_bounce_pages > 0);
		total_bounce_pages--;

		KKASSERT(bz->total_bpages > 0);
		bz->total_bpages--;

		KKASSERT(bz->free_bpages > 0);
		bz->free_bpages--;

		BZ_UNLOCK(bz);
		contigfree((void *)bpage->vaddr, PAGE_SIZE, M_DEVBUF);
		kfree(bpage, M_DEVBUF);
		BZ_LOCK(bz);
	}
	if (bz->total_bpages) {
		kprintf("#%d bounce pages are still in use\n",
			bz->total_bpages);
		print_backtrace(-1);
	}

	BZ_UNLOCK(bz);
}

static void
free_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;

	if (bz == NULL)
		return;

	if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0)
		return;

	free_bounce_pages_all(dmat);
	dmat->bounce_zone = NULL;

	if (bz->sysctl_tree != NULL)
		sysctl_ctx_free(&bz->sysctl_ctx);
	kfree(bz, M_DEVBUF);
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}

	bz->free_bpages -= pages;

	bz->reserved_bpages += pages;
	KKASSERT(bz->reserved_bpages <= bz->total_bpages);

	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}

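/*
 * Worked example for reserve_bounce_pages() (illustrative numbers):
 * a map needing 4 pages with 1 already reserved asks again while the
 * zone has 2 free pages.  pages = MIN(2, 3) = 2.  Without commit the
 * call reserves nothing, bumps reserve_failed and returns a shortage
 * of 1; with commit it takes both free pages and returns the same
 * shortage of 1, leaving the caller to wait for the rest.
 */
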
static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int reserved = map->pagesreserved;
	bus_dmamap_t wait_map;

	map->pagesreserved = 0;
	map->pagesneeded = 0;

	if (reserved == 0)
		return;

	BZ_LOCK(bz);

	bz->free_bpages += reserved;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->reserved_bpages >= reserved);
	bz->reserved_bpages -= reserved;

	wait_map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (wait_map != NULL)
		add_map_callback(wait_map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t *sizep)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;
	bus_size_t size;

	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Page offset needs to be preserved.  No size adjustments
		 * needed.
		 */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
		size = *sizep;
	} else {
		/*
		 * Realign to bounce page base address, reduce size if
		 * necessary.  Bounce pages are typically already
		 * page-aligned.
		 */
		size = PAGE_SIZE - (bpage->busaddr & PAGE_MASK);
		if (size < *sizep) {
			*sizep = size;
		} else {
			size = *sizep;
		}
	}

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
	if (map != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			bz->total_deferred++;
		} else {
			map = NULL;
		}
	}
	return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
	spin_lock(&bounce_map_list_spin);
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	setsoftvm();
	spin_unlock(&bounce_map_list_spin);
}

void
busdma_swi(void)
{
	bus_dmamap_t map;

	spin_lock(&bounce_map_list_spin);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		spin_unlock(&bounce_map_list_spin);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		spin_lock(&bounce_map_list_spin);
	}
	spin_unlock(&bounce_map_list_spin);
}
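
/*
 * Reader's aid, summarizing the deferred-load path implemented above:
 * a blocking load that cannot reserve enough bounce pages queues its
 * map on the zone's bounce_map_waitinglist and returns EINPROGRESS.
 * When pages are later returned via free_bounce_page() or
 * return_bounce_pages(), get_map_waiting() re-attempts the
 * reservation, add_map_callback() moves the map to
 * bounce_map_callbacklist and raises the VM software interrupt, and
 * busdma_swi() replays bus_dmamap_load(), which finally invokes the
 * driver's callback.
 */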