1 /* 2 * Copyright (c) 1997, 1998 Justin T. Gibbs. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $ 27 */ 28 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/malloc.h> 32 #include <sys/mbuf.h> 33 #include <sys/uio.h> 34 #include <sys/bus_dma.h> 35 #include <sys/kernel.h> 36 #include <sys/sysctl.h> 37 #include <sys/lock.h> 38 39 #include <sys/thread2.h> 40 #include <sys/spinlock2.h> 41 #include <sys/mplock2.h> 42 43 #include <vm/vm.h> 44 #include <vm/vm_page.h> 45 46 /* XXX needed for to access pmap to convert per-proc virtual to physical */ 47 #include <sys/proc.h> 48 #include <vm/vm_map.h> 49 50 #include <machine/md_var.h> 51 #include <machine/pmap.h> 52 53 #include <bus/cam/cam.h> 54 #include <bus/cam/cam_ccb.h> 55 56 #define MAX_BPAGES 1024 57 58 /* 59 * 16 x N declared on stack. 60 */ 61 #define BUS_DMA_CACHE_SEGMENTS 8 62 63 struct bounce_zone; 64 struct bus_dmamap; 65 66 struct bus_dma_tag { 67 bus_dma_tag_t parent; 68 bus_size_t alignment; 69 bus_size_t boundary; 70 bus_addr_t lowaddr; 71 bus_addr_t highaddr; 72 bus_dma_filter_t *filter; 73 void *filterarg; 74 bus_size_t maxsize; 75 u_int nsegments; 76 bus_size_t maxsegsz; 77 int flags; 78 int ref_count; 79 int map_count; 80 bus_dma_segment_t *segments; 81 struct bounce_zone *bounce_zone; 82 struct spinlock spin; 83 }; 84 85 /* 86 * bus_dma_tag private flags 87 */ 88 #define BUS_DMA_BOUNCE_ALIGN BUS_DMA_BUS2 89 #define BUS_DMA_BOUNCE_LOWADDR BUS_DMA_BUS3 90 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 91 92 #define BUS_DMA_COULD_BOUNCE (BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN) 93 94 #define BUS_DMAMEM_KMALLOC(dmat) \ 95 ((dmat)->maxsize <= PAGE_SIZE && \ 96 (dmat)->alignment <= PAGE_SIZE && \ 97 (dmat)->lowaddr >= ptoa(Maxmem)) 98 99 struct bounce_page { 100 vm_offset_t vaddr; /* kva of bounce buffer */ 101 bus_addr_t busaddr; /* Physical address */ 102 vm_offset_t datavaddr; /* kva of client data */ 103 bus_size_t datacount; /* client data count */ 104 STAILQ_ENTRY(bounce_page) links; 105 }; 106 107 struct bounce_zone { 108 STAILQ_ENTRY(bounce_zone) links; 109 STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 110 STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 111 struct spinlock spin; 112 int total_bpages; 113 int free_bpages; 114 int reserved_bpages; 115 int active_bpages; 116 int total_bounced; 117 int total_deferred; 118 int reserve_failed; 119 bus_size_t alignment; 120 bus_addr_t lowaddr; 121 char zoneid[8]; 122 char lowaddrid[20]; 123 struct sysctl_ctx_list sysctl_ctx; 124 struct sysctl_oid *sysctl_tree; 125 }; 126 127 #define BZ_LOCK(bz) spin_lock(&(bz)->spin) 128 #define BZ_UNLOCK(bz) spin_unlock(&(bz)->spin) 129 130 static struct lwkt_token bounce_zone_tok = 131 LWKT_TOKEN_INITIALIZER(bounce_zone_token); 132 static int busdma_zonecount; 133 static STAILQ_HEAD(, bounce_zone) bounce_zone_list = 134 STAILQ_HEAD_INITIALIZER(bounce_zone_list); 135 136 static int busdma_priv_zonecount = -1; 137 138 int busdma_swi_pending; 139 static int total_bounce_pages; 140 static int max_bounce_pages = MAX_BPAGES; 141 static int bounce_alignment = 1; /* XXX temporary */ 142 143 TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages); 144 TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment); 145 146 struct bus_dmamap { 147 struct bp_list bpages; 148 int pagesneeded; 149 int pagesreserved; 150 bus_dma_tag_t dmat; 151 void *buf; /* unmapped buffer pointer */ 152 bus_size_t buflen; /* unmapped buffer length */ 153 bus_dmamap_callback_t *callback; 154 void *callback_arg; 155 STAILQ_ENTRY(bus_dmamap) links; 156 }; 157 158 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist = 159 STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist); 160 static struct spinlock bounce_map_list_spin = 161 SPINLOCK_INITIALIZER(&bounce_map_list_spin, "bounce_map_list_spin"); 162 163 static struct bus_dmamap nobounce_dmamap; 164 165 static int alloc_bounce_zone(bus_dma_tag_t); 166 static int alloc_bounce_pages(bus_dma_tag_t, u_int, int); 167 static void free_bounce_pages_all(bus_dma_tag_t); 168 static void free_bounce_zone(bus_dma_tag_t); 169 static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int); 170 static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t); 171 static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t, 172 vm_offset_t, bus_size_t *); 173 static void free_bounce_page(bus_dma_tag_t, struct bounce_page *); 174 175 static bus_dmamap_t get_map_waiting(bus_dma_tag_t); 176 static void add_map_callback(bus_dmamap_t); 177 178 SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 179 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages, 180 0, "Total bounce pages"); 181 SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages, 182 0, "Max bounce pages per bounce zone"); 183 SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD, 184 &bounce_alignment, 0, "Obey alignment constraint"); 185 186 static __inline int 187 run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 188 { 189 int retval; 190 191 retval = 0; 192 do { 193 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) || 194 (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0)) 195 && (dmat->filter == NULL || 196 dmat->filter(dmat->filterarg, paddr) != 0)) 197 retval = 1; 198 199 dmat = dmat->parent; 200 } while (retval == 0 && dmat != NULL); 201 return (retval); 202 } 203 204 static __inline 205 bus_dma_segment_t * 206 bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache) 207 { 208 if (tag->flags & BUS_DMA_PROTECTED) 209 return(tag->segments); 210 211 if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS) 212 return(cache); 213 spin_lock(&tag->spin); 214 return(tag->segments); 215 } 216 217 static __inline 218 void 219 bus_dma_tag_unlock(bus_dma_tag_t tag) 220 { 221 if (tag->flags & BUS_DMA_PROTECTED) 222 return; 223 224 if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS) 225 spin_unlock(&tag->spin); 226 } 227 228 /* 229 * Allocate a device specific dma_tag. 230 */ 231 int 232 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 233 bus_size_t boundary, bus_addr_t lowaddr, 234 bus_addr_t highaddr, bus_dma_filter_t *filter, 235 void *filterarg, bus_size_t maxsize, int nsegments, 236 bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat) 237 { 238 bus_dma_tag_t newtag; 239 int error = 0; 240 241 /* 242 * Sanity checks 243 */ 244 245 if (alignment == 0) 246 alignment = 1; 247 if (alignment & (alignment - 1)) 248 panic("alignment must be power of 2"); 249 250 if (boundary != 0) { 251 if (boundary & (boundary - 1)) 252 panic("boundary must be power of 2"); 253 if (boundary < maxsegsz) { 254 kprintf("boundary < maxsegsz:\n"); 255 print_backtrace(-1); 256 maxsegsz = boundary; 257 } 258 } 259 260 /* Return a NULL tag on failure */ 261 *dmat = NULL; 262 263 newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO); 264 265 spin_init(&newtag->spin, "busdmacreate"); 266 newtag->parent = parent; 267 newtag->alignment = alignment; 268 newtag->boundary = boundary; 269 newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 270 newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); 271 newtag->filter = filter; 272 newtag->filterarg = filterarg; 273 newtag->maxsize = maxsize; 274 newtag->nsegments = nsegments; 275 newtag->maxsegsz = maxsegsz; 276 newtag->flags = flags; 277 newtag->ref_count = 1; /* Count ourself */ 278 newtag->map_count = 0; 279 newtag->segments = NULL; 280 newtag->bounce_zone = NULL; 281 282 /* Take into account any restrictions imposed by our parent tag */ 283 if (parent != NULL) { 284 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 285 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 286 287 if (newtag->boundary == 0) { 288 newtag->boundary = parent->boundary; 289 } else if (parent->boundary != 0) { 290 newtag->boundary = MIN(parent->boundary, 291 newtag->boundary); 292 } 293 294 #ifdef notyet 295 newtag->alignment = MAX(parent->alignment, newtag->alignment); 296 #endif 297 298 if (newtag->filter == NULL) { 299 /* 300 * Short circuit looking at our parent directly 301 * since we have encapsulated all of its information 302 */ 303 newtag->filter = parent->filter; 304 newtag->filterarg = parent->filterarg; 305 newtag->parent = parent->parent; 306 } 307 if (newtag->parent != NULL) 308 parent->ref_count++; 309 } 310 311 if (newtag->lowaddr < ptoa(Maxmem)) 312 newtag->flags |= BUS_DMA_BOUNCE_LOWADDR; 313 if (bounce_alignment && newtag->alignment > 1 && 314 !(newtag->flags & BUS_DMA_ALIGNED)) 315 newtag->flags |= BUS_DMA_BOUNCE_ALIGN; 316 317 if ((newtag->flags & BUS_DMA_COULD_BOUNCE) && 318 (flags & BUS_DMA_ALLOCNOW) != 0) { 319 struct bounce_zone *bz; 320 321 /* Must bounce */ 322 323 error = alloc_bounce_zone(newtag); 324 if (error) 325 goto back; 326 bz = newtag->bounce_zone; 327 328 if ((newtag->flags & BUS_DMA_ALLOCALL) == 0 && 329 ptoa(bz->total_bpages) < maxsize) { 330 int pages; 331 332 if (flags & BUS_DMA_ONEBPAGE) { 333 pages = 1; 334 } else { 335 pages = atop(round_page(maxsize)) - 336 bz->total_bpages; 337 pages = MAX(pages, 1); 338 } 339 340 /* Add pages to our bounce pool */ 341 if (alloc_bounce_pages(newtag, pages, flags) < pages) 342 error = ENOMEM; 343 344 /* Performed initial allocation */ 345 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 346 } 347 } 348 back: 349 if (error) { 350 free_bounce_zone(newtag); 351 kfree(newtag, M_DEVBUF); 352 } else { 353 *dmat = newtag; 354 } 355 return error; 356 } 357 358 int 359 bus_dma_tag_destroy(bus_dma_tag_t dmat) 360 { 361 if (dmat != NULL) { 362 if (dmat->map_count != 0) 363 return (EBUSY); 364 365 while (dmat != NULL) { 366 bus_dma_tag_t parent; 367 368 parent = dmat->parent; 369 dmat->ref_count--; 370 if (dmat->ref_count == 0) { 371 free_bounce_zone(dmat); 372 if (dmat->segments != NULL) 373 kfree(dmat->segments, M_DEVBUF); 374 kfree(dmat, M_DEVBUF); 375 /* 376 * Last reference count, so 377 * release our reference 378 * count on our parent. 379 */ 380 dmat = parent; 381 } else 382 dmat = NULL; 383 } 384 } 385 return (0); 386 } 387 388 bus_size_t 389 bus_dma_tag_getmaxsize(bus_dma_tag_t tag) 390 { 391 return(tag->maxsize); 392 } 393 394 /* 395 * Allocate a handle for mapping from kva/uva/physical 396 * address space into bus device space. 397 */ 398 int 399 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 400 { 401 int error; 402 403 error = 0; 404 405 if (dmat->segments == NULL) { 406 KKASSERT(dmat->nsegments && dmat->nsegments < 16384); 407 dmat->segments = kmalloc(sizeof(bus_dma_segment_t) * 408 dmat->nsegments, M_DEVBUF, M_INTWAIT); 409 } 410 411 if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 412 struct bounce_zone *bz; 413 int maxpages; 414 415 /* Must bounce */ 416 417 if (dmat->bounce_zone == NULL) { 418 error = alloc_bounce_zone(dmat); 419 if (error) 420 return error; 421 } 422 bz = dmat->bounce_zone; 423 424 *mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO); 425 426 /* Initialize the new map */ 427 STAILQ_INIT(&((*mapp)->bpages)); 428 429 /* 430 * Attempt to add pages to our pool on a per-instance 431 * basis up to a sane limit. 432 */ 433 if (dmat->flags & BUS_DMA_ALLOCALL) { 434 maxpages = Maxmem - atop(dmat->lowaddr); 435 } else if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) { 436 maxpages = max_bounce_pages; 437 } else { 438 maxpages = MIN(max_bounce_pages, 439 Maxmem - atop(dmat->lowaddr)); 440 } 441 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || 442 (dmat->map_count > 0 && bz->total_bpages < maxpages)) { 443 int pages; 444 445 if (flags & BUS_DMA_ONEBPAGE) { 446 pages = 1; 447 } else { 448 pages = atop(round_page(dmat->maxsize)); 449 pages = MIN(maxpages - bz->total_bpages, pages); 450 pages = MAX(pages, 1); 451 } 452 if (alloc_bounce_pages(dmat, pages, flags) < pages) 453 error = ENOMEM; 454 455 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 456 if (!error && 457 (dmat->flags & BUS_DMA_ALLOCALL) == 0) 458 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 459 } else { 460 error = 0; 461 } 462 } 463 } else { 464 *mapp = NULL; 465 } 466 if (!error) { 467 dmat->map_count++; 468 } else { 469 kfree(*mapp, M_DEVBUF); 470 *mapp = NULL; 471 } 472 return error; 473 } 474 475 /* 476 * Destroy a handle for mapping from kva/uva/physical 477 * address space into bus device space. 478 */ 479 int 480 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 481 { 482 if (map != NULL && map != (void *)-1) { 483 if (STAILQ_FIRST(&map->bpages) != NULL) 484 return (EBUSY); 485 kfree(map, M_DEVBUF); 486 } 487 dmat->map_count--; 488 return (0); 489 } 490 491 static __inline bus_size_t 492 check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify) 493 { 494 bus_size_t maxsize = 0; 495 uintptr_t vaddr = (uintptr_t)vaddr0; 496 497 if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) { 498 if (verify) 499 panic("boundary check failed\n"); 500 maxsize = dmat->maxsize; 501 } 502 if (vaddr & (dmat->alignment - 1)) { 503 if (verify) 504 panic("alignment check failed\n"); 505 if (dmat->maxsize < dmat->alignment) 506 maxsize = dmat->alignment; 507 else 508 maxsize = dmat->maxsize; 509 } 510 return maxsize; 511 } 512 513 /* 514 * Allocate a piece of memory that can be efficiently mapped into 515 * bus device space based on the constraints lited in the dma tag. 516 * 517 * Use *mapp to record whether we were able to use kmalloc() 518 * or whether we had to use contigmalloc(). 519 */ 520 int 521 bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, 522 bus_dmamap_t *mapp) 523 { 524 vm_memattr_t attr; 525 int mflags; 526 527 /* If we succeed, no mapping/bouncing will be required */ 528 *mapp = NULL; 529 530 if (dmat->segments == NULL) { 531 KKASSERT(dmat->nsegments < 16384); 532 dmat->segments = kmalloc(sizeof(bus_dma_segment_t) * 533 dmat->nsegments, M_DEVBUF, M_INTWAIT); 534 } 535 536 if (flags & BUS_DMA_NOWAIT) 537 mflags = M_NOWAIT; 538 else 539 mflags = M_WAITOK; 540 if (flags & BUS_DMA_ZERO) 541 mflags |= M_ZERO; 542 if (flags & BUS_DMA_NOCACHE) 543 attr = VM_MEMATTR_UNCACHEABLE; 544 else 545 attr = VM_MEMATTR_DEFAULT; 546 547 /* XXX must alloc with correct mem attribute here */ 548 if (BUS_DMAMEM_KMALLOC(dmat) && attr == VM_MEMATTR_DEFAULT) { 549 bus_size_t maxsize; 550 551 *vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags); 552 553 /* 554 * XXX 555 * Check whether the allocation 556 * - crossed a page boundary 557 * - was not aligned 558 * Retry with power-of-2 alignment in the above cases. 559 */ 560 maxsize = check_kmalloc(dmat, *vaddr, 0); 561 if (maxsize) { 562 kfree(*vaddr, M_DEVBUF); 563 *vaddr = kmalloc(maxsize, M_DEVBUF, 564 mflags | M_POWEROF2); 565 check_kmalloc(dmat, *vaddr, 1); 566 } 567 } else { 568 /* 569 * XXX Use Contigmalloc until it is merged into this facility 570 * and handles multi-seg allocations. Nobody is doing 571 * multi-seg allocations yet though. 572 */ 573 *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 574 0ul, dmat->lowaddr, 575 dmat->alignment, dmat->boundary); 576 *mapp = (void *)-1; 577 } 578 if (*vaddr == NULL) 579 return (ENOMEM); 580 581 if (attr != VM_MEMATTR_DEFAULT) { 582 pmap_change_attr((vm_offset_t)(*vaddr), 583 dmat->maxsize / PAGE_SIZE, attr); 584 } 585 return (0); 586 } 587 588 /* 589 * Free a piece of memory and it's allociated dmamap, that was allocated 590 * via bus_dmamem_alloc. Make the same choice for free/contigfree. 591 */ 592 void 593 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 594 { 595 /* 596 * dmamem does not need to be bounced, so the map should be 597 * NULL 598 */ 599 if (map != NULL && map != (void *)-1) 600 panic("bus_dmamem_free: Invalid map freed"); 601 if (map == NULL) 602 kfree(vaddr, M_DEVBUF); 603 else 604 contigfree(vaddr, dmat->maxsize, M_DEVBUF); 605 } 606 607 static __inline vm_paddr_t 608 _bus_dma_extract(pmap_t pmap, vm_offset_t vaddr) 609 { 610 if (pmap) 611 return pmap_extract(pmap, vaddr, NULL); 612 else 613 return pmap_kextract(vaddr); 614 } 615 616 /* 617 * Utility function to load a linear buffer. lastaddrp holds state 618 * between invocations (for multiple-buffer loads). segp contains 619 * the segment following the starting one on entrace, and the ending 620 * segment on exit. first indicates if this is the first invocation 621 * of this function. 622 */ 623 static int 624 _bus_dmamap_load_buffer(bus_dma_tag_t dmat, 625 bus_dmamap_t map, 626 void *buf, bus_size_t buflen, 627 bus_dma_segment_t *segments, 628 int nsegments, 629 pmap_t pmap, 630 int flags, 631 vm_paddr_t *lastpaddrp, 632 int *segp, 633 int first) 634 { 635 vm_offset_t vaddr; 636 vm_paddr_t paddr, nextpaddr; 637 bus_dma_segment_t *sg; 638 bus_addr_t bmask; 639 int seg, error = 0; 640 641 if (map == NULL || map == (void *)-1) 642 map = &nobounce_dmamap; 643 644 #ifdef INVARIANTS 645 if (dmat->flags & BUS_DMA_ALIGNED) 646 KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0); 647 #endif 648 649 /* 650 * If we are being called during a callback, pagesneeded will 651 * be non-zero, so we can avoid doing the work twice. 652 */ 653 if ((dmat->flags & BUS_DMA_COULD_BOUNCE) && 654 map != &nobounce_dmamap && map->pagesneeded == 0) { 655 vm_offset_t vendaddr; 656 657 /* 658 * Count the number of bounce pages 659 * needed in order to complete this transfer 660 */ 661 vaddr = (vm_offset_t)buf; 662 vendaddr = (vm_offset_t)buf + buflen; 663 664 while (vaddr < vendaddr) { 665 paddr = _bus_dma_extract(pmap, vaddr); 666 if (run_filter(dmat, paddr) != 0) 667 map->pagesneeded++; 668 vaddr += (PAGE_SIZE - (vaddr & PAGE_MASK)); 669 } 670 } 671 672 /* Reserve Necessary Bounce Pages */ 673 if (map->pagesneeded != 0) { 674 struct bounce_zone *bz; 675 676 bz = dmat->bounce_zone; 677 BZ_LOCK(bz); 678 if (flags & BUS_DMA_NOWAIT) { 679 if (reserve_bounce_pages(dmat, map, 0) != 0) { 680 BZ_UNLOCK(bz); 681 error = ENOMEM; 682 goto free_bounce; 683 } 684 } else { 685 if (reserve_bounce_pages(dmat, map, 1) != 0) { 686 /* Queue us for resources */ 687 map->dmat = dmat; 688 map->buf = buf; 689 map->buflen = buflen; 690 691 STAILQ_INSERT_TAIL( 692 &dmat->bounce_zone->bounce_map_waitinglist, 693 map, links); 694 BZ_UNLOCK(bz); 695 696 return (EINPROGRESS); 697 } 698 } 699 BZ_UNLOCK(bz); 700 } 701 702 KKASSERT(*segp >= 1 && *segp <= nsegments); 703 seg = *segp; 704 sg = &segments[seg - 1]; 705 706 vaddr = (vm_offset_t)buf; 707 nextpaddr = *lastpaddrp; 708 bmask = ~(dmat->boundary - 1); /* note: will be 0 if boundary is 0 */ 709 710 /* force at least one segment */ 711 do { 712 bus_size_t size; 713 714 /* 715 * Per-page main loop 716 */ 717 paddr = _bus_dma_extract(pmap, vaddr); 718 size = PAGE_SIZE - (paddr & PAGE_MASK); 719 if (size > buflen) 720 size = buflen; 721 if (map->pagesneeded != 0 && run_filter(dmat, paddr)) { 722 /* 723 * NOTE: paddr may have different in-page offset, 724 * unless BUS_DMA_KEEP_PG_OFFSET is set. 725 */ 726 paddr = add_bounce_page(dmat, map, vaddr, &size); 727 } 728 729 /* 730 * Fill in the bus_dma_segment 731 */ 732 if (first) { 733 sg->ds_addr = paddr; 734 sg->ds_len = size; 735 first = 0; 736 } else if (paddr == nextpaddr) { 737 sg->ds_len += size; 738 } else { 739 sg++; 740 seg++; 741 if (seg > nsegments) 742 break; 743 sg->ds_addr = paddr; 744 sg->ds_len = size; 745 } 746 nextpaddr = paddr + size; 747 748 /* 749 * Handle maxsegsz and boundary issues with a nested loop 750 */ 751 for (;;) { 752 bus_size_t tmpsize; 753 754 /* 755 * Limit to the boundary and maximum segment size 756 */ 757 if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) { 758 tmpsize = dmat->boundary - 759 (sg->ds_addr & ~bmask); 760 if (tmpsize > dmat->maxsegsz) 761 tmpsize = dmat->maxsegsz; 762 KKASSERT(tmpsize < sg->ds_len); 763 } else if (sg->ds_len > dmat->maxsegsz) { 764 tmpsize = dmat->maxsegsz; 765 } else { 766 break; 767 } 768 769 /* 770 * Futz, split the data into a new segment. 771 */ 772 if (seg >= nsegments) 773 goto fail; 774 sg[1].ds_len = sg[0].ds_len - tmpsize; 775 sg[1].ds_addr = sg[0].ds_addr + tmpsize; 776 sg[0].ds_len = tmpsize; 777 sg++; 778 seg++; 779 } 780 781 /* 782 * Adjust for loop 783 */ 784 buflen -= size; 785 vaddr += size; 786 } while (buflen > 0); 787 fail: 788 if (buflen != 0) 789 error = EFBIG; 790 791 *segp = seg; 792 *lastpaddrp = nextpaddr; 793 794 free_bounce: 795 if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) && 796 map != &nobounce_dmamap) { 797 _bus_dmamap_unload(dmat, map); 798 return_bounce_pages(dmat, map); 799 } 800 return error; 801 } 802 803 /* 804 * Map the buffer buf into bus space using the dmamap map. 805 */ 806 int 807 bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 808 bus_size_t buflen, bus_dmamap_callback_t *callback, 809 void *callback_arg, int flags) 810 { 811 bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS]; 812 bus_dma_segment_t *segments; 813 vm_paddr_t lastaddr = 0; 814 int error, nsegs = 1; 815 816 if (map != NULL && map != (void *)-1) { 817 /* 818 * XXX 819 * Follow old semantics. Once all of the callers are fixed, 820 * we should get rid of these internal flag "adjustment". 821 */ 822 flags &= ~BUS_DMA_NOWAIT; 823 flags |= BUS_DMA_WAITOK; 824 825 map->callback = callback; 826 map->callback_arg = callback_arg; 827 } 828 829 segments = bus_dma_tag_lock(dmat, cache_segments); 830 error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, 831 segments, dmat->nsegments, 832 NULL, flags, &lastaddr, &nsegs, 1); 833 if (error == EINPROGRESS) { 834 KKASSERT((dmat->flags & 835 (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL)) != 836 (BUS_DMA_PRIVBZONE | BUS_DMA_ALLOCALL)); 837 838 if (dmat->flags & BUS_DMA_PROTECTED) 839 panic("protected dmamap callback will be defered"); 840 841 bus_dma_tag_unlock(dmat); 842 return error; 843 } 844 callback(callback_arg, segments, nsegs, error); 845 bus_dma_tag_unlock(dmat); 846 return 0; 847 } 848 849 /* 850 * Like _bus_dmamap_load(), but for ccb. 851 */ 852 int 853 bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, 854 bus_dmamap_callback_t *callback, void *callback_arg, int flags) 855 { 856 const struct ccb_scsiio *csio; 857 858 KASSERT(ccb->ccb_h.func_code == XPT_SCSI_IO || 859 ccb->ccb_h.func_code == XPT_CONT_TARGET_IO, 860 ("invalid ccb func_code %u", ccb->ccb_h.func_code)); 861 csio = &ccb->csio; 862 863 return (bus_dmamap_load(dmat, map, csio->data_ptr, csio->dxfer_len, 864 callback, callback_arg, flags)); 865 } 866 867 /* 868 * Like _bus_dmamap_load(), but for mbufs. 869 */ 870 int 871 bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 872 struct mbuf *m0, 873 bus_dmamap_callback2_t *callback, void *callback_arg, 874 int flags) 875 { 876 bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS]; 877 bus_dma_segment_t *segments; 878 int nsegs, error; 879 880 /* 881 * XXX 882 * Follow old semantics. Once all of the callers are fixed, 883 * we should get rid of these internal flag "adjustment". 884 */ 885 flags &= ~BUS_DMA_WAITOK; 886 flags |= BUS_DMA_NOWAIT; 887 888 segments = bus_dma_tag_lock(dmat, cache_segments); 889 error = bus_dmamap_load_mbuf_segment(dmat, map, m0, 890 segments, dmat->nsegments, &nsegs, flags); 891 if (error) { 892 /* force "no valid mappings" in callback */ 893 callback(callback_arg, segments, 0, 894 0, error); 895 } else { 896 callback(callback_arg, segments, nsegs, 897 m0->m_pkthdr.len, error); 898 } 899 bus_dma_tag_unlock(dmat); 900 return error; 901 } 902 903 int 904 bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map, 905 struct mbuf *m0, 906 bus_dma_segment_t *segs, int maxsegs, 907 int *nsegs, int flags) 908 { 909 int error; 910 911 M_ASSERTPKTHDR(m0); 912 913 KASSERT(maxsegs >= 1, ("invalid maxsegs %d", maxsegs)); 914 KASSERT(maxsegs <= dmat->nsegments, 915 ("%d too many segments, dmat only supports %d segments", 916 maxsegs, dmat->nsegments)); 917 KASSERT(flags & BUS_DMA_NOWAIT, 918 ("only BUS_DMA_NOWAIT is supported")); 919 920 if (m0->m_pkthdr.len <= dmat->maxsize) { 921 int first = 1; 922 vm_paddr_t lastaddr = 0; 923 struct mbuf *m; 924 925 *nsegs = 1; 926 error = 0; 927 for (m = m0; m != NULL && error == 0; m = m->m_next) { 928 if (m->m_len == 0) 929 continue; 930 931 error = _bus_dmamap_load_buffer(dmat, map, 932 m->m_data, m->m_len, 933 segs, maxsegs, 934 NULL, flags, &lastaddr, 935 nsegs, first); 936 if (error == ENOMEM && !first) { 937 /* 938 * Out of bounce pages due to too many 939 * fragments in the mbuf chain; return 940 * EFBIG instead. 941 */ 942 error = EFBIG; 943 break; 944 } 945 first = 0; 946 } 947 #ifdef INVARIANTS 948 if (!error) 949 KKASSERT(*nsegs <= maxsegs && *nsegs >= 1); 950 #endif 951 } else { 952 *nsegs = 0; 953 error = EINVAL; 954 } 955 KKASSERT(error != EINPROGRESS); 956 return error; 957 } 958 959 /* 960 * Like _bus_dmamap_load(), but for uios. 961 */ 962 int 963 bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 964 struct uio *uio, 965 bus_dmamap_callback2_t *callback, void *callback_arg, 966 int flags) 967 { 968 vm_paddr_t lastaddr; 969 int nsegs, error, first, i; 970 bus_size_t resid; 971 struct iovec *iov; 972 pmap_t pmap; 973 bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS]; 974 bus_dma_segment_t *segments; 975 bus_dma_segment_t *segs; 976 int nsegs_left; 977 978 if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS) 979 segments = cache_segments; 980 else 981 segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments, 982 M_DEVBUF, M_WAITOK | M_ZERO); 983 984 /* 985 * XXX 986 * Follow old semantics. Once all of the callers are fixed, 987 * we should get rid of these internal flag "adjustment". 988 */ 989 flags &= ~BUS_DMA_WAITOK; 990 flags |= BUS_DMA_NOWAIT; 991 992 resid = (bus_size_t)uio->uio_resid; 993 iov = uio->uio_iov; 994 995 segs = segments; 996 nsegs_left = dmat->nsegments; 997 998 if (uio->uio_segflg == UIO_USERSPACE) { 999 struct thread *td; 1000 1001 td = uio->uio_td; 1002 KASSERT(td != NULL && td->td_proc != NULL, 1003 ("bus_dmamap_load_uio: USERSPACE but no proc")); 1004 pmap = vmspace_pmap(td->td_proc->p_vmspace); 1005 } else { 1006 pmap = NULL; 1007 } 1008 1009 error = 0; 1010 nsegs = 1; 1011 first = 1; 1012 lastaddr = 0; 1013 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 1014 /* 1015 * Now at the first iovec to load. Load each iovec 1016 * until we have exhausted the residual count. 1017 */ 1018 bus_size_t minlen = 1019 resid < iov[i].iov_len ? resid : iov[i].iov_len; 1020 caddr_t addr = (caddr_t) iov[i].iov_base; 1021 1022 error = _bus_dmamap_load_buffer(dmat, map, addr, minlen, 1023 segs, nsegs_left, 1024 pmap, flags, &lastaddr, &nsegs, first); 1025 first = 0; 1026 1027 resid -= minlen; 1028 if (error == 0) { 1029 nsegs_left -= nsegs; 1030 segs += nsegs; 1031 } 1032 } 1033 1034 /* 1035 * Minimum one DMA segment, even if 0-length buffer. 1036 */ 1037 if (nsegs_left == dmat->nsegments) 1038 --nsegs_left; 1039 1040 if (error) { 1041 /* force "no valid mappings" in callback */ 1042 callback(callback_arg, segments, 0, 1043 0, error); 1044 } else { 1045 callback(callback_arg, segments, dmat->nsegments - nsegs_left, 1046 (bus_size_t)uio->uio_resid, error); 1047 } 1048 if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS) 1049 kfree(segments, M_DEVBUF); 1050 return error; 1051 } 1052 1053 /* 1054 * Release the mapping held by map. 1055 */ 1056 void 1057 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 1058 { 1059 struct bounce_page *bpage; 1060 1061 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1062 STAILQ_REMOVE_HEAD(&map->bpages, links); 1063 free_bounce_page(dmat, bpage); 1064 } 1065 } 1066 1067 void 1068 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1069 { 1070 struct bounce_page *bpage; 1071 1072 if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1073 /* 1074 * Handle data bouncing. We might also 1075 * want to add support for invalidating 1076 * the caches on broken hardware 1077 */ 1078 if (op & BUS_DMASYNC_PREWRITE) { 1079 while (bpage != NULL) { 1080 bcopy((void *)bpage->datavaddr, 1081 (void *)bpage->vaddr, 1082 bpage->datacount); 1083 bpage = STAILQ_NEXT(bpage, links); 1084 } 1085 cpu_sfence(); 1086 dmat->bounce_zone->total_bounced++; 1087 } 1088 if (op & BUS_DMASYNC_POSTREAD) { 1089 cpu_lfence(); 1090 while (bpage != NULL) { 1091 bcopy((void *)bpage->vaddr, 1092 (void *)bpage->datavaddr, 1093 bpage->datacount); 1094 bpage = STAILQ_NEXT(bpage, links); 1095 } 1096 dmat->bounce_zone->total_bounced++; 1097 } 1098 /* BUS_DMASYNC_PREREAD - no operation on intel */ 1099 /* BUS_DMASYNC_POSTWRITE - no operation on intel */ 1100 } 1101 } 1102 1103 static int 1104 alloc_bounce_zone(bus_dma_tag_t dmat) 1105 { 1106 struct bounce_zone *bz, *new_bz; 1107 1108 KASSERT(dmat->bounce_zone == NULL, 1109 ("bounce zone was already assigned")); 1110 1111 new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO); 1112 1113 lwkt_gettoken(&bounce_zone_tok); 1114 1115 if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) { 1116 /* 1117 * For shared bounce zone, check to see 1118 * if we already have a suitable zone 1119 */ 1120 STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1121 if (dmat->alignment <= bz->alignment && 1122 dmat->lowaddr >= bz->lowaddr) { 1123 lwkt_reltoken(&bounce_zone_tok); 1124 1125 dmat->bounce_zone = bz; 1126 kfree(new_bz, M_DEVBUF); 1127 return 0; 1128 } 1129 } 1130 } 1131 bz = new_bz; 1132 1133 spin_init(&bz->spin, "allocbouncezone"); 1134 STAILQ_INIT(&bz->bounce_page_list); 1135 STAILQ_INIT(&bz->bounce_map_waitinglist); 1136 bz->free_bpages = 0; 1137 bz->reserved_bpages = 0; 1138 bz->active_bpages = 0; 1139 bz->lowaddr = dmat->lowaddr; 1140 bz->alignment = round_page(dmat->alignment); 1141 ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1142 1143 if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) { 1144 ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1145 busdma_zonecount++; 1146 STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1147 } else { 1148 ksnprintf(bz->zoneid, 8, "zone%d", busdma_priv_zonecount); 1149 busdma_priv_zonecount--; 1150 } 1151 1152 lwkt_reltoken(&bounce_zone_tok); 1153 1154 dmat->bounce_zone = bz; 1155 1156 sysctl_ctx_init(&bz->sysctl_ctx); 1157 bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx, 1158 SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1159 CTLFLAG_RD, 0, ""); 1160 if (bz->sysctl_tree == NULL) { 1161 sysctl_ctx_free(&bz->sysctl_ctx); 1162 return 0; /* XXX error code? */ 1163 } 1164 1165 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1166 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1167 "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1168 "Total bounce pages"); 1169 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1170 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1171 "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1172 "Free bounce pages"); 1173 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1174 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1175 "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1176 "Reserved bounce pages"); 1177 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1178 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1179 "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1180 "Active bounce pages"); 1181 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1182 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1183 "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1184 "Total bounce requests"); 1185 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1186 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1187 "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1188 "Total bounce requests that were deferred"); 1189 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1190 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1191 "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0, 1192 "Total bounce page reservations that were failed"); 1193 SYSCTL_ADD_STRING(&bz->sysctl_ctx, 1194 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1195 "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1196 SYSCTL_ADD_INT(&bz->sysctl_ctx, 1197 SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO, 1198 "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1199 1200 return 0; 1201 } 1202 1203 static int 1204 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags) 1205 { 1206 struct bounce_zone *bz = dmat->bounce_zone; 1207 int count = 0, mflags; 1208 1209 if (flags & BUS_DMA_NOWAIT) 1210 mflags = M_NOWAIT; 1211 else 1212 mflags = M_WAITOK; 1213 1214 while (numpages > 0) { 1215 struct bounce_page *bpage; 1216 1217 bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO); 1218 1219 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1220 mflags, 0ul, 1221 bz->lowaddr, 1222 bz->alignment, 0); 1223 if (bpage->vaddr == 0) { 1224 kfree(bpage, M_DEVBUF); 1225 break; 1226 } 1227 bpage->busaddr = pmap_kextract(bpage->vaddr); 1228 1229 BZ_LOCK(bz); 1230 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1231 total_bounce_pages++; 1232 bz->total_bpages++; 1233 bz->free_bpages++; 1234 BZ_UNLOCK(bz); 1235 1236 count++; 1237 numpages--; 1238 } 1239 return count; 1240 } 1241 1242 static void 1243 free_bounce_pages_all(bus_dma_tag_t dmat) 1244 { 1245 struct bounce_zone *bz = dmat->bounce_zone; 1246 struct bounce_page *bpage; 1247 1248 BZ_LOCK(bz); 1249 1250 while ((bpage = STAILQ_FIRST(&bz->bounce_page_list)) != NULL) { 1251 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1252 1253 KKASSERT(total_bounce_pages > 0); 1254 total_bounce_pages--; 1255 1256 KKASSERT(bz->total_bpages > 0); 1257 bz->total_bpages--; 1258 1259 KKASSERT(bz->free_bpages > 0); 1260 bz->free_bpages--; 1261 1262 BZ_UNLOCK(bz); 1263 contigfree((void *)bpage->vaddr, PAGE_SIZE, M_DEVBUF); 1264 kfree(bpage, M_DEVBUF); 1265 BZ_LOCK(bz); 1266 } 1267 if (bz->total_bpages) { 1268 kprintf("#%d bounce pages are still in use\n", 1269 bz->total_bpages); 1270 print_backtrace(-1); 1271 } 1272 1273 BZ_UNLOCK(bz); 1274 } 1275 1276 static void 1277 free_bounce_zone(bus_dma_tag_t dmat) 1278 { 1279 struct bounce_zone *bz = dmat->bounce_zone; 1280 1281 if (bz == NULL) 1282 return; 1283 1284 if ((dmat->flags & BUS_DMA_PRIVBZONE) == 0) 1285 return; 1286 1287 free_bounce_pages_all(dmat); 1288 dmat->bounce_zone = NULL; 1289 1290 if (bz->sysctl_tree != NULL) 1291 sysctl_ctx_free(&bz->sysctl_ctx); 1292 kfree(bz, M_DEVBUF); 1293 } 1294 1295 /* Assume caller holds bounce zone spinlock */ 1296 static int 1297 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1298 { 1299 struct bounce_zone *bz = dmat->bounce_zone; 1300 int pages; 1301 1302 pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1303 if (!commit && map->pagesneeded > (map->pagesreserved + pages)) { 1304 bz->reserve_failed++; 1305 return (map->pagesneeded - (map->pagesreserved + pages)); 1306 } 1307 1308 bz->free_bpages -= pages; 1309 1310 bz->reserved_bpages += pages; 1311 KKASSERT(bz->reserved_bpages <= bz->total_bpages); 1312 1313 map->pagesreserved += pages; 1314 pages = map->pagesneeded - map->pagesreserved; 1315 1316 return pages; 1317 } 1318 1319 static void 1320 return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map) 1321 { 1322 struct bounce_zone *bz = dmat->bounce_zone; 1323 int reserved = map->pagesreserved; 1324 bus_dmamap_t wait_map; 1325 1326 map->pagesreserved = 0; 1327 map->pagesneeded = 0; 1328 1329 if (reserved == 0) 1330 return; 1331 1332 BZ_LOCK(bz); 1333 1334 bz->free_bpages += reserved; 1335 KKASSERT(bz->free_bpages <= bz->total_bpages); 1336 1337 KKASSERT(bz->reserved_bpages >= reserved); 1338 bz->reserved_bpages -= reserved; 1339 1340 wait_map = get_map_waiting(dmat); 1341 1342 BZ_UNLOCK(bz); 1343 1344 if (wait_map != NULL) 1345 add_map_callback(map); 1346 } 1347 1348 static bus_addr_t 1349 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1350 bus_size_t *sizep) 1351 { 1352 struct bounce_zone *bz = dmat->bounce_zone; 1353 struct bounce_page *bpage; 1354 bus_size_t size; 1355 1356 KASSERT(map->pagesneeded > 0, ("map doesn't need any pages")); 1357 map->pagesneeded--; 1358 1359 KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages")); 1360 map->pagesreserved--; 1361 1362 BZ_LOCK(bz); 1363 1364 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1365 KASSERT(bpage != NULL, ("free page list is empty")); 1366 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1367 1368 KKASSERT(bz->reserved_bpages > 0); 1369 bz->reserved_bpages--; 1370 1371 bz->active_bpages++; 1372 KKASSERT(bz->active_bpages <= bz->total_bpages); 1373 1374 BZ_UNLOCK(bz); 1375 1376 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1377 /* 1378 * Page offset needs to be preserved. No size adjustments 1379 * needed. 1380 */ 1381 bpage->vaddr |= vaddr & PAGE_MASK; 1382 bpage->busaddr |= vaddr & PAGE_MASK; 1383 size = *sizep; 1384 } else { 1385 /* 1386 * Realign to bounce page base address, reduce size if 1387 * necessary. Bounce pages are typically already 1388 * page-aligned. 1389 */ 1390 size = PAGE_SIZE - (bpage->busaddr & PAGE_MASK); 1391 if (size < *sizep) { 1392 *sizep = size; 1393 } else { 1394 size = *sizep; 1395 } 1396 } 1397 1398 bpage->datavaddr = vaddr; 1399 bpage->datacount = size; 1400 STAILQ_INSERT_TAIL(&map->bpages, bpage, links); 1401 return bpage->busaddr; 1402 } 1403 1404 static void 1405 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1406 { 1407 struct bounce_zone *bz = dmat->bounce_zone; 1408 bus_dmamap_t map; 1409 1410 bpage->datavaddr = 0; 1411 bpage->datacount = 0; 1412 1413 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1414 /* 1415 * Reset the bounce page to start at offset 0. Other uses 1416 * of this bounce page may need to store a full page of 1417 * data and/or assume it starts on a page boundary. 1418 */ 1419 bpage->vaddr &= ~PAGE_MASK; 1420 bpage->busaddr &= ~PAGE_MASK; 1421 } 1422 1423 BZ_LOCK(bz); 1424 1425 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1426 1427 bz->free_bpages++; 1428 KKASSERT(bz->free_bpages <= bz->total_bpages); 1429 1430 KKASSERT(bz->active_bpages > 0); 1431 bz->active_bpages--; 1432 1433 map = get_map_waiting(dmat); 1434 1435 BZ_UNLOCK(bz); 1436 1437 if (map != NULL && map != (void *)-1) 1438 add_map_callback(map); 1439 } 1440 1441 /* Assume caller holds bounce zone spinlock */ 1442 static bus_dmamap_t 1443 get_map_waiting(bus_dma_tag_t dmat) 1444 { 1445 struct bounce_zone *bz = dmat->bounce_zone; 1446 bus_dmamap_t map; 1447 1448 map = STAILQ_FIRST(&bz->bounce_map_waitinglist); 1449 if (map != NULL && map != (void *)-1) { 1450 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1451 STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links); 1452 bz->total_deferred++; 1453 } else { 1454 map = NULL; 1455 } 1456 } 1457 return map; 1458 } 1459 1460 static void 1461 add_map_callback(bus_dmamap_t map) 1462 { 1463 spin_lock(&bounce_map_list_spin); 1464 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links); 1465 busdma_swi_pending = 1; 1466 setsoftvm(); 1467 spin_unlock(&bounce_map_list_spin); 1468 } 1469 1470 void 1471 busdma_swi(void) 1472 { 1473 bus_dmamap_t map; 1474 1475 spin_lock(&bounce_map_list_spin); 1476 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1477 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1478 spin_unlock(&bounce_map_list_spin); 1479 bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1480 map->callback, map->callback_arg, /*flags*/0); 1481 spin_lock(&bounce_map_list_spin); 1482 } 1483 spin_unlock(&bounce_map_list_spin); 1484 } 1485 1486 int 1487 bus_space_map(bus_space_tag_t t __unused, bus_addr_t addr, bus_size_t size, 1488 int flags __unused, bus_space_handle_t *bshp) 1489 { 1490 1491 if (t == X86_64_BUS_SPACE_MEM) 1492 *bshp = (uintptr_t)pmap_mapdev(addr, size); 1493 else 1494 *bshp = addr; 1495 return (0); 1496 } 1497 1498 void 1499 bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size) 1500 { 1501 if (t == X86_64_BUS_SPACE_MEM) 1502 pmap_unmapdev(bsh, size); 1503 } 1504