/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

#define	MAX_BPAGES	MIN(8192, physmem/40)

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_addr_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	struct bounce_zone	*bounce_zone;
	device_t		iommu;
	void			*iommu_cookie;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	vm_page_t	datapage;	/* physical page of client data */
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};
static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list		bpages;
	int			pagesneeded;
	int			pagesreserved;
	bus_dma_tag_t		dmat;
	struct memdesc		mem;
	bus_dma_segment_t	*segments;
	int			nsegs;
	bus_dmamap_callback_t	*callback;
	void			*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
	int			contigalloc;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's checking 'paddr'
 * against each tag's constraints.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (dmat->filter == NULL && dmat->iommu == NULL &&
		    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
			retval = 1;
		if (dmat->filter == NULL &&
		    (paddr & (dmat->alignment - 1)) != 0)
			retval = 1;
		if (dmat->filter != NULL &&
		    (*dmat->filter)(dmat->filterarg, paddr) != 0)
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
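
/*
 * Illustrative sketch (not part of the original code, not compiled): a
 * driver that uses a non-mutex scheme can supply its own bus_dma_lock_t
 * instead of busdma_lock_mutex.  The "foo_softc" structure and its sx
 * lock are hypothetical names assumed only for this example.
 */
#if 0
#include <sys/sx.h>

static void
foo_busdma_lock(void *arg, bus_dma_lock_op_t op)
{
	struct foo_softc *sc = arg;

	switch (op) {
	case BUS_DMA_LOCK:
		sx_xlock(&sc->foo_sx);
		break;
	case BUS_DMA_UNLOCK:
		sx_xunlock(&sc->foo_sx);
		break;
	default:
		panic("foo_busdma_lock: unknown operation 0x%x", op);
	}
}
#endif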
#define	BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define	BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_addr_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourselves */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
		newtag->iommu = parent->iommu;
		newtag->iommu_cookie = parent->iommu_cookie;
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
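
/*
 * Illustrative sketch (not part of the original code, not compiled): a
 * typical driver tag for a device limited to 32-bit DMA, inheriting any
 * further restrictions from the parent bus tag.  "foo_softc", FOO_MAXSIZE,
 * and the softc members are hypothetical names assumed for this example.
 */
#if 0
static int
foo_dma_init(struct foo_softc *sc)
{

	return (bus_dma_tag_create(
	    bus_get_dma_tag(sc->foo_dev),	/* parent */
	    4,					/* alignment */
	    0,					/* no boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* no filter */
	    FOO_MAXSIZE,			/* maxsize */
	    1,					/* nsegments */
	    FOO_MAXSIZE,			/* maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex,			/* serialize deferred loads */
	    &sc->foo_mtx,			/* with the driver's mutex */
	    &sc->foo_dtag));
}
#endif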
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * That was the last reference,
				 * so release our reference
				 * on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
				     M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t *)malloc(
	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
	    M_NOWAIT);
	if ((*mapp)->segments == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
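
/*
 * Illustrative sketch (not part of the original code, not compiled): maps
 * must be unloaded and destroyed before their tag, since
 * bus_dma_tag_destroy() fails with EBUSY while map_count is nonzero.
 * The "foo" names are hypothetical.
 */
#if 0
static void
foo_dma_fini(struct foo_softc *sc)
{

	bus_dmamap_unload(sc->foo_dtag, sc->foo_map);
	bus_dmamap_destroy(sc->foo_dtag, sc->foo_map);
	bus_dma_tag_destroy(sc->foo_dtag);	/* map_count is now 0 */
}
#endif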
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
	}
	free(map->segments, M_DEVBUF);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	bus_dmamap_create(dmat, flags, mapp);

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
#ifdef NOTYET
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
#endif
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * XXX:
	 * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment <= dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = (void *)kmem_alloc_contig(kmem_arena, dmat->maxsize,
		    mflags, 0ul, dmat->lowaddr, dmat->alignment ?
		    dmat->alignment : 1ul, dmat->boundary, attr);
		(*mapp)->contigalloc = 1;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
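
/*
 * Illustrative sketch (not part of the original code, not compiled):
 * allocating a descriptor ring with bus_dmamem_alloc() and loading it to
 * learn its bus address.  The callback convention is the standard busdma
 * API; the "foo" names and FOO_RING_SIZE are hypothetical.
 */
#if 0
static void
foo_ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error == 0)
		*paddr = segs[0].ds_addr;	/* single segment expected */
}

static int
foo_alloc_ring(struct foo_softc *sc)
{
	int error;

	error = bus_dmamem_alloc(sc->foo_dtag, &sc->foo_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->foo_map);
	if (error != 0)
		return (error);
	return (bus_dmamap_load(sc->foo_dtag, sc->foo_map, sc->foo_ring,
	    FOO_RING_SIZE, foo_ring_cb, &sc->foo_ring_paddr, BUS_DMA_NOWAIT));
}
#endif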
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	if (!map->contigalloc)
		free(vaddr, M_DEVBUF);
	else
		kmem_free(kmem_arena, (vm_offset_t)vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
		    map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
		    map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
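
/*
 * Worked example of the boundary clamp in _bus_dmamap_addseg() above
 * (illustrative numbers only): with dmat->boundary = 0x1000 and
 * curaddr = 0x1f00,
 *
 *	bmask = ~(0x1000 - 1)              = ~0xfff
 *	baddr = (0x1f00 + 0x1000) & bmask  = 0x2000
 *
 * so a requested sgsize of 0x400 is clamped to baddr - curaddr = 0x100,
 * which keeps the segment from straddling the 0x2000 boundary.
 */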
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
		      bus_dmamap_t map,
		      vm_paddr_t buf, bus_size_t buflen,
		      int flags,
		      bus_dma_segment_t *segs,
		      int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_dma_segment_t *segs,
			int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t kvaddr, vaddr;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
			    sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
		   struct memdesc *mem, bus_dmamap_callback_t *callback,
		   void *callback_arg)
{

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		map->dmat = dmat;
		map->mem = *mem;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
		     bus_dma_segment_t *segs, int nsegs, int error)
{

	map->nsegs = nsegs;
	if (segs != NULL)
		memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
		    dmat->lowaddr, dmat->highaddr, dmat->alignment,
		    dmat->boundary, dmat->iommu_cookie);

	if (segs != NULL)
		memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
	else
		segs = map->segments;

	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	if (dmat->iommu) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs,
		    dmat->iommu_cookie);
		map->nsegs = 0;
	}

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)datavaddr,
				    (void *)bpage->vaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)bpage->vaddr,
				    (void *)datavaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	powerpc_sync();
}
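
/*
 * Illustrative sketch (not part of the original code, not compiled) of the
 * canonical sync discipline around a DMA transfer: PREWRITE copies client
 * data into bounce pages before the device reads it, POSTREAD copies it
 * back after the device writes.  The "foo" names are hypothetical.
 */
#if 0
	/* Host -> device transfer. */
	bus_dmamap_sync(sc->foo_dtag, sc->foo_map, BUS_DMASYNC_PREWRITE);
	foo_start_dma(sc);			/* device reads the buffer */
	/* ... interrupt signals completion ... */
	bus_dmamap_sync(sc->foo_dtag, sc->foo_map, BUS_DMASYNC_POSTWRITE);

	/* Device -> host transfer. */
	bus_dmamap_sync(sc->foo_dtag, sc->foo_map, BUS_DMASYNC_PREREAD);
	foo_start_dma(sc);			/* device writes the buffer */
	/* ... interrupt signals completion ... */
	bus_dmamap_sync(sc->foo_dtag, sc->foo_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->foo_dtag, sc->foo_map);
#endif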
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem,
		    map->callback, map->callback_arg,
		    BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}

int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{
	tag->iommu = iommu;
	tag->iommu_cookie = cookie;

	return (0);
}
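
/*
 * Illustrative sketch (not part of the original code, not compiled): a
 * platform bus driver that discovers an IOMMU can attach it to a child's
 * tag so that subsequent loads are translated through IOMMU_MAP() in
 * _bus_dmamap_complete() above.  The "fooiommu" names are hypothetical.
 */
#if 0
	bus_dma_tag_set_iommu(child_dtag, fooiommu_dev, fooiommu_softc);
#endif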