/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

#define	MAX_BPAGES	MIN(8192, physmem/40)

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_addr_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	bus_size_t	maxsegsz;
	u_int		nsegments;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	struct bounce_zone *bounce_zone;
	device_t	iommu;
	void		*iommu_cookie;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	vm_page_t	datapage;	/* physical page of client data */
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	bus_dma_segment_t *segments;
	int		nsegs;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
	int		contigalloc;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter
 * callback to check for a match; if there is no filter callback, then
 * assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (dmat->filter == NULL && dmat->iommu == NULL &&
		    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
			retval = 1;
		if (dmat->filter == NULL &&
		    (paddr & (dmat->alignment - 1)) != 0)
			retval = 1;
		if (dmat->filter != NULL &&
		    (*dmat->filter)(dmat->filterarg, paddr) != 0)
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
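
/*
 * Example (illustrative only; the device restriction is hypothetical):
 * a bus_dma_filter_t returns nonzero when 'paddr' is unacceptable to the
 * device, which forces that address to be bounced.  A device unable to
 * DMA into the 15MB-16MB window might register:
 *
 *	static int
 *	foo_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *		return (paddr >= 15 * 1024 * 1024 &&
 *		    paddr < 16 * 1024 * 1024);
 *	}
 */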
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
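
/*
 * Example (illustrative only; the softc layout is hypothetical): a driver
 * whose load callbacks may be deferred passes busdma_lock_mutex together
 * with its own mutex at tag creation time, so that busdma_swi() can take
 * the driver lock around the deferred callback:
 *
 *	mtx_init(&sc->foo_mtx, "foo", NULL, MTX_DEF);
 *	error = bus_dma_tag_create(..., busdma_lock_mutex, &sc->foo_mtx,
 *	    &sc->foo_dmat);
 */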
#define	BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define	BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
		newtag->iommu = parent->iommu;
		newtag->iommu_cookie = parent->iommu_cookie;
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
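
/*
 * Example (illustrative only; 'dev', 'sc', and the limits are
 * hypothetical): a tag for a device restricted to 32-bit addressing,
 * moving up to 64KB in at most 16 segments:
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	parent
 *	    1, 0,			alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	lowaddr
 *	    BUS_SPACE_MAXADDR,		highaddr
 *	    NULL, NULL,			filter, filterarg
 *	    65536, 16,			maxsize, nsegments
 *	    65536,			maxsegsz
 *	    0,				flags
 *	    busdma_lock_mutex, &sc->foo_mtx,
 *	    &sc->foo_dmat);
 */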
void
bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
{

	if (t == NULL || dmat == NULL)
		return;

	t->parent = dmat->parent;
	t->alignment = dmat->alignment;
	t->boundary = dmat->boundary;
	t->lowaddr = dmat->lowaddr;
	t->highaddr = dmat->highaddr;
	t->maxsize = dmat->maxsize;
	t->nsegments = dmat->nsegments;
	t->maxsegsize = dmat->maxsegsz;
	t->flags = dmat->flags;
	t->lockfunc = dmat->lockfunc;
	t->lockfuncarg = dmat->lockfuncarg;
}

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

	return (0);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy __unused;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}
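
/*
 * Example (illustrative only): since bus_dma_tag_destroy() returns EBUSY
 * while map_count is nonzero, teardown releases maps before the tag:
 *
 *	bus_dmamap_unload(sc->foo_dmat, sc->foo_map);
 *	bus_dmamap_destroy(sc->foo_dmat, sc->foo_map);
 *	bus_dma_tag_destroy(sc->foo_dmat);
 */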
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t *)malloc(
	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
	    M_NOWAIT);
	if ((*mapp)->segments == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
	}
	free(map->segments, M_DEVBUF);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load() is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int error, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* Check for map creation failure; *mapp is NULL on error. */
	error = bus_dmamap_create(dmat, flags, mapp);
	if (error != 0)
		return (error);

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * XXX:
	 * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment <= dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
		    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary, attr);
		(*mapp)->contigalloc = 1;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
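
/*
 * Example (illustrative only; 'foo_ring' and the softc are hypothetical):
 * allocating a zeroed buffer that honors the tag's constraints, and
 * releasing it again:
 *
 *	error = bus_dmamem_alloc(sc->foo_dmat, &sc->foo_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->foo_map);
 *	...
 *	bus_dmamem_free(sc->foo_dmat, sc->foo_ring, sc->foo_map);
 */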
/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	if (!map->contigalloc)
		free(vaddr, M_DEVBUF);
	else
		kmem_free((vm_offset_t)vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
		    map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
		    map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}
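
/*
 * Example (illustrative only; 'foo_load_cb' is hypothetical): a load
 * without BUS_DMA_NOWAIT can return EINPROGRESS from the path above; the
 * map is then queued and the callback fires later from busdma_swi() once
 * bounce pages free up, so callers usually treat EINPROGRESS as
 * success-in-waiting:
 *
 *	error = bus_dmamap_load(sc->foo_dmat, sc->foo_map, buf, buflen,
 *	    foo_load_cb, sc, 0);
 *	if (error == EINPROGRESS)
 *		error = 0;
 */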
/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize,
			    PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}
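
/*
 * Worked example of the boundary clipping in _bus_dmamap_addseg() above
 * (numbers illustrative): with dmat->boundary = 0x10000 and
 * curaddr = 0x1fff0,
 *
 *	bmask = ~(0x10000 - 1)
 *	baddr = (0x1fff0 + 0x10000) & bmask = 0x20000
 *
 * so sgsize is clipped to baddr - curaddr = 0x10 bytes, keeping the
 * segment from crossing the 64KB boundary at 0x20000.
 */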
/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    pmap_t pmap,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t kvaddr, vaddr;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
			    sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		map->dmat = dmat;
		map->mem = *mem;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	map->nsegs = nsegs;
	if (segs != NULL)
		memcpy(map->segments, segs, map->nsegs * sizeof(segs[0]));
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
		    dmat->lowaddr, dmat->highaddr, dmat->alignment,
		    dmat->boundary, dmat->iommu_cookie);

	if (segs != NULL)
		memcpy(segs, map->segments, map->nsegs * sizeof(segs[0]));
	else
		segs = map->segments;

	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	if (dmat->iommu) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs,
		    dmat->iommu_cookie);
		map->nsegs = 0;
	}

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}
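
/*
 * Example (illustrative only; the names are hypothetical): the loaders
 * deliver the finished segment list through the driver's
 * bus_dmamap_callback_t.  A minimal callback that records a
 * single-segment bus address:
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		bus_addr_t *busaddrp = arg;
 *
 *		if (error == 0 && nseg == 1)
 *			*busaddrp = segs[0].ds_addr;
 *	}
 */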
void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also want to add support
		 * for invalidating the caches on broken hardware.
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)datavaddr,
				    (void *)bpage->vaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)bpage->vaddr,
				    (void *)datavaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	powerpc_sync();
}
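
/*
 * Example (illustrative only): drivers bracket device access with sync
 * calls so that the bounce copies above run at the right time.  PREWRITE
 * copies into the bounce pages before the device reads the buffer;
 * POSTREAD copies back out after the device has written it:
 *
 *	bus_dmamap_sync(sc->foo_dmat, sc->foo_map, BUS_DMASYNC_PREWRITE);
 *	... start the transfer and wait for completion ...
 *	bus_dmamap_sync(sc->foo_dmat, sc->foo_map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->foo_dmat, sc->foo_map);
 */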
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment) &&
		    (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem,
		    map->callback, map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}

int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{
	tag->iommu = iommu;
	tag->iommu_cookie = cookie;

	return (0);
}