/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_addr_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	bus_size_t	  maxsegsz;
	u_int		  nsegments;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	struct bounce_zone *bounce_zone;
	device_t	  iommu;
	void		 *iommu_cookie;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	vm_page_t	datapage;	/* physical page of client data */
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	bus_dma_segment_t *segments;
	int		nsegs;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
	int		contigalloc;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (dmat->filter == NULL && dmat->iommu == NULL &&
		    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
			retval = 1;
		if (dmat->filter == NULL &&
		    (paddr & (dmat->alignment - 1)) != 0)
			retval = 1;
		if (dmat->filter != NULL &&
		    (*dmat->filter)(dmat->filterarg, paddr) != 0)
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
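
/*
 * Illustrative sketch (not part of this file): a driver whose map loads
 * may be deferred usually passes busdma_lock_mutex plus its own mutex as
 * the lockfunc/lockfuncarg pair at tag creation time, so that busdma_swi()
 * below can invoke the deferred callback under the driver's lock.  The
 * softc fields here are hypothetical:
 *
 *	mtx_init(&sc->foo_mtx, "foo", NULL, MTX_DEF);
 *	error = bus_dma_tag_create(..., busdma_lock_mutex, &sc->foo_mtx,
 *	    &sc->foo_dmat);
 */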

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
		newtag->iommu = parent->iommu;
		newtag->iommu_cookie = parent->iommu_cookie;
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
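
/*
 * Usage sketch (hypothetical driver, not part of this file): a device
 * limited to 32-bit DMA with 4-byte alignment and a single 64KB segment
 * might create its tag as follows; error handling is elided and the
 * softc field is an assumption.
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	parent tag from the bus
 *	    4, 0,			alignment, no boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	lowaddr
 *	    BUS_SPACE_MAXADDR,		highaddr
 *	    NULL, NULL,			no filter
 *	    65536, 1, 65536,		maxsize, nsegments, maxsegsz
 *	    0, NULL, NULL, &sc->dmat);
 *
 * On this platform a lowaddr below the top of physical memory marks the
 * tag BUS_DMA_COULD_BOUNCE unless an IOMMU is attached to it.
 */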

void
bus_dma_template_init(bus_dma_tag_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_tag_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize, t->nsegments,
	    t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg, dmat));
}

void
bus_dma_template_clone(bus_dma_tag_template_t *t, bus_dma_tag_t dmat)
{

	if (t == NULL || dmat == NULL)
		return;

	t->parent = dmat->parent;
	t->alignment = dmat->alignment;
	t->boundary = dmat->boundary;
	t->lowaddr = dmat->lowaddr;
	t->highaddr = dmat->highaddr;
	t->maxsize = dmat->maxsize;
	t->nsegments = dmat->nsegments;
	t->maxsegsize = dmat->maxsegsz;
	t->flags = dmat->flags;
	t->lockfunc = dmat->lockfunc;
	t->lockfuncarg = dmat->lockfuncarg;
}
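
/*
 * Template usage sketch (hypothetical, not part of this file): templates
 * let a driver start from the unrestricted defaults established by
 * bus_dma_template_init() and override only the fields it cares about
 * before instantiating a tag:
 *
 *	bus_dma_tag_template_t t;
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
 *	t.maxsize = t.maxsegsize = PAGE_SIZE;
 *	t.nsegments = 1;
 *	error = bus_dma_template_tag(&t, &sc->dmat);
 */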

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

	return (0);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy __unused;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t *)malloc(
	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
	    M_NOWAIT);
	if ((*mapp)->segments == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
	}
	free(map->segments, M_DEVBUF);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}
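
/*
 * Lifecycle sketch (hypothetical driver, not part of this file): a map is
 * created once per in-flight transfer slot and reused across loads; it
 * must be unloaded before it is destroyed, since bus_dmamap_destroy()
 * above returns EBUSY while bounce pages are still attached:
 *
 *	error = bus_dmamap_create(sc->dmat, 0, &sc->map);
 *	...
 *	error = bus_dmamap_load(sc->dmat, sc->map, buf, len,
 *	    foo_callback, sc, BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_unload(sc->dmat, sc->map);
 *	bus_dmamap_destroy(sc->dmat, sc->map);
 */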

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	bus_dmamap_create(dmat, flags, mapp);

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * XXX:
	 * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment <= dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
		    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary, attr);
		(*mapp)->contigalloc = 1;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap, which was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	if (!map->contigalloc)
		free(vaddr, M_DEVBUF);
	else
		kmem_free((vm_offset_t)vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
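
/*
 * Usage sketch (hypothetical driver, not part of this file): a driver
 * allocates a DMA-able descriptor ring and its map in a single call, then
 * frees both the same way; the softc fields are assumptions:
 *
 *	error = bus_dmamem_alloc(sc->dmat, (void **)&sc->ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->dmat, sc->ring, sc->ring_map);
 */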

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
		    map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
		    map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
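
/*
 * Worked example of the boundary clipping in _bus_dmamap_addseg() above
 * (numbers are illustrative): with dmat->boundary = 0x10000 (64KB) and
 * curaddr = 0x2fffe00, bmask = ~0xffff, so
 * baddr = (0x2fffe00 + 0x10000) & ~0xffff = 0x3000000, and a requested
 * sgsize of 0x1000 is clipped to baddr - curaddr = 0x200 so the segment
 * ends exactly at the 64KB line; the remainder starts a new segment on
 * the next pass.
 */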

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t kvaddr, vaddr;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
			    sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		map->dmat = dmat;
		map->mem = *mem;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	map->nsegs = nsegs;
	if (segs != NULL)
		memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
		    dmat->lowaddr, dmat->highaddr, dmat->alignment,
		    dmat->boundary, dmat->iommu_cookie);

	if (segs != NULL)
		memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
	else
		segs = map->segments;

	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	if (dmat->iommu) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs,
		    dmat->iommu_cookie);
		map->nsegs = 0;
	}

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}
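
/*
 * Callback sketch (hypothetical driver, not part of this file): when a
 * load is queued on the bounce map waiting list, bus_dmamap_load()
 * returns EINPROGRESS and the callback fires later from busdma_swi(), so
 * drivers treat EINPROGRESS as success-in-progress:
 *
 *	static void
 *	foo_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->dmat, sc->map, buf, len,
 *	    foo_callback, sc, BUS_DMA_WAITOK);
 *	if (error != 0 && error != EINPROGRESS)
 *		...handle failure...
 */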

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)datavaddr,
				    (void *)bpage->vaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)bpage->vaddr,
				    (void *)datavaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	powerpc_sync();
}
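
/*
 * Sync usage sketch (hypothetical driver, not part of this file): the
 * bounce copies above happen only inside bus_dmamap_sync(), and the
 * trailing powerpc_sync() orders the transfer even for maps that never
 * bounce, so every transfer must be bracketed by sync calls:
 *
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREREAD);
 *	...program the device to DMA into the buffer...
 *	...completion interrupt arrives...
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTREAD);
 */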

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment) &&
		    (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
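
/*
 * Worked example of the reservation semantics above (numbers are
 * illustrative): a map needing 4 pages with 0 reserved, against a zone
 * holding 3 free pages, computes pages = min(3, 4) = 3.  With commit == 0
 * the call returns the shortfall (1) without touching the counters, so
 * the BUS_DMA_NOWAIT path fails cleanly; with commit == 1 the partial
 * reservation is kept and the nonzero return queues the map on the
 * waiting list until free_bounce_page() can satisfy the remainder.
 */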

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem,
		    map->callback, map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}

int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{
	tag->iommu = iommu;
	tag->iommu_cookie = cookie;

	return (0);
}
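
/*
 * Platform usage sketch (hypothetical, not part of this file): bus
 * attachment code hands its IOMMU to a child's tag before the child
 * creates derived tags; bus_dma_tag_create() above then propagates the
 * iommu and cookie to every descendant tag, which also keeps
 * BUS_DMA_COULD_BOUNCE from being set on them:
 *
 *	bus_dma_tag_set_iommu(bus_get_dma_tag(child), iommu_dev, cookie);
 */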