/*	$NetBSD: bus_dma.c,v 1.38 2011/01/18 01:02:55 matt Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _POWERPC_BUS_DMA_PRIVATE
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.38 2011/01/18 01:02:55 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm.h>

#include <machine/bus.h>
#include <machine/intr.h>

#ifdef PPC_BOOKE
#define	EIEIO	__asm volatile("mbar\t0")
#define	SYNC	__asm volatile("msync")
#else
#define	EIEIO	__asm volatile("eieio")
#define	SYNC	__asm volatile("sync")
#endif

int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);

static inline void
dcbst(paddr_t pa, long len, int dcache_line_size)
{
	paddr_t epa;
	for (epa = pa + len; pa < epa; pa += dcache_line_size)
		__asm volatile("dcbst 0,%0" :: "r"(pa));
}

static inline void
dcbi(paddr_t pa, long len, int dcache_line_size)
{
	paddr_t epa;
	for (epa = pa + len; pa < epa; pa += dcache_line_size)
		__asm volatile("dcbi 0,%0" :: "r"(pa));
}

static inline void
dcbf(paddr_t pa, long len, int dcache_line_size)
{
	paddr_t epa;
	for (epa = pa + len; pa < epa; pa += dcache_line_size)
		__asm volatile("dcbf 0,%0" :: "r"(pa));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct powerpc_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct powerpc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct powerpc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = t->_bounce_thresh;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}
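
/*
 * Illustrative usage (a sketch, not part of the original file): how a
 * hypothetical driver would create and later destroy a map through the
 * MI bus_dmamap_create()/bus_dmamap_destroy() wrappers backed by the
 * functions above.  "sc", "sc_dmat" and "sc_dmamap" are assumed driver
 * names.
 */
#if 0
	/* One 4 KB transfer, one segment, no boundary restriction. */
	if (bus_dmamap_create(sc->sc_dmat, 4096, 1, 4096, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0)
		return ENOMEM;
	/* ... load, use, and unload the map ... */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
#endif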

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

//	printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
//	    t, map, buf, buflen, vm, flags, lastaddrp, segp, first);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (void *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		sgsize = min(sgsize, map->dm_maxsegsz);

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = PHYS_TO_BUS_MEM(t, curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (PHYS_TO_BUS_MEM(t, curaddr) & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    PHYS_TO_BUS_MEM(t, curaddr);
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}
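
/*
 * Worked example of the boundary clipping above (illustrative): with
 * _dm_boundary = 0x1000, bmask = ~(0x1000 - 1) = 0xfffff000.  For
 * curaddr = 0x12f80, baddr = (0x12f80 + 0x1000) & 0xfffff000 = 0x13000,
 * so sgsize is clipped to 0x13000 - 0x12f80 = 0x80 and the following
 * chunk starts a new segment exactly at the boundary.
 */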

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    PHYS_TO_BUS_MEM(t, lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
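
/*
 * Illustrative sketch (not from this file): a hypothetical network
 * driver's transmit path loading an mbuf chain through the MI
 * bus_dmamap_load_mbuf() wrapper.  "sc_dmat" and "txmap" are assumed
 * names.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/*
		 * The chain has more than _dm_segcnt segments; a real
		 * driver would coalesce the chain into fewer mbufs
		 * (e.g. with m_defrag()) and retry the load.
		 */
	}
#endif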

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr = 0;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
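
/*
 * Illustrative sketch (not from this file): a character driver could
 * feed a user read/write request to the uio loader above, then unload
 * once the transfer completes.  "sc" and its members are assumed names.
 */
#if 0
	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_dmamap, uio,
	    BUS_DMA_NOWAIT);
	if (error == 0) {
		/* ... start the transfer and wait for completion ... */
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	}
#endif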

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
	const bus_dma_segment_t *ds = map->dm_segs;

//	printf("%s(%p,%p,%#x,%u,%#x) from %p\n", __func__,
//	    t, map, offset, len, ops, __builtin_return_address(0));

	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: invalid ops %#x", ops);

#ifdef DIAGNOSTIC
	if (offset + len > map->dm_mapsize)
		panic("%s: ops %#x mapsize %u: bad offset (%u) and/or length (%u)",
		    __func__, ops, map->dm_mapsize, offset, len);
#endif

	/*
	 * Skip leading amount
	 */
	while (offset >= ds->ds_len) {
		offset -= ds->ds_len;
		ds++;
	}
	EIEIO;
	for (; len > 0; ds++, offset = 0) {
		bus_size_t seglen = ds->ds_len - offset;
		bus_addr_t addr = BUS_MEM_TO_PHYS(t, ds->ds_addr) + offset;
		if (seglen > len)
			seglen = len;
		len -= seglen;
		KASSERT(ds < &map->dm_segs[map->dm_nsegs]);
		/*
		 * Readjust things to start on cacheline boundaries
		 */
		offset = (addr & (dcache_line_size-1));
		seglen += offset;
		addr -= offset;
		/*
		 * Now do the appropriate thing.
		 */
		switch (ops) {
		case BUS_DMASYNC_PREWRITE:
			/*
			 * Make sure cache contents are in memory for the DMA.
			 */
			dcbst(addr, seglen, dcache_line_size);
			break;
		case BUS_DMASYNC_PREREAD:
			/*
			 * If the region to be invalidated doesn't fall on
			 * cacheline boundary, flush that cacheline so we
			 * preserve the leading content.
			 */
			if (offset) {
				dcbf(addr, 1, 1);
				/*
				 * If we are doing <= one cache line, stop now.
				 */
				if (seglen <= dcache_line_size)
					break;
				/*
				 * Advance one cache line since we've flushed
				 * this one.
				 */
				addr += dcache_line_size;
				seglen -= dcache_line_size;
			}
			/*
			 * If the byte after the region to be invalidated
			 * doesn't fall on cacheline boundary, flush that
			 * cacheline so we preserve the trailing content.
			 */
			if (seglen & (dcache_line_size-1)) {
				dcbf(addr + seglen, 1, 1);
				if (seglen <= dcache_line_size)
					break;
				/*
				 * Truncate the length to a multiple of a
				 * dcache line size.  No reason to flush
				 * the last entry again.
				 */
				seglen &= ~(dcache_line_size - 1);
			}
			SYNC;	/* is this needed? */
			EIEIO;	/* is this needed? */
			/* FALLTHROUGH */
		case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
		case BUS_DMASYNC_POSTREAD:
			/*
			 * The contents will have changed, make sure to remove
			 * them from the cache.  Note: some implementations
			 * implement dcbi identically to dcbf.  Thus if the
			 * cacheline has data, it will be written to memory.
			 * If the DMA is updating the same cacheline at the
			 * time, bad things can happen.
			 */
			dcbi(addr, seglen, dcache_line_size);
			break;
		case BUS_DMASYNC_POSTWRITE:
			/*
			 * Do nothing.
			 */
			break;
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			/*
			 * Force it to memory and remove from cache.
			 */
			dcbf(addr, seglen, dcache_line_size);
			break;
		}
	}
	__asm volatile("sync");
}
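
/*
 * Illustrative sketch: how a driver brackets a device-to-memory
 * transfer with the sync operations handled above.  PREREAD flushes or
 * invalidates cache lines before the device writes; POSTREAD discards
 * stale lines before the CPU reads the result.  "sc", "map" and "len"
 * are assumed names.
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... program the device and wait for the DMA to complete ... */
	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
	/* CPU reads of the buffer are now coherent. */
#endif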

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t start = 0xffffffff, end = 0;
	int bank;

	for (bank = 0; bank < vm_nphysseg; bank++) {
		if (start > VM_PHYSMEM_PTR(bank)->avail_start << PGSHIFT)
			start = VM_PHYSMEM_PTR(bank)->avail_start << PGSHIFT;
		if (end < VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT)
			end = VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT;
	}

	return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
	    nsegs, rsegs, flags, start, end - PAGE_SIZE);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr);
		     addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr)
			 + segs[curseg].ds_len);
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr);
		     addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr)
			 + segs[curseg].ds_len);
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			/*
			 * If we are mapping nocache, flush the page from
			 * cache before we map it.
			 */
			if (flags & BUS_DMA_NOCACHE)
				dcbf(addr, PAGE_SIZE,
				    curcpu()->ci_ci.dcache_line_size);
			pmap_kenter_pa(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED |
			    ((flags & BUS_DMA_NOCACHE) ? PMAP_MD_NOCACHE : 0));
		}
	}

	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);

	pmap_kremove((vaddr_t)kva, size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
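
/*
 * Illustrative sketch: the usual alloc/map/create/load sequence for a
 * DMA-safe descriptor ring, built on the functions above.  Error
 * unwinding is omitted and all names ("sc", "ringsize", "seg", "rseg")
 * are assumed.
 */
#if 0
	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, ringsize,
		    (void **)&sc->sc_ring, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamap_create(sc->sc_dmat, ringsize, 1,
		    ringsize, 0, BUS_DMA_NOWAIT, &sc->sc_ringmap);
	if (error == 0)
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_ringmap,
		    sc->sc_ring, ringsize, NULL, BUS_DMA_NOWAIT);
#endif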

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (BUS_MEM_TO_PHYS(t, segs[i].ds_addr) & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (BUS_MEM_TO_PHYS(t, segs[i].ds_addr) + off);
	}

	/* Page not found. */
	return (-1);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_addr = PHYS_TO_BUS_MEM(t, lastaddr);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = PHYS_TO_BUS_MEM(t, curaddr);
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Generic form of PHYS_TO_BUS_MEM().
 */
bus_addr_t
_bus_dma_phys_to_bus_mem_generic(bus_dma_tag_t t, bus_addr_t addr)
{

	return (addr);
}

/*
 * Generic form of BUS_MEM_TO_PHYS().
 */
bus_addr_t
_bus_dma_bus_mem_to_phys_generic(bus_dma_tag_t t, bus_addr_t addr)
{

	return (addr);
}
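
/*
 * Illustrative sketch: a driver's d_mmap method exporting DMA-safe
 * memory to userland through the MI bus_dmamem_mmap() wrapper backed
 * by the function above.  "foo_softc", "foo_cd" and the segment
 * members are assumed names.
 */
#if 0
paddr_t
foo_mmap(dev_t dev, off_t off, int prot)
{
	struct foo_softc *sc = device_lookup_private(&foo_cd, minor(dev));

	return bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs, sc->sc_rsegs,
	    off, prot, BUS_DMA_NOCACHE);
}
#endif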