1 /* $NetBSD: bus_dma.c,v 1.11 2002/04/10 19:35:22 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/kernel.h> 43 #include <sys/map.h> 44 #include <sys/proc.h> 45 #include <sys/buf.h> 46 #include <sys/reboot.h> 47 #include <sys/conf.h> 48 #include <sys/file.h> 49 #include <sys/malloc.h> 50 #include <sys/mbuf.h> 51 #include <sys/vnode.h> 52 #include <sys/device.h> 53 54 #include <uvm/uvm_extern.h> 55 56 #define _ARM32_BUS_DMA_PRIVATE 57 #include <machine/bus.h> 58 59 #include <machine/cpu.h> 60 61 #include <arm/cpufunc.h> 62 63 int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *, 64 bus_size_t, struct proc *, int, paddr_t *, int *, int); 65 int _bus_dma_inrange(bus_dma_segment_t *, int, bus_addr_t); 66 67 /* 68 * Common function for DMA map creation. May be called by bus-specific 69 * DMA map creation functions. 70 */ 71 int 72 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, 73 bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp) 74 { 75 struct arm32_bus_dmamap *map; 76 void *mapstore; 77 size_t mapsize; 78 79 #ifdef DEBUG_DMA 80 printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n", 81 t, size, nsegments, maxsegsz, boundary, flags); 82 #endif /* DEBUG_DMA */ 83 84 /* 85 * Allocate and initialize the DMA map. The end of the map 86 * is a variable-sized array of segments, so we allocate enough 87 * room for them in one shot. 
88 * 89 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation 90 * of ALLOCNOW notifies others that we've reserved these resources, 91 * and they are not to be freed. 92 * 93 * The bus_dmamap_t includes one bus_dma_segment_t, hence 94 * the (nsegments - 1). 95 */ 96 mapsize = sizeof(struct arm32_bus_dmamap) + 97 (sizeof(bus_dma_segment_t) * (nsegments - 1)); 98 if ((mapstore = malloc(mapsize, M_DMAMAP, 99 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) 100 return (ENOMEM); 101 102 memset(mapstore, 0, mapsize); 103 map = (struct arm32_bus_dmamap *)mapstore; 104 map->_dm_size = size; 105 map->_dm_segcnt = nsegments; 106 map->_dm_maxsegsz = maxsegsz; 107 map->_dm_boundary = boundary; 108 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT); 109 map->_dm_proc = NULL; 110 map->dm_mapsize = 0; /* no valid mappings */ 111 map->dm_nsegs = 0; 112 113 *dmamp = map; 114 #ifdef DEBUG_DMA 115 printf("dmamap_create:map=%p\n", map); 116 #endif /* DEBUG_DMA */ 117 return (0); 118 } 119 120 /* 121 * Common function for DMA map destruction. May be called by bus-specific 122 * DMA map destruction functions. 123 */ 124 void 125 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map) 126 { 127 128 #ifdef DEBUG_DMA 129 printf("dmamap_destroy: t=%p map=%p\n", t, map); 130 #endif /* DEBUG_DMA */ 131 #ifdef DIAGNOSTIC 132 if (map->dm_nsegs > 0) 133 printf("bus_dmamap_destroy() called for map with valid mappings\n"); 134 #endif /* DIAGNOSTIC */ 135 free(map, M_DEVBUF); 136 } 137 138 /* 139 * Common function for loading a DMA map with a linear buffer. May 140 * be called by bus-specific DMA map load functions. 
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/* The buffer must fit in the size the map was created with. */
	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		/* seg is the index of the last segment filled in. */
		map->dm_nsegs = seg + 1;
		/* Record the owner so _bus_dmamap_sync() can skip cache
		 * maintenance when the mapping's vmspace isn't current. */
		map->_dm_proc = p;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	/* Only a packet-header mbuf carries the chain's total length. */
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	/* Walk the mbuf chain, appending each mbuf's data to the map;
	 * mbuf data always lives in kernel space, hence p == NULL. */
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_proc = NULL;	/* always kernel */
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* User-space uios need the owning proc for address translation. */
	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ?
		    resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_proc = p;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	/* Not supported on this platform yet. */
	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_proc = NULL;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/* The sync window must lie entirely within the loaded mapping. */
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/* Drop the POST bits; only PRE operations need cache work here. */
	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * XXX Skip cache frobbing if mapping was COHERENT.
	 */

	/*
	 * If the mapping is not the kernel's and also not the
	 * current process's (XXX actually, vmspace), then we
	 * don't have anything to do, since the cache is Wb-Inv'd
	 * on context switch.
	 *
	 * XXX REVISIT WHEN WE DO FCSE!
	 */
	if (__predict_false(map->_dm_proc != NULL && map->_dm_proc != curproc))
		return;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find beginning segment.
		 */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		/* The cache ops take kernel virtual addresses. */
		addr = map->dm_segs[i]._ds_vaddr;

#ifdef DEBUG_DMA
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			cpu_dcache_wbinv_range(addr + offset, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			cpu_dcache_wbinv_range(addr + offset, minlen);
#else
			cpu_dcache_inv_range(addr + offset, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			cpu_dcache_wb_range(addr + offset, minlen);
			break;
		}
#ifdef DEBUG_DMA
		printf("\n");
#endif
		/* offset only applies within the first synced segment. */
		offset = 0;
		len -= minlen;
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

/* Physical-memory bounds exported by the platform startup code. */
extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int error;
#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags);
#endif	/* DEBUG_DMA */
	/* Allow allocation from anywhere in physical RAM. */
	error = (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, trunc_page(physical_start), trunc_page(physical_end)));
#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif	/* DEBUG_DMA */
	return(error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		/* Each segment is a run of physically contiguous pages. */
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	/* Reserve a page-aligned chunk of kernel virtual address space. */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			/* Segments must not exceed the rounded size. */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, NBPG);
				cpu_drain_writebuf();
				/* Clear the bufferable/cacheable PTE bits,
				 * then flush the TLB to drop stale entries. */
				ptep = vtopte(va);
				*ptep &= ~(L2_B | L2_C);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	/* The kva must be page-aligned, as returned by _bus_dmamem_map(). */
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		/* Skip whole segments that precede the requested offset. */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		/* Return the page frame number for the target page. */
		return (arm_btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	/* Translate through the caller's pmap for user buffers. */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	/* NOTE(review): bmask is only meaningful when _dm_boundary != 0;
	 * every use below is guarded by that check. */
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 * NOTE(review): the return value of pmap_extract() is
		 * discarded, so an unmapped vaddr goes undetected here.
		 */
		(void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);

		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 * Process at most up to the next page boundary per pass.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			/* Coalesce only if physically contiguous, still
			 * within the maximum segment size, and on the same
			 * side of any boundary. */
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				/* Out of segments: bail with buflen != 0,
				 * reported as EFBIG below. */
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(bus_dma_segment_t *ranges, int nranges, bus_addr_t curaddr)
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		/* The whole page containing curaddr must lie in the range. */
		if (curaddr >= ds->ds_addr &&
		    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
			return (1);
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		/* Physically contiguous pages extend the current segment;
		 * otherwise start a new one. */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}