1 /* $NetBSD: bus_dma.c,v 1.11 2001/11/14 18:15:12 thorpej Exp $ */ 2 /* NetBSD: bus_dma.c,v 1.20 2000/01/10 03:24:36 simonb Exp */ 3 4 /*- 5 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 10 * NASA Ames Research Center. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the NetBSD 23 * Foundation, Inc. and its contributors. 24 * 4. Neither the name of The NetBSD Foundation nor the names of its 25 * contributors may be used to endorse or promote products derived 26 * from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 29 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 30 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 31 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <mips/cache.h>

#define _ARC_BUS_DMA_PRIVATE
#include <machine/bus.h>

/* Kernel virtual -> physical translation helper (see pmap). */
paddr_t kvtophys __P((vaddr_t));	/* XXX */

static int _bus_dmamap_load_buffer __P((bus_dma_tag_t, bus_dmamap_t,
	    void *, bus_size_t, struct proc *, int, paddr_t *,
	    int *, int));

extern paddr_t avail_start, avail_end;	/* from pmap.c */

/*
 * Initialize a DMA tag with the common (direct-mapped) method
 * implementations and a zero bus-to-CPU address offset.  Bus
 * front-ends may override individual methods, or dma_offset,
 * after calling this.
 */
void
_bus_dma_tag_init(t)
	bus_dma_tag_t t;
{
	t->dma_offset = 0;

	t->_dmamap_create = _bus_dmamap_create;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = _bus_dmamap_load;
	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf;
	t->_dmamap_load_uio = _bus_dmamap_load_uio;
	t->_dmamap_load_raw = _bus_dmamap_load_raw;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = _bus_dmamap_sync;
	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;	/* maximum total transfer size for this map */
	int nsegments;		/* maximum number of DMA segments */
	bus_size_t maxsegsz;	/* maximum size of a single segment */
	bus_size_t boundary;	/* no segment may cross this boundary; 0 = none */
	int flags;
	bus_dmamap_t *dmamp;	/* out: the newly created map */
{
	struct arc_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct arc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_proc = NULL;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.  Simply releases the map storage
 * allocated by _bus_dmamap_create().
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;		/* address space buf lives in; NULL = kernel */
	int flags;
	paddr_t *lastaddrp;	/* in/out: phys addr just past last chunk loaded */
	int *segp;		/* in/out: current segment index */
	int first;		/* non-zero on first invocation for this map */
{
	bus_size_t sgsize;
	bus_addr_t baddr, bmask;
	paddr_t curaddr, lastaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	/* bmask selects the "boundary window" bits of a physical address. */
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * NOTE(review): pmap_extract()'s return value is ignored;
		 * an unmapped user page would leave curaddr stale --
		 * presumably callers guarantee the buffer is resident.
		 */
		if (p != NULL) {
			(void) pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr, &curaddr);
		} else
			curaddr = kvtophys(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * A chunk never extends past the end of its page.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr + t->dma_offset;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			map->dm_segs[seg]._ds_paddr = curaddr;
			first = 0;
		} else {
			/*
			 * Coalesce only if physically contiguous, the
			 * combined length still fits in maxsegsz, and
			 * the chunk stays in the same boundary window.
			 */
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg]._ds_paddr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				/* Ran out of segments: report EFBIG below. */
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    curaddr + t->dma_offset;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
				map->dm_segs[seg]._ds_paddr = curaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_proc = p;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
284 */ 285 if (buf >= (void *)MIPS_KSEG1_START && 286 buf < (void *)MIPS_KSEG2_START) 287 map->_dm_flags |= ARC_DMAMAP_COHERENT; 288 } 289 return (error); 290 } 291 292 /* 293 * Like _bus_dmamap_load(), but for mbufs. 294 */ 295 int 296 _bus_dmamap_load_mbuf(t, map, m0, flags) 297 bus_dma_tag_t t; 298 bus_dmamap_t map; 299 struct mbuf *m0; 300 int flags; 301 { 302 paddr_t lastaddr; 303 int seg, error, first; 304 struct mbuf *m; 305 306 /* 307 * Make sure that on error condition we return "no valid mappings." 308 */ 309 map->dm_mapsize = 0; 310 map->dm_nsegs = 0; 311 312 #ifdef DIAGNOSTIC 313 if ((m0->m_flags & M_PKTHDR) == 0) 314 panic("_bus_dmamap_load_mbuf: no packet header"); 315 #endif 316 317 if (m0->m_pkthdr.len > map->_dm_size) 318 return (EINVAL); 319 320 first = 1; 321 seg = 0; 322 error = 0; 323 for (m = m0; m != NULL && error == 0; m = m->m_next) { 324 error = _bus_dmamap_load_buffer(t, map, 325 m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first); 326 first = 0; 327 } 328 if (error == 0) { 329 map->dm_mapsize = m0->m_pkthdr.len; 330 map->dm_nsegs = seg + 1; 331 map->_dm_proc = NULL; /* always kernel */ 332 } 333 return (error); 334 } 335 336 /* 337 * Like _bus_dmamap_load(), but for uios. 338 */ 339 int 340 _bus_dmamap_load_uio(t, map, uio, flags) 341 bus_dma_tag_t t; 342 bus_dmamap_t map; 343 struct uio *uio; 344 int flags; 345 { 346 paddr_t lastaddr; 347 int seg, i, error, first; 348 bus_size_t minlen, resid; 349 struct proc *p = NULL; 350 struct iovec *iov; 351 caddr_t addr; 352 353 /* 354 * Make sure that on error condition we return "no valid mappings." 
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* User-space uios need the owning process for pmap lookups. */
	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		/*
		 * NOTE(review): a zero-length iovec would clear `first'
		 * without loading anything (cf. the mbuf case);
		 * presumably callers never pass one -- verify.
		 */
		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_proc = p;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 * Not implemented on this platform.
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~ARC_DMAMAP_COHERENT;
}

/*
 * Common function for DMA map synchronization.  May be called by
 * chipset-specific DMA map synchronization functions.
 *
 * This version works with the virtually-indexed, write-back cache
 * found in the MIPS-3 CPUs available in ARC machines.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;	/* byte offset into the mapping */
	bus_size_t len;		/* number of bytes to sync */
	int ops;		/* BUS_DMASYNC_* operation mask */
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	/* Only the PRE ops require any cache work. */
	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & ARC_DMAMAP_COHERENT)
		return;

	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(map->_dm_proc == NULL || map->_dm_proc == curproc))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		/* Cache ops take the virtual address recorded at load time. */
		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(addr + offset, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			/*
			 * Write-back,Invalidate instead of a plain
			 * Invalidate, which is unsafe for buffers not
			 * aligned to a cache line (see comment above).
			 */
			mips_dcache_wbinv_range(addr + offset, minlen);
#else
			mips_dcache_inv_range(addr + offset, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(addr + offset, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
575 */ 576 int 577 _bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags) 578 bus_dma_tag_t t; 579 bus_size_t size, alignment, boundary; 580 bus_dma_segment_t *segs; 581 int nsegs; 582 int *rsegs; 583 int flags; 584 { 585 586 return (_bus_dmamem_alloc_range(t, size, alignment, boundary, 587 segs, nsegs, rsegs, flags, avail_start, trunc_page(avail_end))); 588 } 589 590 /* 591 * Allocate physical memory from the given physical address range. 592 * Called by DMA-safe memory allocation methods. 593 */ 594 int 595 _bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs, 596 flags, low, high) 597 bus_dma_tag_t t; 598 bus_size_t size, alignment, boundary; 599 bus_dma_segment_t *segs; 600 int nsegs; 601 int *rsegs; 602 int flags; 603 paddr_t low; 604 paddr_t high; 605 { 606 paddr_t curaddr, lastaddr; 607 struct vm_page *m; 608 struct pglist mlist; 609 int curseg, error; 610 611 /* Always round the size. */ 612 size = round_page(size); 613 614 high = avail_end - PAGE_SIZE; 615 616 /* 617 * Allocate pages from the VM system. 618 */ 619 TAILQ_INIT(&mlist); 620 error = uvm_pglistalloc(size, low, high, alignment, boundary, 621 &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); 622 if (error) 623 return (error); 624 625 /* 626 * Compute the location, size, and number of segments actually 627 * returned by the VM code. 
628 */ 629 m = mlist.tqh_first; 630 curseg = 0; 631 lastaddr = segs[curseg]._ds_paddr = VM_PAGE_TO_PHYS(m); 632 segs[curseg].ds_addr = segs[curseg]._ds_paddr + t->dma_offset; 633 segs[curseg].ds_len = PAGE_SIZE; 634 m = m->pageq.tqe_next; 635 636 for (; m != NULL; m = m->pageq.tqe_next) { 637 curaddr = VM_PAGE_TO_PHYS(m); 638 #ifdef DIAGNOSTIC 639 if (curaddr < avail_start || curaddr >= high) { 640 printf("uvm_pglistalloc returned non-sensical" 641 " address 0x%llx\n", (long long)curaddr); 642 panic("_bus_dmamem_alloc_range"); 643 } 644 #endif 645 if (curaddr == (lastaddr + PAGE_SIZE)) 646 segs[curseg].ds_len += PAGE_SIZE; 647 else { 648 curseg++; 649 segs[curseg].ds_addr = curaddr + t->dma_offset; 650 segs[curseg].ds_len = PAGE_SIZE; 651 segs[curseg]._ds_paddr = curaddr; 652 } 653 lastaddr = curaddr; 654 } 655 656 *rsegs = curseg + 1; 657 658 return (0); 659 } 660 661 /* 662 * Common function for freeing DMA-safe memory. May be called by 663 * bus-specific DMA memory free functions. 664 */ 665 void 666 _bus_dmamem_free(t, segs, nsegs) 667 bus_dma_tag_t t; 668 bus_dma_segment_t *segs; 669 int nsegs; 670 { 671 struct vm_page *m; 672 bus_addr_t addr; 673 struct pglist mlist; 674 int curseg; 675 676 /* 677 * Build a list of pages to free back to the VM system. 678 */ 679 TAILQ_INIT(&mlist); 680 for (curseg = 0; curseg < nsegs; curseg++) { 681 for (addr = segs[curseg]._ds_paddr; 682 addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len); 683 addr += PAGE_SIZE) { 684 m = PHYS_TO_VM_PAGE(addr); 685 TAILQ_INSERT_TAIL(&mlist, m, pageq); 686 } 687 } 688 689 uvm_pglistfree(&mlist); 690 } 691 692 /* 693 * Common function for mapping DMA-safe memory. May be called by 694 * bus-specific DMA memory map functions. 
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;		/* out: kernel virtual address of mapping */
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.  KSEG1 is uncached, hence the COHERENT case.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)MIPS_PHYS_TO_KSEG1(segs[0]._ds_paddr);
		else
			*kvap = (caddr_t)MIPS_PHYS_TO_KSEG0(segs[0]._ds_paddr);
		return (0);
	}

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		/* Record each segment's KVA for _bus_dmamap_sync(). */
		segs[curseg]._ds_vaddr = va;
		for (addr = segs[curseg]._ds_paddr;
		    addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* XXX Do something about COHERENT here. */
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (caddr_t)MIPS_KSEG0_START &&
	    kva < (caddr_t)MIPS_KSEG2_START)
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
780 */ 781 paddr_t 782 _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags) 783 bus_dma_tag_t t; 784 bus_dma_segment_t *segs; 785 int nsegs; 786 off_t off; 787 int prot, flags; 788 { 789 int i; 790 791 for (i = 0; i < nsegs; i++) { 792 #ifdef DIAGNOSTIC 793 if (off & PGOFSET) 794 panic("_bus_dmamem_mmap: offset unaligned"); 795 if (segs[i]._ds_paddr & PGOFSET) 796 panic("_bus_dmamem_mmap: segment unaligned"); 797 if (segs[i].ds_len & PGOFSET) 798 panic("_bus_dmamem_mmap: segment size not multiple" 799 " of page size"); 800 #endif 801 if (off >= segs[i].ds_len) { 802 off -= segs[i].ds_len; 803 continue; 804 } 805 806 return (mips_btop(segs[i]._ds_paddr + off)); 807 } 808 809 /* Page not found. */ 810 return (-1); 811 } 812