/*	$NetBSD: bus.c,v 1.13 2002/06/02 14:44:38 drochner Exp $	*/

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _COBALT_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <uvm/uvm_extern.h>

#include <mips/cache.h>

static int _bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
    struct proc *, int, vaddr_t *, int *, int);

struct cobalt_bus_dma_tag cobalt_default_bus_dma_tag = {
        _bus_dmamap_create,
        _bus_dmamap_destroy,
        _bus_dmamap_load,
        _bus_dmamap_load_mbuf,
        _bus_dmamap_load_uio,
        _bus_dmamap_load_raw,
        _bus_dmamap_unload,
        _bus_dmamap_sync,
        _bus_dmamem_alloc,
        _bus_dmamem_free,
        _bus_dmamem_map,
        _bus_dmamem_unmap,
        _bus_dmamem_mmap,
};

int
bus_space_map(t, bpa, size, flags, bshp)
        bus_space_tag_t t;
        bus_addr_t bpa;
        bus_size_t size;
        int flags;
        bus_space_handle_t *bshp;
{
        int cacheable = flags & BUS_SPACE_MAP_CACHEABLE;

        if (cacheable)
                *bshp = MIPS_PHYS_TO_KSEG0(bpa);
        else
                *bshp = MIPS_PHYS_TO_KSEG1(bpa);

        /* XXX Evil! */
        if (bpa < 0x10000000)
                *bshp += 0x10000000;

        return 0;
}
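
/*
 * Illustrative sketch only (compiled out): how a driver might map a
 * block of device registers through bus_space_map() above and access
 * them.  The physical address, size, and register offset used here
 * are hypothetical, not real Cobalt hardware addresses.
 */
#if 0
static int
example_map_regs(bus_space_tag_t iot)
{
        bus_space_handle_t ioh;

        /* Map 256 bytes of registers, uncached (no BUS_SPACE_MAP_CACHEABLE). */
        if (bus_space_map(iot, 0x0c000000, 0x100, 0, &ioh) != 0)
                return 1;

        /* Read a hypothetical 32-bit status register at offset 0x10. */
        (void)bus_space_read_4(iot, ioh, 0x10);

        bus_space_unmap(iot, ioh, 0x100);
        return 0;
}
#endif
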
int
bus_space_alloc(t, rstart, rend, size, alignment, boundary, flags, bpap, bshp)
        bus_space_tag_t t;
        bus_addr_t rstart, rend;
        bus_size_t size, alignment, boundary;
        int flags;
        bus_addr_t *bpap;
        bus_space_handle_t *bshp;
{
        panic("bus_space_alloc: not implemented");
}

void
bus_space_free(t, bsh, size)
        bus_space_tag_t t;
        bus_space_handle_t bsh;
        bus_size_t size;
{
        panic("bus_space_free: not implemented");
}

void
bus_space_unmap(t, bsh, size)
        bus_space_tag_t t;
        bus_space_handle_t bsh;
        bus_size_t size;
{

        return;
}

int
bus_space_subregion(t, bsh, offset, size, nbshp)
        bus_space_tag_t t;
        bus_space_handle_t bsh;
        bus_size_t offset, size;
        bus_space_handle_t *nbshp;
{

        *nbshp = bsh + offset;
        return 0;
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
        bus_dma_tag_t t;
        bus_size_t size;
        int nsegments;
        bus_size_t maxsegsz;
        bus_size_t boundary;
        int flags;
        bus_dmamap_t *dmamp;
{
        struct cobalt_bus_dmamap *map;
        void *mapstore;
        size_t mapsize;

        /*
         * Allocate and initialize the DMA map.  The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        mapsize = sizeof(struct cobalt_bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));
        if ((mapstore = malloc(mapsize, M_DMAMAP,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
                return ENOMEM;

        memset(mapstore, 0, mapsize);
        map = (struct cobalt_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxsegsz = maxsegsz;
        map->_dm_boundary = boundary;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
        map->_dm_proc = NULL;
        map->dm_mapsize = 0;            /* no valid mappings */
        map->dm_nsegs = 0;

        *dmamp = map;
        return 0;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
        bus_dma_tag_t t;
        bus_dmamap_t map;
{

        free(map, M_DMAMAP);
}

extern paddr_t kvtophys(vaddr_t);       /* XXX */
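
/*
 * Illustrative sketch only (compiled out): the create/load/sync/
 * unload/destroy life cycle a driver would drive through the
 * bus_dmamap_*() interface, which dispatches to the functions in
 * this file.  The single-segment kernel buffer (NULL proc) is
 * hypothetical.
 */
#if 0
static int
example_dma_write(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
        bus_dmamap_t map;
        int error;

        error = bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
            &map);
        if (error)
                return error;

        error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
        if (error == 0) {
                /* Device is about to read the buffer: write back the cache. */
                bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
                /* ... start the transfer, wait for completion ... */
                bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(dmat, map);
        }
        bus_dmamap_destroy(dmat, map);
        return error;
}
#endif
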
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(map, buf, buflen, p, flags, lastaddrp, segp, first)
        bus_dmamap_t map;
        void *buf;
        bus_size_t buflen;
        struct proc *p;
        int flags;
        vaddr_t *lastaddrp;
        int *segp;
        int first;
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vaddr_t vaddr = (vaddr_t)buf;
        int seg;

        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);

        for (seg = *segp; buflen > 0; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (p != NULL)
                        (void) pmap_extract(p->p_vmspace->vm_map.pmap,
                            vaddr, &curaddr);
                else
                        curaddr = kvtophys(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = NBPG - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = (curaddr + map->_dm_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr = curaddr;
                        map->dm_segs[seg].ds_len = sgsize;
                        map->dm_segs[seg]._ds_vaddr = vaddr;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                             map->_dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                             (map->dm_segs[seg].ds_addr & bmask) ==
                             (curaddr & bmask)))
                                map->dm_segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr = curaddr;
                                map->dm_segs[seg].ds_len = sgsize;
                                map->dm_segs[seg]._ds_vaddr = vaddr;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                return EFBIG;           /* XXX Better return value here? */

        return 0;
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        void *buf;
        bus_size_t buflen;
        struct proc *p;
        int flags;
{
        vaddr_t lastaddr;
        int seg, error;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        if (buflen > map->_dm_size)
                return EINVAL;

        seg = 0;
        error = _bus_dmamap_load_buffer(map, buf, buflen,
            p, flags, &lastaddr, &seg, 1);
        if (error == 0) {
                map->dm_mapsize = buflen;
                map->dm_nsegs = seg + 1;
                map->_dm_proc = p;

                /*
                 * For linear buffers, we support marking the mapping
                 * as COHERENT.
                 *
                 * XXX Check TLB entries for cache-inhibit bits?
                 */
                if (buf >= (void *)MIPS_KSEG1_START &&
                    buf < (void *)MIPS_KSEG2_START)
                        map->_dm_flags |= COBALT_DMAMAP_COHERENT;
        }
        return error;
}
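
/*
 * Worked example of the boundary clipping in _bus_dmamap_load_buffer()
 * above, assuming a hypothetical 2KB boundary (_dm_boundary == 0x800)
 * and 4KB pages:
 *
 *      bmask   = ~(0x800 - 1) = 0xfffff800
 *      curaddr = 0x1234f600, tentative sgsize = 0xa00 (rest of the page)
 *      baddr   = (0x1234f600 + 0x800) & bmask = 0x1234f800
 *      baddr - curaddr = 0x200 < sgsize, so sgsize is clipped to 0x200
 *
 * and the chunk ends exactly at the 2KB line; the following chunk then
 * fails the bmask comparison against the current segment and starts a
 * new one.
 */
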
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(t, map, m0, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        struct mbuf *m0;
        int flags;
{
        vaddr_t lastaddr;
        int seg, error, first;
        struct mbuf *m;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_bus_dmamap_load_mbuf: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return EINVAL;

        first = 1;
        seg = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                error = _bus_dmamap_load_buffer(map,
                    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
                first = 0;
        }
        if (error == 0) {
                map->dm_mapsize = m0->m_pkthdr.len;
                map->dm_nsegs = seg + 1;
                map->_dm_proc = NULL;   /* always kernel */
        }
        return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        struct uio *uio;
        int flags;
{
        vaddr_t lastaddr;
        int seg, i, error, first;
        bus_size_t minlen, resid;
        struct proc *p = NULL;
        struct iovec *iov;
        caddr_t addr;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                p = uio->uio_procp;
#ifdef DIAGNOSTIC
                if (p == NULL)
                        panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
        }

        first = 1;
        seg = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = (caddr_t)iov[i].iov_base;

                error = _bus_dmamap_load_buffer(map, addr, minlen,
                    p, flags, &lastaddr, &seg, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                map->dm_mapsize = uio->uio_resid;
                map->dm_nsegs = seg + 1;
                map->_dm_proc = p;
        }
        return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_dma_segment_t *segs;
        int nsegs;
        bus_size_t size;
        int flags;
{

        panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
        bus_dma_tag_t t;
        bus_dmamap_t map;
{

        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        map->_dm_flags &= ~COBALT_DMAMAP_COHERENT;
        map->_dm_proc = NULL;
}
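
/*
 * Illustrative sketch only (compiled out): a hypothetical transmit
 * path mapping an outgoing packet through _bus_dmamap_load_mbuf()
 * via the bus_dmamap_load_mbuf() dispatch macro.
 */
#if 0
static int
example_tx_map(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0)
{
        int error;

        error = bus_dmamap_load_mbuf(dmat, map, m0, BUS_DMA_NOWAIT);
        if (error)
                return error;

        /* The device is about to read the packet: write back the cache. */
        bus_dmamap_sync(dmat, map, 0, map->dm_mapsize,
            BUS_DMASYNC_PREWRITE);

        /*
         * map->dm_segs[0 .. map->dm_nsegs - 1] now hold the physical
         * segments to hand to a hypothetical descriptor ring.
         */
        return 0;
}
#endif
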
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This version works with the virtually-indexed, write-back
 * cache found on Cobalt systems.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_addr_t offset;
        bus_size_t len;
        int ops;
{
        bus_size_t minlen;
        bus_addr_t addr;
        int i, useindex;

        /*
         * Mixing PRE and POST operations is not allowed.
         */
        if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
            (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
                panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
        if (offset >= map->dm_mapsize)
                panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
                    offset, map->dm_mapsize);
        if (len == 0 || (offset + len) > map->dm_mapsize)
                panic("_bus_dmamap_sync: bad length");
#endif

        /*
         * The RM52xx cache is virtually-indexed, write-back.  This means
         * we need to do the following things:
         *
         *      PREREAD -- Invalidate D-cache.  Note we might have
         *      to also write-back here if we have to use an Index
         *      op, or if the buffer start/end is not cache-line aligned.
         *
         *      PREWRITE -- Write-back the D-cache.  If we have to use
         *      an Index op, we also have to invalidate.  Note that if
         *      we are doing PREREAD|PREWRITE, we can collapse everything
         *      into a single op.
         *
         *      POSTREAD -- Nothing.
         *
         *      POSTWRITE -- Nothing.
         */

        /*
         * Flush the write buffer.
         * XXX Is this always necessary?
         */
        wbflush();

        ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
        if (ops == 0)
                return;

        /*
         * If the mapping is of COHERENT DMA-safe memory, no cache
         * flush is necessary.
         */
        if (map->_dm_flags & COBALT_DMAMAP_COHERENT)
                return;

        /*
         * If the mapping belongs to the kernel, or if it belongs
         * to the currently-running process (XXX actually, vmspace),
         * then we can use Hit ops.  Otherwise, Index ops.
         *
         * This should be true the vast majority of the time.
         */
        if (__predict_true(map->_dm_proc == NULL || map->_dm_proc == curproc))
                useindex = 0;
        else
                useindex = 1;

        for (i = 0; i < map->dm_nsegs && len != 0; i++) {
                /* Find the beginning segment. */
                if (offset >= map->dm_segs[i].ds_len) {
                        offset -= map->dm_segs[i].ds_len;
                        continue;
                }

                /*
                 * Now at the first segment to sync; nail
                 * each segment until we have exhausted the
                 * length.
                 */
                minlen = len < map->dm_segs[i].ds_len - offset ?
                    len : map->dm_segs[i].ds_len - offset;

                addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
                printf("bus_dmamap_sync: flushing segment %d "
                    "(0x%lx..0x%lx) ...", i, addr + offset,
                    addr + offset + minlen - 1);
#endif

                /*
                 * If we are forced to use Index ops, it's always a
                 * Write-back,Invalidate, so just do one test.
                 */
                if (__predict_false(useindex)) {
                        mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
                        printf("\n");
#endif
                        offset = 0;
                        len -= minlen;
                        continue;
                }

                switch (ops) {
                case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
                        mips_dcache_wbinv_range(addr + offset, minlen);
                        break;

                case BUS_DMASYNC_PREREAD:
#if 1
                        mips_dcache_wbinv_range(addr + offset, minlen);
#else
                        mips_dcache_inv_range(addr + offset, minlen);
#endif
                        break;

                case BUS_DMASYNC_PREWRITE:
                        mips_dcache_wb_range(addr + offset, minlen);
                        break;
                }
#ifdef BUS_DMA_DEBUG
                printf("\n");
#endif
                offset = 0;
                len -= minlen;
        }
}
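
/*
 * Illustrative sketch only (compiled out): the receive-side pairing
 * of the sync operations handled above.  On this write-back cache
 * the PREREAD write-back/invalidate is what keeps stale cache lines
 * from shadowing data the device has just written to memory.
 */
#if 0
static void
example_rx(bus_dma_tag_t dmat, bus_dmamap_t map, bus_size_t len)
{
        /* Before the device writes the buffer: toss our cached copy. */
        bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREREAD);

        /* ... device DMA happens here ... */

        /* After the device is done; a no-op in this implementation. */
        bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
}
#endif
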
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
        bus_dma_tag_t t;
        bus_size_t size, alignment, boundary;
        bus_dma_segment_t *segs;
        int nsegs;
        int *rsegs;
        int flags;
{
        extern paddr_t avail_start, avail_end;
        vaddr_t curaddr, lastaddr;
        psize_t high;
        struct vm_page *m;
        struct pglist mlist;
        int curseg, error;

        /* Always round the size. */
        size = round_page(size);

        high = avail_end - PAGE_SIZE;

        /*
         * Allocate pages from the VM system.
         */
        error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
            &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
        if (error)
                return error;

        /*
         * Compute the location, size, and number of segments actually
         * returned by the VM code.
         */
        m = mlist.tqh_first;
        curseg = 0;
        lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
        segs[curseg].ds_len = PAGE_SIZE;
        m = m->pageq.tqe_next;

        for (; m != NULL; m = m->pageq.tqe_next) {
                curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
                if (curaddr < avail_start || curaddr >= high) {
                        printf("uvm_pglistalloc returned non-sensical"
                            " address 0x%lx\n", curaddr);
                        panic("_bus_dmamem_alloc");
                }
#endif
                if (curaddr == (lastaddr + PAGE_SIZE))
                        segs[curseg].ds_len += PAGE_SIZE;
                else {
                        curseg++;
                        segs[curseg].ds_addr = curaddr;
                        segs[curseg].ds_len = PAGE_SIZE;
                }
                lastaddr = curaddr;
        }

        *rsegs = curseg + 1;

        return 0;
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
{
        struct vm_page *m;
        bus_addr_t addr;
        struct pglist mlist;
        int curseg;

        /*
         * Build a list of pages to free back to the VM system.
         */
        TAILQ_INIT(&mlist);
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE) {
                        m = PHYS_TO_VM_PAGE(addr);
                        TAILQ_INSERT_TAIL(&mlist, m, pageq);
                }
        }

        uvm_pglistfree(&mlist);
}
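
/*
 * Illustrative sketch only (compiled out): allocating and mapping a
 * chunk of DMA-safe memory (e.g. for a hypothetical descriptor ring)
 * with the bus_dmamem_*() interface backed by the functions in this
 * file.  BUS_DMA_COHERENT asks for the uncached KSEG1 mapping in the
 * single-segment case handled by _bus_dmamem_map() below.
 */
#if 0
static caddr_t
example_alloc_ring(bus_dma_tag_t dmat, bus_size_t len)
{
        bus_dma_segment_t seg;
        caddr_t kva;
        int rseg;

        if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &seg, 1, &rseg,
            BUS_DMA_NOWAIT) != 0)
                return NULL;

        if (bus_dmamem_map(dmat, &seg, rseg, len, &kva,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
                bus_dmamem_free(dmat, &seg, rseg);
                return NULL;
        }
        return kva;
}
#endif
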
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        size_t size;
        caddr_t *kvap;
        int flags;
{
        vaddr_t va;
        bus_addr_t addr;
        int curseg;

        /*
         * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
         * TLB thrashing.
         */
        if (nsegs == 1) {
                if (flags & BUS_DMA_COHERENT)
                        *kvap = (caddr_t)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
                else
                        *kvap = (caddr_t)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
                return 0;
        }

        size = round_page(size);

        va = uvm_km_valloc(kernel_map, size);

        if (va == 0)
                return (ENOMEM);

        *kvap = (caddr_t)va;

        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += NBPG, va += NBPG, size -= NBPG) {
                        if (size == 0)
                                panic("_bus_dmamem_map: size botch");
                        pmap_enter(pmap_kernel(), va, addr,
                            VM_PROT_READ | VM_PROT_WRITE,
                            VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

                        /* XXX Do something about COHERENT here. */
                }
        }
        pmap_update(pmap_kernel());

        return 0;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
        bus_dma_tag_t t;
        caddr_t kva;
        size_t size;
{

#ifdef DIAGNOSTIC
        if ((u_long)kva & PGOFSET)
                panic("_bus_dmamem_unmap");
#endif

        /*
         * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
         * not in KSEG2).
         */
        if (kva >= (caddr_t)MIPS_KSEG0_START &&
            kva < (caddr_t)MIPS_KSEG2_START)
                return;

        size = round_page(size);
        uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        off_t off;
        int prot, flags;
{
        int i;

        for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
                if (off & PGOFSET)
                        panic("_bus_dmamem_mmap: offset unaligned");
                if (segs[i].ds_addr & PGOFSET)
                        panic("_bus_dmamem_mmap: segment unaligned");
                if (segs[i].ds_len & PGOFSET)
                        panic("_bus_dmamem_mmap: segment size not multiple"
                            " of page size");
#endif
                if (off >= segs[i].ds_len) {
                        off -= segs[i].ds_len;
                        continue;
                }

                return mips_btop((caddr_t)segs[i].ds_addr + off);
        }

        /* Page not found. */
        return -1;
}