/* $NetBSD: bus.c,v 1.9 2001/11/14 18:15:31 thorpej Exp $ */

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_space(9) and bus_dma(9) back-end for the newsmips port.
 *
 * bus_space is implemented by direct KSEG0/KSEG1 mapping of physical
 * addresses (no resource management).  bus_dma provides the common
 * map create/load/sync/unload operations, with separate cache-sync
 * routines for write-through (R3000) and write-back (R4000) CPUs,
 * selected at boot time by newsmips_bus_dma_init().
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _NEWSMIPS_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/cpu.h>

#include <uvm/uvm_extern.h>

#include <mips/cache.h>

static int _bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
    struct proc *, int, vaddr_t *, int *, int);

/*
 * Default DMA tag for the platform.  The _dmamap_sync slot is
 * intentionally NULL here; newsmips_bus_dma_init() fills it in with
 * the R3000 or R4000 variant once the CPU type is known.
 */
struct newsmips_bus_dma_tag newsmips_default_bus_dma_tag = {
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	NULL,			/* _dmamap_sync; set by newsmips_bus_dma_init() */
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

/*
 * Select the DMA map sync routine appropriate for the running CPU
 * (write-through R3000 vs. write-back R4000) and install it in the
 * default DMA tag.  Must be called before any bus_dmamap_sync().
 */
void
newsmips_bus_dma_init(void)
{
#ifdef MIPS1
	if (CPUISMIPS3 == 0)
		newsmips_default_bus_dma_tag._dmamap_sync =
		    _bus_dmamap_sync_r3k;
#endif
#ifdef MIPS3
	if (CPUISMIPS3)
		newsmips_default_bus_dma_tag._dmamap_sync =
		    _bus_dmamap_sync_r4k;
#endif
}

/*
 * Map a range of bus space.  On this platform bus space is simply
 * physical address space, so the handle is the KSEG0 (cacheable) or
 * KSEG1 (uncached) direct-mapped virtual address of `bpa'.  `size'
 * and `t' are unused.  Always succeeds.
 */
int
bus_space_map(t, bpa, size, flags, bshp)
	bus_space_tag_t t;
	bus_addr_t bpa;
	bus_size_t size;
	int flags;
	bus_space_handle_t *bshp;
{
	int cacheable = flags & BUS_SPACE_MAP_CACHEABLE;

	if (cacheable)
		*bshp = MIPS_PHYS_TO_KSEG0(bpa);
	else
		*bshp = MIPS_PHYS_TO_KSEG1(bpa);

	return 0;
}

/*
 * Allocate a region of bus space.  Not supported on this platform;
 * any call is a fatal error.
 */
int
bus_space_alloc(t, rstart, rend, size, alignment, boundary, flags, bpap, bshp)
	bus_space_tag_t t;
	bus_addr_t rstart, rend;
	bus_size_t size, alignment, boundary;
	int flags;
	bus_addr_t *bpap;
	bus_space_handle_t *bshp;
{
	panic("bus_space_alloc: not implemented");
}

/*
 * Free a region of bus space obtained with bus_space_alloc().  Not
 * supported (alloc isn't either); any call is a fatal error.
 */
void
bus_space_free(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{
	panic("bus_space_free: not implemented");
}

/*
 * Unmap a region of bus space.  Nothing to do: handles are KSEG0/KSEG1
 * direct-mapped addresses, so no resources were consumed by the map.
 */
void
bus_space_unmap(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{
	return;
}

/*
 * Obtain a handle for a sub-region of an already-mapped region.
 * Since handles are plain addresses, this is simple pointer arithmetic.
 */
int
bus_space_subregion(t, bsh, offset, size, nbshp)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t offset, size;
	bus_space_handle_t *nbshp;
{

	*nbshp = bsh + offset;
	return 0;
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 *
 * Returns 0 on success or ENOMEM if the map structure could not be
 * allocated.  On success *dmamp holds the new, empty map.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct newsmips_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct newsmips_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return ENOMEM;

	bzero(mapstore, mapsize);
	map = (struct newsmips_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_proc = NULL;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

extern paddr_t kvtophys(vaddr_t);	/* XXX */

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Walks `buflen' bytes starting at `buf' one page at a time,
 * translating each page to a physical address (via the pmap of `p',
 * or kvtophys() for kernel addresses) and appending it to the map's
 * segment list, coalescing physically-contiguous runs that stay
 * within _dm_maxsegsz and do not cross _dm_boundary.
 *
 * Returns 0 on success, or EFBIG if the buffer did not fit in the
 * map's segments.
 */
int
_bus_dmamap_load_buffer(map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	vaddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	/* bmask is only meaningful when _dm_boundary != 0 (power of two). */
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr, &curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * At most one page at a time, clipped to the page end.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX Better return value here? */

	return 0;
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 *
 * Returns 0 on success, EINVAL if the buffer is larger than the map,
 * or the error from _bus_dmamap_load_buffer().
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	vaddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return EINVAL;

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_proc = p;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		/* Buffers wholly inside KSEG1 are uncached, hence coherent. */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= NEWSMIPS_DMAMAP_COHERENT;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 *
 * Loads each mbuf of the chain headed by `m0' in turn.  `m0' must
 * have a packet header (panics under DIAGNOSTIC otherwise).  Returns
 * 0 on success, EINVAL if the packet is larger than the map, or the
 * error from _bus_dmamap_load_buffer().
 */
int
_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer(map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_proc = NULL;	/* always kernel */
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 *
 * Loads each iovec of `uio' until uio_resid is exhausted.  For
 * UIO_USERSPACE transfers the owning process is taken from
 * uio->uio_procp (panics under DIAGNOSTIC if missing).  The uio
 * itself is not modified; only a local residual count is consumed.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		/* uio_resid is still the original total: it was never
		 * decremented here, only the local `resid' copy was. */
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_proc = p;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 * Not supported on this platform; any call is a fatal error.
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~NEWSMIPS_DMAMAP_COHERENT;
	map->_dm_proc = NULL;
}

#ifdef MIPS1
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R3000 version.
 *
 * Syncs `len' bytes starting `offset' bytes into the mapped region.
 * PRE and POST operations may not be mixed in one call.  Under
 * DIAGNOSTIC, panics on an out-of-range offset/length.
 */
void
_bus_dmamap_sync_r3k(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_r3k: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_r3k: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_r3k: bad length");
#endif

	/*
	 * The R3000 cache is write-through.  Therefore, we only need
	 * to drain the write buffer on PREWRITE.  The cache is not
	 * coherent, however, so we need to invalidate the data cache
	 * on PREREAD (should we do it POSTREAD instead?).
	 *
	 * POSTWRITE (and POSTREAD, currently) are noops.
	 */

	if (ops & BUS_DMASYNC_PREWRITE) {
		/*
		 * Flush the write buffer.
		 */
		wbflush();
	}

	/*
	 * If we're not doing PREREAD, nothing more to do.
	 */
	if ((ops & BUS_DMASYNC_PREREAD) == 0)
		return;

	/*
	 * No cache invalidation is necessary if the DMA map covers
	 * COHERENT DMA-safe memory (which is mapped un-cached).
	 */
	if (map->_dm_flags & NEWSMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If we are going to hit something as large or larger
	 * than the entire data cache, just nail the whole thing.
	 *
	 * NOTE: Even though this is `wbinv_all', since the cache is
	 * write-through, it just invalidates it.
	 */
	if (len >= mips_pdcache_size) {
		mips_dcache_wbinv_all();
		return;
	}

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		/*
		 * Use the physical segment address through KSEG0;
		 * the R3000 cache is physically indexed, so the
		 * original user virtual address is not needed
		 * (contrast with the R4000 version below).
		 */
		addr = map->dm_segs[i].ds_addr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync_r3k: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		mips_dcache_inv_range(
		    MIPS_PHYS_TO_KSEG0(addr + offset), minlen);
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
#endif /* MIPS1 */

#ifdef MIPS3
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R4000 version.
 *
 * Syncs `len' bytes starting `offset' bytes into the mapped region.
 * PRE and POST operations may not be mixed in one call.  Under
 * DIAGNOSTIC, panics on an out-of-range offset/length.
 */
void
_bus_dmamap_sync_r4k(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_r4k: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad length");
#endif

	/*
	 * The R4000 cache is virtually-indexed, write-back.  This means
	 * we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & NEWSMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If the mapping belongs to the kernel, or if it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(map->_dm_proc == NULL || map->_dm_proc == curproc))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		/*
		 * Use the virtual address recorded at load time:
		 * the R4000 cache is virtually indexed, so Hit/Index
		 * ops operate on virtual addresses (contrast with the
		 * R3000 version above, which uses ds_addr).
		 */
		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(addr + offset, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			/* Write-back+invalidate in case the buffer
			 * edges share cache lines with live data. */
			mips_dcache_wbinv_range(addr + offset, minlen);
#else
			mips_dcache_inv_range(addr + offset, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(addr + offset, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
#endif /* MIPS3 */

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 *
 * Allocates `size' bytes (rounded to pages) of physical memory from
 * the VM system, coalescing physically-contiguous pages, and fills
 * in up to `nsegs' segments.  *rsegs receives the number of segments
 * actually used.  Returns 0 or the uvm_pglistalloc() error.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start, avail_end;
	vaddr_t curaddr, lastaddr;
	psize_t high;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		/* Extend the current segment if physically contiguous. */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 *
 * Maps the physical segments into kernel virtual address space and
 * returns the address in *kvap.  Single low-memory segments are
 * direct-mapped through KSEG0/KSEG1 (no KVA consumed); otherwise KVA
 * is allocated and the pages entered wired into the kernel pmap.
 * Returns 0 or ENOMEM if KVA allocation fails.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, and the address is lower than
	 * 256MB, use KSEG0 or KSEG1, to avoid TLB thrashing.
	 */
	if (nsegs == 1 && segs[0].ds_addr + segs[0].ds_len <= 0x10000000) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (caddr_t)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* XXX Do something about COHERENT here. */
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (caddr_t)MIPS_KSEG0_START &&
	    kva < (caddr_t)MIPS_KSEG2_START)
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 *
 * Translates `off' bytes into the segment list to a physical page
 * frame number suitable for pmap_mmap.  Returns -1 if the offset is
 * past the end of the segments.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return mips_btop((caddr_t)segs[i].ds_addr + off);
	}

	/* Page not found. */
	return -1;
}