1 /* $NetBSD: bus_dma.c,v 1.2 2001/11/14 18:15:32 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent bus_dma(9) back-end for the PlayStation 2 (MIPS) port:
 * DMA map creation/destruction, the buffer/mbuf/uio load routines, cache
 * synchronization, and DMA-safe memory allocation/mapping.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <mips/cache.h>

#define _PLAYSTATION2_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <uvm/uvm_extern.h>

#include <machine/locore.h>

/* Kernel virtual -> physical translation for KSEG/KSEG2 addresses. */
extern paddr_t kvtophys(vaddr_t);	/* XXX */

static int _bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
    struct proc *, int, vaddr_t *, int *, int);

/*
 * Default DMA tag.  Busses with no special DMA requirements point their
 * tag at these common implementations (function-pointer order must match
 * struct playstation2_bus_dma_tag in <machine/bus.h>).
 */
struct playstation2_bus_dma_tag playstation2_default_bus_dma_tag = {
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 *
 * Allocates a map that can describe at most `size' bytes in up to
 * `nsegments' segments, each no larger than `maxsegsz', with no segment
 * crossing a `boundary'-aligned boundary (0 = no restriction).  On
 * success the new (unloaded) map is returned through `dmamp'.
 * Returns 0 or ENOMEM.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct playstation2_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct playstation2_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return ENOMEM;

	memset(mapstore, 0, mapsize);
	map = (struct playstation2_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 *
 * The map and its trailing segment array were a single allocation,
 * so one free() releases everything.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}


/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Walks `buf' page by page, translating each chunk to a physical
 * address and either extending the current segment (when physically
 * contiguous, within _dm_maxsegsz, and not crossing _dm_boundary) or
 * starting a new one.  Returns 0, or EFBIG if the buffer does not fit
 * in the map's segments.
 */
int
_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	/* Mask selecting the boundary-aligned "window" of an address. */
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * NOTE(review): the pmap_extract() return value is
		 * discarded; on failure curaddr would be used stale or
		 * uninitialized -- XXX confirm callers only pass wired,
		 * resident buffers.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr, (paddr_t *)&curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * Chunks never span a page, so one translation per
		 * iteration is sufficient.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				/* Out of segments: report EFBIG below. */
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX Better return value here? */

	return 0;
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 *
 * Returns 0, EINVAL (buffer larger than the map), or EFBIG (too many
 * segments).  On success dm_mapsize/dm_nsegs describe the mapping.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return EINVAL;

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * KSEG1 is the uncached direct-mapped window, so a
		 * buffer entirely inside [KSEG1, KSEG2) needs no cache
		 * maintenance in _bus_dmamap_sync().
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= PLAYSTATION2_DMAMAP_COHERENT;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 *
 * m0 must be a packet header mbuf (M_PKTHDR); each mbuf in the chain is
 * loaded in turn, coalescing across mbuf boundaries where the physical
 * pages allow.  Mbuf data always lives in kernel space (proc == NULL).
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer(map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 *
 * Loads up to uio_resid bytes from the iovec array.  For UIO_USERSPACE
 * transfers the owning proc supplies the pmap used for address
 * translation; the uio itself is not advanced.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 *
 * Not implemented on this port; always panics.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.  Also clear the COHERENT hint set by
	 * _bus_dmamap_load() so it doesn't leak into the next load.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~PLAYSTATION2_DMAMAP_COHERENT;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * Synchronizes `len' bytes starting at `offset' into the mapped area.
 * For PRE operations on non-COHERENT mappings the affected cache lines
 * are written back and invalidated; POST operations only drain the
 * write buffer.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Flush the write buffer.
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & PLAYSTATION2_DMAMAP_COHERENT)
		return;

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Flush data cache for PREWRITE, so that the contents of
	 * the data buffer in memory reflect reality.
	 *
	 * Given the test above, we know we're doing one of these
	 * two operations, so no additional tests are necessary.
	 */

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment: skip whole segments
		 * that lie entirely before `offset'. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		/* Cache ops take the virtual address recorded at load. */
		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		mips_dcache_wbinv_range(addr + offset, minlen);

#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		/* Subsequent segments start at their beginning. */
		offset = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 *
 * Allocates `size' bytes (rounded up to whole pages) of physical
 * memory from UVM, honoring `alignment' and `boundary', and fills in
 * up to `nsegs' physical segments; *rsegs receives the number actually
 * used.  Returns 0 or an error from uvm_pglistalloc().
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;
	/*
	 * NOTE(review): curaddr/lastaddr hold *physical* addresses
	 * despite the vaddr_t type -- presumably paddr_t and vaddr_t
	 * are the same width on this port; verify.
	 */
	vaddr_t curaddr, lastaddr;
	psize_t high;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.  The final argument asks
	 * uvm_pglistalloc() to wait unless BUS_DMA_NOWAIT was given.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.  Physically contiguous pages are
	 * merged into a single segment.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 *
 * Rebuilds the page list from the segments' physical ranges and hands
 * it back to UVM.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 *
 * Returns a kernel virtual mapping of the physical segments in *kvap.
 * Single-segment allocations use the direct-mapped KSEG windows
 * (KSEG1 when BUS_DMA_COHERENT, i.e. uncached; KSEG0 otherwise);
 * multi-segment allocations get a KSEG2 mapping via the pmap.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (caddr_t)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* XXX Do something about COHERENT here. */
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).  Those windows are direct-mapped and were
	 * never entered into the kernel pmap.
	 */
	if (kva >= (caddr_t)MIPS_KSEG0_START &&
	    kva < (caddr_t)MIPS_KSEG2_START)
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 *
 * Translates byte offset `off' into the segment list to the physical
 * page frame number the pager should map.  Returns -1 if `off' lies
 * beyond the end of the segments.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return mips_btop((caddr_t)segs[i].ds_addr + off);
	}

	/* Page not found. */
	return -1;
}