1 /* $NetBSD: bus_dma.c,v 1.15 2002/01/06 12:39:55 takemura Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>
#include <mips/cache.h>

#include <machine/bus.h>
#include <machine/bus_dma_hpcmips.h>

/*
 * Internal worker shared by the map-load entry points below; loads one
 * linear buffer's worth of pages into a DMA map.
 */
static int _hpcmips_bd_map_load_buffer(bus_dmamap_t, void *, bus_size_t,
    struct proc *, int, vaddr_t *, int *, int);

paddr_t kvtophys(vaddr_t);	/* XXX */

/*
 * The default DMA tag for all busses on the hpcmips.  The method table
 * points at the common implementations in this file; bus front-ends may
 * override individual entries.
 */
struct bus_dma_tag_hpcmips hpcmips_default_bus_dma_tag = {
	{
		NULL,
		{
			_hpcmips_bd_map_create,
			_hpcmips_bd_map_destroy,
			_hpcmips_bd_map_load,
			_hpcmips_bd_map_load_mbuf,
			_hpcmips_bd_map_load_uio,
			_hpcmips_bd_map_load_raw,
			_hpcmips_bd_map_unload,
			_hpcmips_bd_map_sync,
			_hpcmips_bd_mem_alloc,
			_hpcmips_bd_mem_free,
			_hpcmips_bd_mem_map,
			_hpcmips_bd_mem_unmap,
			_hpcmips_bd_mem_mmap,
		},
	},
	NULL,
};

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_hpcmips_bd_map_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct bus_dmamap_hpcmips *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * has two variable-sized arrays of segments, so we allocate enough
	 * room for them in one shot: (nsegments - 1) extra hpcmips-private
	 * segments (one is already inline in the struct) followed by the
	 * nsegments machine-independent segment descriptors.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 */
	mapsize = sizeof(struct bus_dmamap_hpcmips) +
	    sizeof(struct bus_dma_segment_hpcmips) * (nsegments - 1) +
	    sizeof(bus_dma_segment_t) * nsegments;
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct bus_dmamap_hpcmips *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->bdm.dm_mapsize = 0;	/* no valid mappings */
	map->bdm.dm_nsegs = 0;
	/* The MI segment array lives just past the private segments. */
	map->bdm.dm_segs = (bus_dma_segment_t *)((char *)mapstore +
	    sizeof(struct bus_dmamap_hpcmips) +
	    sizeof(struct bus_dma_segment_hpcmips) * (nsegments - 1));

	*dmamp = &map->bdm;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.  The single free() releases the segment
 * arrays too, since they were allocated in the same chunk.
 */
void
_hpcmips_bd_map_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_hpcmips_bd_map_load_buffer(bus_dmamap_t mapx, void *buf, bus_size_t buflen,
    struct proc *p, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * NOTE(review): pmap_extract()'s return value is ignored;
		 * if the user VA is unmapped curaddr is left stale —
		 * presumably callers guarantee the buffer is wired.
		 * TODO confirm against the callers.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr, &curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * Never cross a page boundary in one chunk.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible (physically
		 * contiguous, within maxsegsz, same boundary window).
		 */
		if (first) {
			map->bdm.dm_segs[seg].ds_addr = curaddr;
			map->bdm.dm_segs[seg].ds_len = sgsize;
			map->_dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->bdm.dm_segs[seg].ds_len + sgsize) <=
			    map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			    (map->bdm.dm_segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask)))
				map->bdm.dm_segs[seg].ds_len += sgsize;
			else {
				/* Ran out of segments: give up early. */
				if (++seg >= map->_dm_segcnt)
					break;
				map->bdm.dm_segs[seg].ds_addr = curaddr;
				map->bdm.dm_segs[seg].ds_len = sgsize;
				map->_dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?  buflen is non-zero only if we broke out of the
	 * loop after exhausting the segment array.
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_hpcmips_bd_map_load(bus_dma_tag_t t, bus_dmamap_t mapx, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	vaddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->bdm.dm_mapsize = 0;
	map->bdm.dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _hpcmips_bd_map_load_buffer(mapx, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->bdm.dm_mapsize = buflen;
		map->bdm.dm_nsegs = seg + 1;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.  A KSEG1 buffer is accessed uncached,
		 * so no cache sync is ever needed for it.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= HPCMIPS_DMAMAP_COHERENT;
	}
	return (error);
}

/*
 * Like _hpcmips_bd_map_load(), but for mbufs.  Walks the chain and
 * loads each mbuf's data area in turn.
 */
int
_hpcmips_bd_map_load_mbuf(bus_dma_tag_t t, bus_dmamap_t mapx, struct mbuf *m0,
    int flags)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->bdm.dm_mapsize = 0;
	map->bdm.dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_hpcmips_bd_map_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _hpcmips_bd_map_load_buffer(mapx,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->bdm.dm_mapsize = m0->m_pkthdr.len;
		map->bdm.dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _hpcmips_bd_map_load(), but for uios.  Loads each iovec until
 * the residual count is exhausted; user-space uios are translated via
 * the owning process' pmap.
 */
int
_hpcmips_bd_map_load_uio(bus_dma_tag_t t, bus_dmamap_t mapx, struct uio *uio,
    int flags)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->bdm.dm_mapsize = 0;
	map->bdm.dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_hpcmips_bd_map_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _hpcmips_bd_map_load_buffer(mapx, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->bdm.dm_mapsize = uio->uio_resid;
		map->bdm.dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _hpcmips_bd_map_load(), but for raw memory.  Not implemented
 * on this platform; any caller is a bug.
 */
int
_hpcmips_bd_map_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_hpcmips_bd_map_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_hpcmips_bd_map_unload(bus_dma_tag_t t, bus_dmamap_t mapx)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.  The COHERENT hint is per-load, so clear it too.
	 */
	map->bdm.dm_mapsize = 0;
	map->bdm.dm_nsegs = 0;
	map->_dm_flags &= ~HPCMIPS_DMAMAP_COHERENT;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_hpcmips_bd_map_sync(bus_dma_tag_t t, bus_dmamap_t mapx, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_hpcmips_bd_map_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->bdm.dm_mapsize)
		panic("_hpcmips_bd_map_sync: bad offset %lu (map size is %lu)",
		    offset, map->bdm.dm_mapsize);
	if (len == 0 || (offset + len) > map->bdm.dm_mapsize)
		panic("_hpcmips_bd_map_sync: bad length");
#endif

	/*
	 * Flush the write buffer.
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & HPCMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Flush data cache for PREWRITE, so that the contents of
	 * the data buffer in memory reflect reality.
	 *
	 * Given the test above, we know we're doing one of these
	 * two operations, so no additional tests are necessary.
	 */

	/*
	 * The R2000 and R3000 have a physically indexed
	 * cache.  Loop through the DMA segments, looking
	 * for the appropriate offset, and flush the D-cache
	 * at that physical address.
	 *
	 * The R4000 has a virtually indexed primary data cache.  We
	 * do the same loop, instead using the virtual address stashed
	 * away in the segments when the map was loaded.
	 */
	for (i = 0; i < map->bdm.dm_nsegs && len != 0; i++) {
		/* Find the beginning segment; skip whole segments
		 * that lie entirely before the requested offset. */
		if (offset >= map->bdm.dm_segs[i].ds_len) {
			offset -= map->bdm.dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->bdm.dm_segs[i].ds_len - offset ?
		    len : map->bdm.dm_segs[i].ds_len - offset;

		/* Virtual address for MIPS3 (virtually indexed D$),
		 * physical address otherwise. */
		if (CPUISMIPS3)
			addr = map->_dm_segs[i]._ds_vaddr;
		else
			addr = map->bdm.dm_segs[i].ds_addr;

#ifdef BUS_DMA_DEBUG
		printf("_hpcmips_bd_map_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		if (CPUISMIPS3)
			mips_dcache_wbinv_range(addr + offset, minlen);
		else {
			/*
			 * We can't have a TLB miss; use KSEG0.
			 */
			mips_dcache_wbinv_range(
			    MIPS_PHYS_TO_KSEG0(map->bdm.dm_segs[i].ds_addr
				+ offset),
			    minlen);
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		/* Only the first synced segment has a partial offset. */
		offset = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.  Delegates to
 * _hpcmips_bd_mem_alloc_range() over all of managed physical memory.
 */
int
_hpcmips_bd_mem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;		/* XXX */
	psize_t high;

	high = avail_end - PAGE_SIZE;

	return (_hpcmips_bd_mem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, avail_start, high));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_hpcmips_bd_mem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary,
    bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
#ifdef DIAGNOSTIC
	extern paddr_t avail_start, avail_end;		/* XXX */

	/* Clamp the requested range to managed physical memory. */
	high = high<(avail_end - PAGE_SIZE)? high: (avail_end - PAGE_SIZE);
	low = low>avail_start? low: avail_start;
#endif
	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.  Wait for memory only
	 * when the caller did not pass BUS_DMA_NOWAIT.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.  The first page seeds segment 0;
	 * each following page either extends the current segment (if
	 * physically contiguous) or starts a new one.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_hpcmips_bd_mem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_hpcmips_bd_mem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 * Walk each segment a page at a time, translating physical
	 * addresses back to vm_page pointers.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_hpcmips_bd_mem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.  KSEG1 gives an uncached (COHERENT) view;
	 * KSEG0 a cached one.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (caddr_t)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

	/* Reserve virtual space in the kernel map for all segments. */
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			/* More segment pages than reserved VA is fatal. */
			if (size == 0)
				panic("_hpcmips_bd_mem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* XXX Do something about COHERENT here. */
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
671 */ 672 void 673 _hpcmips_bd_mem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size) 674 { 675 676 #ifdef DIAGNOSTIC 677 if ((u_long)kva & PGOFSET) 678 panic("_hpcmips_bd_mem_unmap"); 679 #endif 680 681 /* 682 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e. 683 * not in KSEG2). 684 */ 685 if (kva >= (caddr_t)MIPS_KSEG0_START && 686 kva < (caddr_t)MIPS_KSEG2_START) 687 return; 688 689 size = round_page(size); 690 uvm_km_free(kernel_map, (vaddr_t)kva, size); 691 } 692 693 /* 694 * Common functin for mmap(2)'ing DMA-safe memory. May be called by 695 * bus-specific DMA mmap(2)'ing functions. 696 */ 697 paddr_t 698 _hpcmips_bd_mem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, 699 off_t off, int prot, int flags) 700 { 701 int i; 702 703 for (i = 0; i < nsegs; i++) { 704 #ifdef DIAGNOSTIC 705 if (off & PGOFSET) 706 panic("_hpcmips_bd_mem_mmap: offset unaligned"); 707 if (segs[i].ds_addr & PGOFSET) 708 panic("_hpcmips_bd_mem_mmap: segment unaligned"); 709 if (segs[i].ds_len & PGOFSET) 710 panic("_hpcmips_bd_mem_mmap: segment size not multiple" 711 " of page size"); 712 #endif 713 if (off >= segs[i].ds_len) { 714 off -= segs[i].ds_len; 715 continue; 716 } 717 718 return (mips_btop((caddr_t)segs[i].ds_addr + off)); 719 } 720 721 /* Page not found. */ 722 return (-1); 723 } 724