1 /* $NetBSD: bus.c,v 1.15 2002/10/10 18:16:40 rafal Exp $ */ 2 3 /* 4 * Copyright (c) 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/endian.h> 43 #include <sys/bswap.h> 44 #include <sys/kernel.h> 45 #include <sys/device.h> 46 #include <sys/malloc.h> 47 #include <sys/proc.h> 48 #include <sys/mbuf.h> 49 50 #define _SGIMIPS_BUS_DMA_PRIVATE 51 #include <machine/bus.h> 52 #include <machine/cpu.h> 53 54 #include <uvm/uvm_extern.h> 55 56 #include <mips/cpuregs.h> 57 #include <mips/locore.h> 58 #include <mips/cache.h> 59 60 static int _bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t, 61 struct proc *, int, vaddr_t *, int *, int); 62 63 struct sgimips_bus_dma_tag sgimips_default_bus_dma_tag = { 64 _bus_dmamap_create, 65 _bus_dmamap_destroy, 66 _bus_dmamap_load, 67 _bus_dmamap_load_mbuf, 68 _bus_dmamap_load_uio, 69 _bus_dmamap_load_raw, 70 _bus_dmamap_unload, 71 _bus_dmamap_sync, 72 _bus_dmamem_alloc, 73 _bus_dmamem_free, 74 _bus_dmamem_map, 75 _bus_dmamem_unmap, 76 _bus_dmamem_mmap, 77 }; 78 79 u_int8_t 80 bus_space_read_1(t, h, o) 81 bus_space_tag_t t; 82 bus_space_handle_t h; 83 bus_size_t o; 84 { 85 wbflush(); /* XXX ? */ 86 87 switch (t) { 88 case 0: 89 return *(volatile u_int8_t *)(h + o); 90 case 1: /* XXX HPC */ 91 return *(volatile u_int8_t *)(h + (o << 2) + 3); 92 case 2: /* mem */ 93 case 4: /* I/O */ 94 return *(volatile u_int8_t *)(h + (o | 3) - (o & 3)); 95 case 3: /* mace devices */ 96 return *(volatile u_int8_t *)(h + (o << 8) + 7); 97 default: 98 panic("no bus tag"); 99 } 100 } 101 102 void 103 bus_space_write_1(t, h, o, v) 104 bus_space_tag_t t; 105 bus_space_handle_t h; 106 bus_size_t o; 107 u_int8_t v; 108 { 109 switch (t) { 110 case 0: 111 *(volatile u_int8_t *)(h + o) = v; 112 break; 113 case 1: /* XXX HPC */ 114 *(volatile u_int8_t *)(h + (o << 2) + 3) = v; 115 break; 116 case 2: /* mem */ 117 case 4: /* I/O */ 118 *(volatile u_int8_t *)(h + (o | 3) - (o & 3)) = v; 119 break; 120 case 3: /* mace devices */ 121 *(volatile u_int8_t *)(h + (o << 8) + 7) = v; 122 break; 123 default: 124 panic("no bus tag"); 125 } 126 127 wbflush(); /* XXX */ 128 } 129 130 u_int16_t 131 bus_space_read_2(t, h, o) 132 bus_space_tag_t t; 133 bus_space_handle_t h; 134 bus_size_t o; 135 { 136 wbflush(); /* XXX ? */ 137 138 switch (t) { 139 case 0: 140 return *(volatile u_int16_t *)(h + o); 141 case 1: /* XXX HPC */ 142 return *(volatile u_int16_t *)(h + (o << 2) + 1); 143 case 2: /* mem */ 144 case 4: /* I/O */ 145 return *(volatile u_int16_t *)(h + (o | 2) - (o & 3)); 146 default: 147 panic("no bus tag"); 148 } 149 } 150 151 void 152 bus_space_write_2(t, h, o, v) 153 bus_space_tag_t t; 154 bus_space_handle_t h; 155 bus_size_t o; 156 u_int16_t v; 157 { 158 switch (t) { 159 case 0: 160 *(volatile u_int16_t *)(h + o) = v; 161 break; 162 case 1: /* XXX HPC */ 163 *(volatile u_int16_t *)(h + (o << 2) + 1) = v; 164 break; 165 case 2: /* mem */ 166 case 4: /* I/O */ 167 *(volatile u_int16_t *)(h + (o | 2) - (o & 3)) = v; 168 break; 169 default: 170 panic("no bus tag"); 171 } 172 173 wbflush(); /* XXX */ 174 } 175 176 int 177 bus_space_map(t, bpa, size, flags, bshp) 178 bus_space_tag_t t; 179 bus_addr_t bpa; 180 bus_size_t size; 181 int flags; 182 bus_space_handle_t *bshp; 183 { 184 int cacheable = flags & BUS_SPACE_MAP_CACHEABLE; 185 186 if (cacheable) 187 *bshp = MIPS_PHYS_TO_KSEG0(bpa); 188 else 189 *bshp = MIPS_PHYS_TO_KSEG1(bpa); 190 191 /* 192 * XXX 193 */ 194 195 #define PCI_LOW_MEMORY 0x1A000000 196 #define PCI_LOW_IO 0x18000000 197 198 /* XXX O2 */ 199 if (bpa > 0x80000000 && bpa < 0x82000000) 200 *bshp = MIPS_PHYS_TO_KSEG1(PCI_LOW_MEMORY + (bpa & 0xfffffff)); 201 if (bpa < 0x00010000) 202 *bshp = MIPS_PHYS_TO_KSEG1(PCI_LOW_IO + bpa); 203 204 return 0; 205 } 206 207 int 208 bus_space_alloc(t, rstart, rend, size, alignment, boundary, flags, bpap, bshp) 209 bus_space_tag_t t; 210 bus_addr_t rstart, rend; 211 bus_size_t size, alignment, boundary; 212 int flags; 213 bus_addr_t *bpap; 214 bus_space_handle_t *bshp; 215 { 216 panic("bus_space_alloc: not implemented"); 217 } 218 219 void 220 bus_space_free(t, bsh, size) 221 bus_space_tag_t t; 222 bus_space_handle_t bsh; 223 bus_size_t size; 224 { 225 panic("bus_space_free: not implemented"); 226 } 227 228 void 229 bus_space_unmap(t, bsh, size) 230 bus_space_tag_t t; 231 bus_space_handle_t bsh; 232 bus_size_t size; 233 { 234 return; 235 } 236 237 int 238 bus_space_subregion(t, bsh, offset, size, nbshp) 239 bus_space_tag_t t; 240 bus_space_handle_t bsh; 241 bus_size_t offset, size; 242 bus_space_handle_t *nbshp; 243 { 244 245 *nbshp = bsh + offset; 246 return 0; 247 } 248 249 /* 250 * Common function for DMA map creation. May be called by bus-specific 251 * DMA map creation functions. 252 */ 253 int 254 _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp) 255 bus_dma_tag_t t; 256 bus_size_t size; 257 int nsegments; 258 bus_size_t maxsegsz; 259 bus_size_t boundary; 260 int flags; 261 bus_dmamap_t *dmamp; 262 { 263 struct sgimips_bus_dmamap *map; 264 void *mapstore; 265 size_t mapsize; 266 267 /* 268 * Allcoate and initialize the DMA map. The end of the map 269 * is a variable-sized array of segments, so we allocate enough 270 * room for them in one shot. 271 * 272 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation 273 * of ALLOCNOW notifes others that we've reserved these resources, 274 * and they are not to be freed. 275 * 276 * The bus_dmamap_t includes one bus_dma_segment_t, hence 277 * the (nsegments - 1). 278 */ 279 mapsize = sizeof(struct sgimips_bus_dmamap) + 280 (sizeof(bus_dma_segment_t) * (nsegments - 1)); 281 if ((mapstore = malloc(mapsize, M_DMAMAP, 282 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) 283 return ENOMEM; 284 285 memset(mapstore, 0, mapsize); 286 map = (struct sgimips_bus_dmamap *)mapstore; 287 map->_dm_size = size; 288 map->_dm_segcnt = nsegments; 289 map->_dm_maxsegsz = maxsegsz; 290 map->_dm_boundary = boundary; 291 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT); 292 map->dm_mapsize = 0; /* no valid mappings */ 293 map->dm_nsegs = 0; 294 295 *dmamp = map; 296 return 0; 297 } 298 299 /* 300 * Common function for DMA map destruction. May be called by bus-specific 301 * DMA map destruction functions. 302 */ 303 void 304 _bus_dmamap_destroy(t, map) 305 bus_dma_tag_t t; 306 bus_dmamap_t map; 307 { 308 309 free(map, M_DMAMAP); 310 } 311 extern paddr_t kvtophys(vaddr_t); /* XXX */ 312 313 /* 314 * Utility function to load a linear buffer. lastaddrp holds state 315 * between invocations (for multiple-buffer loads). segp contains 316 * the starting segment on entrance, and the ending segment on exit. 317 * first indicates if this is the first invocation of this function. 318 */ 319 int 320 _bus_dmamap_load_buffer(map, buf, buflen, p, flags, lastaddrp, segp, first) 321 bus_dmamap_t map; 322 void *buf; 323 bus_size_t buflen; 324 struct proc *p; 325 int flags; 326 vaddr_t *lastaddrp; 327 int *segp; 328 int first; 329 { 330 bus_size_t sgsize; 331 bus_addr_t curaddr, lastaddr, baddr, bmask; 332 vaddr_t vaddr = (vaddr_t)buf; 333 int seg; 334 335 lastaddr = *lastaddrp; 336 bmask = ~(map->_dm_boundary - 1); 337 338 for (seg = *segp; buflen > 0 ; ) { 339 /* 340 * Get the physical address for this segment. 341 */ 342 if (p != NULL) 343 (void) pmap_extract(p->p_vmspace->vm_map.pmap, 344 vaddr, &curaddr); 345 else 346 curaddr = kvtophys(vaddr); 347 348 /* 349 * Compute the segment size, and adjust counts. 350 */ 351 sgsize = NBPG - ((u_long)vaddr & PGOFSET); 352 if (buflen < sgsize) 353 sgsize = buflen; 354 355 /* 356 * Make sure we don't cross any boundaries. 357 */ 358 if (map->_dm_boundary > 0) { 359 baddr = (curaddr + map->_dm_boundary) & bmask; 360 if (sgsize > (baddr - curaddr)) 361 sgsize = (baddr - curaddr); 362 } 363 364 /* 365 * Insert chunk into a segment, coalescing with 366 * the previous segment if possible. 367 */ 368 if (first) { 369 map->dm_segs[seg].ds_addr = curaddr; 370 map->dm_segs[seg].ds_len = sgsize; 371 map->dm_segs[seg]._ds_vaddr = vaddr; 372 first = 0; 373 } else { 374 if (curaddr == lastaddr && 375 (map->dm_segs[seg].ds_len + sgsize) <= 376 map->_dm_maxsegsz && 377 (map->_dm_boundary == 0 || 378 (map->dm_segs[seg].ds_addr & bmask) == 379 (curaddr & bmask))) 380 map->dm_segs[seg].ds_len += sgsize; 381 else { 382 if (++seg >= map->_dm_segcnt) 383 break; 384 map->dm_segs[seg].ds_addr = curaddr; 385 map->dm_segs[seg].ds_len = sgsize; 386 map->dm_segs[seg]._ds_vaddr = vaddr; 387 } 388 } 389 390 lastaddr = curaddr + sgsize; 391 vaddr += sgsize; 392 buflen -= sgsize; 393 } 394 395 *segp = seg; 396 *lastaddrp = lastaddr; 397 398 /* 399 * Did we fit? 400 */ 401 if (buflen != 0) 402 return EFBIG; /* XXX Better return value here? */ 403 404 return 0; 405 } 406 407 /* 408 * Common function for loading a direct-mapped DMA map with a linear 409 * buffer. 410 */ 411 int 412 _bus_dmamap_load(t, map, buf, buflen, p, flags) 413 bus_dma_tag_t t; 414 bus_dmamap_t map; 415 void *buf; 416 bus_size_t buflen; 417 struct proc *p; 418 int flags; 419 { 420 vaddr_t lastaddr; 421 int seg, error; 422 423 /* 424 * Make sure that on error condition we return "no valid mappings". 425 */ 426 map->dm_mapsize = 0; 427 map->dm_nsegs = 0; 428 429 if (buflen > map->_dm_size) 430 return EINVAL; 431 432 seg = 0; 433 error = _bus_dmamap_load_buffer(map, buf, buflen, 434 p, flags, &lastaddr, &seg, 1); 435 if (error == 0) { 436 map->dm_mapsize = buflen; 437 map->dm_nsegs = seg + 1; 438 439 /* 440 * For linear buffers, we support marking the mapping 441 * as COHERENT. 442 * 443 * XXX Check TLB entries for cache-inhibit bits? 444 */ 445 if (buf >= (void *)MIPS_KSEG1_START && 446 buf < (void *)MIPS_KSEG2_START) 447 map->_dm_flags |= SGIMIPS_DMAMAP_COHERENT; 448 } 449 return error; 450 } 451 452 /* 453 * Like _bus_dmamap_load(), but for mbufs. 454 */ 455 int 456 _bus_dmamap_load_mbuf(t, map, m0, flags) 457 bus_dma_tag_t t; 458 bus_dmamap_t map; 459 struct mbuf *m0; 460 int flags; 461 { 462 vaddr_t lastaddr; 463 int seg, error, first; 464 struct mbuf *m; 465 466 /* 467 * Make sure that on error condition we return "no valid mappings." 468 */ 469 map->dm_mapsize = 0; 470 map->dm_nsegs = 0; 471 472 #ifdef DIAGNOSTIC 473 if ((m0->m_flags & M_PKTHDR) == 0) 474 panic("_bus_dmamap_load_mbuf: no packet header"); 475 #endif 476 477 if (m0->m_pkthdr.len > map->_dm_size) 478 return EINVAL; 479 480 first = 1; 481 seg = 0; 482 error = 0; 483 for (m = m0; m != NULL && error == 0; m = m->m_next) { 484 error = _bus_dmamap_load_buffer(map, 485 m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first); 486 first = 0; 487 } 488 if (error == 0) { 489 map->dm_mapsize = m0->m_pkthdr.len; 490 map->dm_nsegs = seg + 1; 491 } 492 return error; 493 } 494 495 /* 496 * Like _bus_dmamap_load(), but for uios. 497 */ 498 int 499 _bus_dmamap_load_uio(t, map, uio, flags) 500 bus_dma_tag_t t; 501 bus_dmamap_t map; 502 struct uio *uio; 503 int flags; 504 { 505 vaddr_t lastaddr; 506 int seg, i, error, first; 507 bus_size_t minlen, resid; 508 struct proc *p = NULL; 509 struct iovec *iov; 510 caddr_t addr; 511 512 /* 513 * Make sure that on error condition we return "no valid mappings." 514 */ 515 map->dm_mapsize = 0; 516 map->dm_nsegs = 0; 517 518 resid = uio->uio_resid; 519 iov = uio->uio_iov; 520 521 if (uio->uio_segflg == UIO_USERSPACE) { 522 p = uio->uio_procp; 523 #ifdef DIAGNOSTIC 524 if (p == NULL) 525 panic("_bus_dmamap_load_uio: USERSPACE but no proc"); 526 #endif 527 } 528 529 first = 1; 530 seg = 0; 531 error = 0; 532 for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { 533 /* 534 * Now at the first iovec to load. Load each iovec 535 * until we have exhausted the residual count. 536 */ 537 minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; 538 addr = (caddr_t)iov[i].iov_base; 539 540 error = _bus_dmamap_load_buffer(map, addr, minlen, 541 p, flags, &lastaddr, &seg, first); 542 first = 0; 543 544 resid -= minlen; 545 } 546 if (error == 0) { 547 map->dm_mapsize = uio->uio_resid; 548 map->dm_nsegs = seg + 1; 549 } 550 return error; 551 } 552 553 /* 554 * Like _bus_dmamap_load(), but for raw memory. 555 */ 556 int 557 _bus_dmamap_load_raw(t, map, segs, nsegs, size, flags) 558 bus_dma_tag_t t; 559 bus_dmamap_t map; 560 bus_dma_segment_t *segs; 561 int nsegs; 562 bus_size_t size; 563 int flags; 564 { 565 566 panic("_bus_dmamap_load_raw: not implemented"); 567 } 568 569 /* 570 * Common function for unloading a DMA map. May be called by 571 * chipset-specific DMA map unload functions. 572 */ 573 void 574 _bus_dmamap_unload(t, map) 575 bus_dma_tag_t t; 576 bus_dmamap_t map; 577 { 578 579 /* 580 * No resources to free; just mark the mappings as 581 * invalid. 582 */ 583 map->dm_mapsize = 0; 584 map->dm_nsegs = 0; 585 map->_dm_flags &= ~SGIMIPS_DMAMAP_COHERENT; 586 } 587 588 /* 589 * Common function for DMA map synchronization. May be called 590 * by chipset-specific DMA map synchronization functions. 591 */ 592 void 593 _bus_dmamap_sync(t, map, offset, len, ops) 594 bus_dma_tag_t t; 595 bus_dmamap_t map; 596 bus_addr_t offset; 597 bus_size_t len; 598 int ops; 599 { 600 bus_size_t minlen; 601 bus_addr_t addr; 602 int i; 603 604 /* 605 * Mising PRE and POST operations is not allowed. 606 */ 607 if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 && 608 (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0) 609 panic("_bus_dmamap_sync: mix PRE and POST"); 610 611 #ifdef DIAGNOSTIC 612 if (offset >= map->dm_mapsize) 613 panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)", 614 offset, map->dm_mapsize); 615 if (len == 0 || (offset + len) > map->dm_mapsize) 616 panic("_bus_dmamap_sync: bad length"); 617 #endif 618 619 /* 620 * Flush the write buffer. 621 */ 622 wbflush(); 623 624 /* 625 * If the mapping is of COHERENT DMA-safe memory, no cache 626 * flush is necessary. 627 */ 628 if (map->_dm_flags & SGIMIPS_DMAMAP_COHERENT) 629 return; 630 631 /* 632 * No cache flushes are necessary if we're only doing 633 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE). 634 */ 635 if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0) 636 return; 637 638 /* 639 * Flush data cache for PREREAD. This has the side-effect 640 * of invalidating the cache. Done at PREREAD since it 641 * causes the cache line(s) to be written back to memory. 642 * 643 * Flush data cache for PREWRITE, so that the contents of 644 * the data buffer in memory reflect reality. 645 * 646 * Given the test above, we know we're doing one of these 647 * two operations, so no additional tests are necessary. 648 */ 649 650 for (i = 0; i < map->dm_nsegs && len != 0; i++) { 651 /* Find the beginning segment. */ 652 if (offset >= map->dm_segs[i].ds_len) { 653 offset -= map->dm_segs[i].ds_len; 654 continue; 655 } 656 657 /* 658 * Now at the first segment to sync; nail 659 * each segment until we have exhausted the 660 * length. 661 */ 662 minlen = len < map->dm_segs[i].ds_len - offset ? 663 len : map->dm_segs[i].ds_len - offset; 664 665 addr = map->dm_segs[i]._ds_vaddr; 666 667 #ifdef BUS_DMA_DEBUG 668 printf("bus_dmamap_sync: flushing segment %d " 669 "(0x%lx+%lx, 0x%lx+0x%lx) (olen = %ld)...", i, 670 addr, offset, addr, offset + minlen - 1, len); 671 #endif 672 #if 0 673 MachFlushDCache(addr + offset, minlen); 674 #endif 675 #if 1 676 mips_dcache_wbinv_range(addr + offset, minlen); 677 #endif 678 #if 0 679 MachFlushCache(); 680 #endif 681 682 #ifdef BUS_DMA_DEBUG 683 printf("\n"); 684 #endif 685 offset = 0; 686 len -= minlen; 687 } 688 } 689 690 /* 691 * Common function for DMA-safe memory allocation. May be called 692 * by bus-specific DMA memory allocation functions. 693 */ 694 int 695 _bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags) 696 bus_dma_tag_t t; 697 bus_size_t size, alignment, boundary; 698 bus_dma_segment_t *segs; 699 int nsegs; 700 int *rsegs; 701 int flags; 702 { 703 extern paddr_t avail_start, avail_end; 704 vaddr_t curaddr, lastaddr; 705 psize_t high; 706 struct vm_page *m; 707 struct pglist mlist; 708 int curseg, error; 709 710 /* Always round the size. */ 711 size = round_page(size); 712 713 high = avail_end - PAGE_SIZE; 714 715 /* 716 * Allocate pages from the VM system. 717 */ 718 error = uvm_pglistalloc(size, avail_start, high, alignment, boundary, 719 &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); 720 if (error) 721 return error; 722 723 /* 724 * Compute the location, size, and number of segments actually 725 * returned by the VM code. 726 */ 727 m = mlist.tqh_first; 728 curseg = 0; 729 lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m); 730 segs[curseg].ds_len = PAGE_SIZE; 731 m = m->pageq.tqe_next; 732 733 for (; m != NULL; m = m->pageq.tqe_next) { 734 curaddr = VM_PAGE_TO_PHYS(m); 735 #ifdef DIAGNOSTIC 736 if (curaddr < avail_start || curaddr >= high) { 737 printf("uvm_pglistalloc returned non-sensical" 738 " address 0x%lx\n", curaddr); 739 panic("_bus_dmamem_alloc"); 740 } 741 #endif 742 if (curaddr == (lastaddr + PAGE_SIZE)) 743 segs[curseg].ds_len += PAGE_SIZE; 744 else { 745 curseg++; 746 segs[curseg].ds_addr = curaddr; 747 segs[curseg].ds_len = PAGE_SIZE; 748 } 749 lastaddr = curaddr; 750 } 751 752 *rsegs = curseg + 1; 753 754 return 0; 755 } 756 757 /* 758 * Common function for freeing DMA-safe memory. May be called by 759 * bus-specific DMA memory free functions. 760 */ 761 void 762 _bus_dmamem_free(t, segs, nsegs) 763 bus_dma_tag_t t; 764 bus_dma_segment_t *segs; 765 int nsegs; 766 { 767 struct vm_page *m; 768 bus_addr_t addr; 769 struct pglist mlist; 770 int curseg; 771 772 /* 773 * Build a list of pages to free back to the VM system. 774 */ 775 TAILQ_INIT(&mlist); 776 for (curseg = 0; curseg < nsegs; curseg++) { 777 for (addr = segs[curseg].ds_addr; 778 addr < (segs[curseg].ds_addr + segs[curseg].ds_len); 779 addr += PAGE_SIZE) { 780 m = PHYS_TO_VM_PAGE(addr); 781 TAILQ_INSERT_TAIL(&mlist, m, pageq); 782 } 783 } 784 785 uvm_pglistfree(&mlist); 786 } 787 788 /* 789 * Common function for mapping DMA-safe memory. May be called by 790 * bus-specific DMA memory map functions. 791 */ 792 int 793 _bus_dmamem_map(t, segs, nsegs, size, kvap, flags) 794 bus_dma_tag_t t; 795 bus_dma_segment_t *segs; 796 int nsegs; 797 size_t size; 798 caddr_t *kvap; 799 int flags; 800 { 801 vaddr_t va; 802 bus_addr_t addr; 803 int curseg; 804 805 /* 806 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid 807 * TLB thrashing. 808 */ 809 if (nsegs == 1) { 810 if (flags & BUS_DMA_COHERENT) 811 *kvap = (caddr_t)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr); 812 else 813 *kvap = (caddr_t)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr); 814 return 0; 815 } 816 817 size = round_page(size); 818 819 va = uvm_km_valloc(kernel_map, size); 820 821 if (va == 0) 822 return (ENOMEM); 823 824 *kvap = (caddr_t)va; 825 826 for (curseg = 0; curseg < nsegs; curseg++) { 827 for (addr = segs[curseg].ds_addr; 828 addr < (segs[curseg].ds_addr + segs[curseg].ds_len); 829 addr += NBPG, va += NBPG, size -= NBPG) { 830 if (size == 0) 831 panic("_bus_dmamem_map: size botch"); 832 pmap_enter(pmap_kernel(), va, addr, 833 VM_PROT_READ | VM_PROT_WRITE, 834 VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED); 835 836 /* XXX Do something about COHERENT here. */ 837 } 838 } 839 pmap_update(pmap_kernel()); 840 841 return 0; 842 } 843 844 /* 845 * Common function for unmapping DMA-safe memory. May be called by 846 * bus-specific DMA memory unmapping functions. 847 */ 848 void 849 _bus_dmamem_unmap(t, kva, size) 850 bus_dma_tag_t t; 851 caddr_t kva; 852 size_t size; 853 { 854 855 #ifdef DIAGNOSTIC 856 if ((u_long)kva & PGOFSET) 857 panic("_bus_dmamem_unmap"); 858 #endif 859 860 /* 861 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e. 862 * not in KSEG2). 863 */ 864 if (kva >= (caddr_t)MIPS_KSEG0_START && 865 kva < (caddr_t)MIPS_KSEG2_START) 866 return; 867 868 size = round_page(size); 869 uvm_km_free(kernel_map, (vaddr_t)kva, size); 870 } 871 872 /* 873 * Common functin for mmap(2)'ing DMA-safe memory. May be called by 874 * bus-specific DMA mmap(2)'ing functions. 875 */ 876 paddr_t 877 _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags) 878 bus_dma_tag_t t; 879 bus_dma_segment_t *segs; 880 int nsegs; 881 off_t off; 882 int prot, flags; 883 { 884 int i; 885 886 for (i = 0; i < nsegs; i++) { 887 #ifdef DIAGNOSTIC 888 if (off & PGOFSET) 889 panic("_bus_dmamem_mmap: offset unaligned"); 890 if (segs[i].ds_addr & PGOFSET) 891 panic("_bus_dmamem_mmap: segment unaligned"); 892 if (segs[i].ds_len & PGOFSET) 893 panic("_bus_dmamem_mmap: segment size not multiple" 894 " of page size"); 895 #endif 896 if (off >= segs[i].ds_len) { 897 off -= segs[i].ds_len; 898 continue; 899 } 900 901 return mips_btop((caddr_t)segs[i].ds_addr + off); 902 } 903 904 /* Page not found. */ 905 return -1; 906 } 907