/* $NetBSD: bus_dma.c,v 1.5 2002/09/27 15:36:14 provos Exp $ */

/*
 * This file was taken from alpha/common/bus_dma.c.
 * It should probably be re-synced when needed.
 * Darrin B. Jewell <dbj@netbsd.org>  Sat Jul 31 06:11:33 UTC 1999
 * original cvs id: NetBSD: bus_dma.c,v 1.31 1999/07/08 18:05:23 thorpej Exp
 */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.5 2002/09/27 15:36:14 provos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

#define _M68K_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <m68k/cacheops.h>

int	_bus_dmamap_load_buffer_direct_common __P((bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int));

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct m68k_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct m68k_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct m68k_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
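
/*
 * Example (sketch): typical driver-level use of the create/destroy pair
 * through the bus_dmamap_create(9)/bus_dmamap_destroy(9) interface that
 * these functions back on this port.  The example_* functions and the
 * sizes used here are hypothetical placeholders.
 */
#if 0
static int
example_dmamap_setup(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

	/*
	 * One 64KB transfer, in at most 8 segments, each segment at
	 * most 64KB, no boundary restriction.
	 */
	return (bus_dmamap_create(dmat, 65536, 8, 65536, 0,
	    BUS_DMA_NOWAIT, mapp));
}

static void
example_dmamap_teardown(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	bus_dmamap_destroy(dmat, map);
}
#endif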

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct_common(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	boolean_t rv;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			rv = pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr, &curaddr);
		else
			rv = pmap_extract(pmap_kernel(), vaddr, &curaddr);
		KASSERT(rv);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
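
/*
 * Example (sketch): how the helper above is reached in practice.  A
 * driver loads a kernel buffer with bus_dmamap_load(9); EFBIG means the
 * buffer needed more than _dm_segcnt segments.  example_load_buffer()
 * is a hypothetical placeholder.
 */
#if 0
static int
example_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t len)
{
	int error;

	/* NULL proc pointer: "buf" is a kernel virtual address. */
	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many discontiguous pages; split or bounce the I/O. */
	}
	return (error);
}
#endif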

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
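
/*
 * Example (sketch): a network driver would reach the mbuf loader above
 * through bus_dmamap_load_mbuf(9) when setting up a transmit job, then
 * walk dm_segs[] to fill its descriptors.  example_fill_desc() is a
 * hypothetical placeholder.
 */
#if 0
static int
example_load_tx_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0)
{
	int error, i;

	error = bus_dmamap_load_mbuf(dmat, map, m0, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	for (i = 0; i < map->dm_nsegs; i++)
		example_fill_desc(i, map->dm_segs[i].ds_addr,
		    map->dm_segs[i].ds_len);
	return (0);
}
#endif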

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio_direct: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	/* @@@ This routine doesn't enforce the map boundary requirement.
	 * @@@ Perhaps it should return an error instead of panicking.
	 */

#ifdef DIAGNOSTIC
	if (map->_dm_size < size) {
		panic("_bus_dmamap_load_raw_direct: size is too large for map");
	}
	if (map->_dm_segcnt < nsegs) {
		panic("_bus_dmamap_load_raw_direct: too many segments for map");
	}
#endif

	{
		int i;
		for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
			/* Check the incoming segment, not the (stale) destination. */
			if (map->_dm_maxsegsz < segs[i].ds_len) {
				panic("_bus_dmamap_load_raw_direct: segment too large for map");
			}
#endif
			map->dm_segs[i] = segs[i];
		}
	}

	map->dm_nsegs = nsegs;
	map->dm_mapsize = size;

	return (0);
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
#if defined(M68040) || defined(M68060)
	int i;
#endif

	/*
	 * Flush/purge the cache.
	 * @@@ Should probably be fixed to use the offset and len args.
	 */

#if defined(M68040) || defined(M68060)
	if (ops & BUS_DMASYNC_PREWRITE) {
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			/*
			 * If the pointers are unaligned, it's OK to flush
			 * the surrounding cache lines as well.
			 */
			p -= p % 16;
			if (e % 16)
				e += 16 - (e % 16);
#ifdef DIAGNOSTIC
			if ((p % 16) || (e % 16)) {
				panic("unaligned address in _bus_dmamap_sync while flushing."
				    " address=0x%08lx, end=0x%08lx, ops=0x%x", p, e, ops);
			}
#endif
			while ((p < e) && (p % NBPG)) {
				DCFL(p);		/* flush cache line */
				p += 16;
			}
			while (p + NBPG <= e) {
				DCFP(p);		/* flush page */
				p += NBPG;
			}
			while (p < e) {
				DCFL(p);		/* flush cache line */
				p += 16;
			}
#ifdef DIAGNOSTIC
			if (p != e) {
				panic("overrun in _bus_dmamap_sync while flushing."
				    " address=0x%08lx, end=0x%08lx, ops=0x%x", p, e, ops);
			}
#endif
		}
	}
#endif	/* M68040 || M68060 */

	if (ops & BUS_DMASYNC_PREREAD) {
		switch (cputype) {
		default:
#ifdef M68020
		case CPU_68020:
			break;
#endif
#ifdef M68030
		case CPU_68030:
			break;
#endif
#if defined(M68040) || defined(M68060)
#ifdef M68040
		case CPU_68040:
#endif
#ifdef M68060
		case CPU_68060:
#endif
			for (i = 0; i < map->dm_nsegs; i++) {
				bus_addr_t p = map->dm_segs[i].ds_addr;
				bus_addr_t e = p + map->dm_segs[i].ds_len;
				if (p % 16) {
					p -= p % 16;
					DCFL(p);
				}
				if (e % 16) {
					e += 16 - (e % 16);
					DCFL(e - 16);
				}
#ifdef DIAGNOSTIC
				if ((p % 16) || (e % 16)) {
					panic("unaligned address in _bus_dmamap_sync while purging."
					    " address=0x%08lx, end=0x%08lx, ops=0x%x", p, e, ops);
				}
#endif
				while ((p < e) && (p % NBPG)) {
					DCPL(p);	/* purge cache line */
					p += 16;
				}
				while (p + NBPG <= e) {
					DCPP(p);	/* purge page */
					p += NBPG;
				}
				while (p < e) {
					DCPL(p);	/* purge cache line */
					p += 16;
				}
#ifdef DIAGNOSTIC
				if (p != e) {
					panic("overrun in _bus_dmamap_sync while purging."
					    " address=0x%08lx, end=0x%08lx, ops=0x%x", p, e, ops);
				}
#endif
			}
			break;
#endif	/* M68040 || M68060 */
		}
	}
}
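
/*
 * Example (sketch): expected ordering of bus_dmamap_sync(9) calls around
 * a transfer with this implementation: PREWRITE flushes dirty lines
 * before the device reads memory, PREREAD purges lines so the CPU does
 * not see stale data after the device has written.
 * example_start_and_wait() is a hypothetical placeholder.
 */
#if 0
static void
example_run_transfer(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	bus_dmamap_sync(dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	example_start_and_wait();	/* device DMAs to/from the buffer */
	bus_dmamap_sync(dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, map);
}
#endif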

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start, avail_end;
	paddr_t curaddr, lastaddr, high;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
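
/*
 * Example (sketch): allocating and mapping a DMA-safe descriptor area
 * with the bus_dmamem_*(9) interface backed by the functions above.
 * The one-page size and example_alloc_ring() are illustrative only.
 */
#if 0
static int
example_alloc_ring(bus_dma_tag_t dmat, bus_dma_segment_t *seg, caddr_t *kvap)
{
	int error, rseg;

	error = bus_dmamem_alloc(dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	error = bus_dmamem_map(dmat, seg, rseg, PAGE_SIZE, kvap,
	    BUS_DMA_NOWAIT);
	if (error)
		bus_dmamem_free(dmat, seg, rseg);
	return (error);
}
#endif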

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (m68k_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}