/* $OpenBSD: bus_dma.c,v 1.36 2018/01/11 15:49:34 visa Exp $ */
/* $NetBSD: bus_dma.c,v 1.40 2000/07/17 04:47:56 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct(bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preserving
	 * ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;
	map->_dm_window = NULL;

	*dmamp = map;
	return (0);
}
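
/*
 * Usage sketch (illustrative, not part of this file): a driver built on
 * this API would typically create a map at attach time and destroy it on
 * detach.  The softc and tag `sc_dmat' below are hypothetical.
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */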

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	size_t mapsize;

	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	free(map, M_DEVBUF, mapsize);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	pmap_t pmap;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * If the physical address is beyond the current DMA
		 * window, fail so that the caller can fall back to
		 * the next (e.g. SGMAP) window.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		if (map->_dm_maxsegsz < sgsize)
			sgsize = map->_dm_maxsegsz;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
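
		/*
		 * Worked example (illustrative): with a 64KB boundary,
		 * bmask is ~0xffff.  For curaddr 0x1ff00, baddr is
		 * (0x1ff00 + 0x10000) & ~0xffff == 0x20000, so sgsize
		 * is clipped to 0x100 and the segment ends exactly at
		 * the boundary.
		 */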

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
			    curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
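
/*
 * Illustrative note: callers thread state through this function when
 * loading multiple buffers into one map.  A hypothetical two-buffer
 * load would look like:
 *
 *	paddr_t lastaddr;
 *	int seg = 0;
 *
 *	error = _bus_dmamap_load_buffer_direct(t, map, buf0, len0,
 *	    p, flags, &lastaddr, &seg, 1);
 *	if (error == 0)
 *		error = _bus_dmamap_load_buffer_direct(t, map, buf1, len1,
 *		    p, flags, &lastaddr, &seg, 0);
 *
 * lastaddr and seg carry over so that buf1 can coalesce with the tail
 * of buf0, exactly as the mbuf and uio loaders below do per element.
 */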

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions; the chipset's
 * direct-mapped window base (t->_wbase) is OR'd into each physical
 * address.
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}
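
/*
 * Usage sketch (illustrative): a typical driver transfer built on the
 * load/sync/unload cycle.  The softc, buffer and length are hypothetical.
 *
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (EAGAIN);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the DMA transfer, wait for completion ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */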

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer_direct(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}
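
/*
 * Usage sketch (illustrative): network drivers load outgoing packets
 * this way from their start routine.  The softc, `ifp' and map are
 * hypothetical.
 *
 *	struct mbuf *m;
 *
 *	m = ifq_dequeue(&ifp->if_snd);
 *	if (m == NULL)
 *		return;
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
 *		m_freem(m);
 *		return;
 *	}
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);
 */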

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio_direct: "
			    "USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}
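
/*
 * Usage sketch (illustrative): a character device read/write path can
 * map a user I/O request directly, assuming a hypothetical softc:
 *
 *	if (bus_dmamap_load_uio(sc->sc_dmat, map, uio,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (EAGAIN);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREREAD);
 */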

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, op)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int op;
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
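
/*
 * Illustrative note: on this direct-mapped, cache-coherent hardware a
 * single memory barrier suffices for every operation, but portable
 * callers must still bracket transfers with the matching sync ops:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... device writes into the buffer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 */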

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)-1));
}
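
/*
 * Usage sketch (illustrative): the usual allocate/map lifecycle for a
 * DMA-safe descriptor area, with hypothetical names:
 *
 *	bus_dma_segment_t seg;
 *	caddr_t kva;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
 *	    &kva, BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *		return (ENOMEM);
 *	}
 */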

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	paddr_t low;
	paddr_t high;
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned nonsensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
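
/*
 * Usage sketch (illustrative): a bus with a limited DMA window can
 * restrict allocations to a physical range, e.g. the first 16MB for a
 * hypothetical ISA-style constraint:
 *
 *	error = _bus_dmamem_alloc_range(t, size, PAGE_SIZE, 0,
 *	    segs, nsegs, &rsegs, BUS_DMA_NOWAIT,
 *	    (paddr_t)0, (paddr_t)0xffffff);
 */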

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, error;
	const struct kmem_dyn_mode *kd;

	/*
	 * If we're only mapping 1 segment, use K0SEG to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
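
/*
 * Illustrative note: the K0SEG fast path above means a single-segment
 * allocation costs no kernel map entries at all; unmapping checks for
 * this case below.  A hypothetical physical segment at 0x200000 maps to
 * the direct-mapped address ALPHA_PHYS_TO_K0SEG(0x200000) with no
 * pmap_enter() calls.
 */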

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (segs[i].ds_addr + off);
	}

	/* Page not found. */
	return (-1);
}
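
/*
 * Worked example (illustrative): with segs[0] = { 0x100000, 0x2000 }
 * and segs[1] = { 0x300000, 0x1000 }, an offset of 0x2000 skips the
 * first segment entirely and returns 0x300000, the physical address a
 * device pager then maps for the caller.
 */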