/* $NetBSD: bus_dma.c,v 1.1 2002/04/10 04:36:20 briggs Exp $ */

/*
 * This file was taken from alpha/common/bus_dma.c and
 * should probably be re-synced when needed.
 * Darrin B. Jewell <dbj@netbsd.org> Sat Jul 31 06:11:33 UTC 1999
 * original cvs id: NetBSD: bus_dma.c,v 1.31 1999/07/08 18:05:23 thorpej Exp
 */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.1 2002/04/10 04:36:20 briggs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

#define _M68K_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <m68k/cacheops.h>

int	_bus_dmamap_load_buffer_direct_common __P((bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int));

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct m68k_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
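	/*
	 * For illustration: with nsegments == 8, the malloc below asks
	 * for sizeof(struct m68k_bus_dmamap) plus room for 7 more
	 * bus_dma_segment_t entries, since dm_segs[0] already lives
	 * inside the map structure itself.
	 */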
	mapsize = sizeof(struct m68k_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct m68k_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}
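
/*
 * A minimal usage sketch, for illustration only: how a caller would
 * typically pair map creation with destruction.  The function below is
 * hypothetical and not referenced anywhere; loading, syncing and the
 * actual DMA happen between the two calls.
 */
#if 0
static int
example_dmamap_lifecycle(bus_dma_tag_t dmat)
{
	bus_dmamap_t map;
	int error;

	/* One map of up to 8 segments, each at most 64KB, no boundary. */
	error = bus_dmamap_create(dmat, 65536, 8, 65536, 0,
	    BUS_DMA_NOWAIT, &map);
	if (error)
		return (error);
	/* ... bus_dmamap_load(), bus_dmamap_sync(), perform the DMA ... */
	bus_dmamap_destroy(dmat, map);
	return (0);
}
#endif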

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct_common(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	boolean_t rv;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			rv = pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr, &curaddr);
		else
			rv = pmap_extract(pmap_kernel(), vaddr, &curaddr);
		KASSERT(rv);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
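		/*
		 * For illustration: with a 64KB boundary
		 * (_dm_boundary == 0x10000) and curaddr == 0x1fc00,
		 * baddr comes out as 0x20000, so sgsize is clipped to
		 * 0x400 and the remainder of the buffer starts a new
		 * segment on the far side of the boundary.
		 */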

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  May be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
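
/*
 * An illustrative sketch (hypothetical, not referenced anywhere): a
 * network driver would typically load an outgoing packet chain and
 * flush the cache before starting the transmit DMA.
 */
#if 0
static int
example_load_tx_mbuf(bus_dma_tag_t dmat, bus_dmamap_t txmap, struct mbuf *m0)
{
	int error;

	error = bus_dmamap_load_mbuf(dmat, txmap, m0, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	bus_dmamap_sync(dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* ... hand txmap->dm_segs[0 .. dm_nsegs - 1] to the chip ... */
	return (0);
}
#endif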

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio_direct: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	int i;

	/*
	 * @@@ This routine doesn't enforce the map boundary requirement;
	 * @@@ perhaps it should return an error instead of panicking.
	 */

#ifdef DIAGNOSTIC
	if (map->_dm_size < size) {
		panic("_bus_dmamap_load_raw_direct: size is too large for map");
	}
	if (map->_dm_segcnt < nsegs) {
		panic("_bus_dmamap_load_raw_direct: too many segments for map");
	}
#endif

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		/* Check the incoming segment, not the stale map contents. */
		if (map->_dm_maxsegsz < segs[i].ds_len) {
			panic("_bus_dmamap_load_raw_direct: segment too large for map");
		}
#endif
		map->dm_segs[i] = segs[i];
	}

	map->dm_nsegs   = nsegs;
	map->dm_mapsize = size;

	return (0);
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	/*
	 * Flush/purge the cache.
	 * @@@ should probably be fixed to use the offset and len args.
	 */

	if (ops & BUS_DMASYNC_PREWRITE) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			/*
			 * If the pointers are unaligned, it is OK to
			 * flush the surrounding cache lines.
			 */
			p -= p % 16;
			if (e % 16)
				e += 16 - (e % 16);
#ifdef DIAGNOSTIC
			if ((p % 16) || (e % 16)) {
				panic("unaligned address in _bus_dmamap_sync while flushing.\n"
				    "address=0x%08lx, end=0x%08lx, ops=0x%x", p, e, ops);
			}
#endif
			while ((p < e) && (p % NBPG)) {
				DCFL(p);		/* flush cache line */
				p += 16;
			}
			while (p + NBPG <= e) {
				DCFP(p);		/* flush page */
				p += NBPG;
			}
			while (p < e) {
				DCFL(p);		/* flush cache line */
				p += 16;
			}
#ifdef DIAGNOSTIC
			if (p != e) {
				panic("overrun in _bus_dmamap_sync while flushing.\n"
				    "address=0x%08lx, end=0x%08lx, ops=0x%x", p, e, ops);
			}
#endif
		}
	}

	if (ops & BUS_DMASYNC_PREREAD) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			if (p % 16) {
				p -= p % 16;
				DCFL(p);
			}
			if (e % 16) {
				e += 16 - (e % 16);
				DCFL(e - 16);
			}
#ifdef DIAGNOSTIC
			if ((p % 16) || (e % 16)) {
				panic("unaligned address in _bus_dmamap_sync while purging.\n"
				    "address=0x%08lx, end=0x%08lx, ops=0x%x", p, e, ops);
			}
#endif
			while ((p < e) && (p % NBPG)) {
				DCPL(p);		/* purge cache line */
				p += 16;
			}
			while (p + NBPG <= e) {
				DCPP(p);		/* purge page */
				p += NBPG;
			}
			while (p < e) {
				DCPL(p);		/* purge cache line */
				p += 16;
			}
#ifdef DIAGNOSTIC
			if (p != e) {
				panic("overrun in _bus_dmamap_sync while purging.\n"
				    "address=0x%08lx, end=0x%08lx, ops=0x%x", p, e, ops);
			}
#endif
		}
	}
}
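
/*
 * An illustrative ordering sketch (hypothetical, not referenced
 * anywhere): for a buffer the device writes and the CPU reads, the map
 * is purged before the DMA is started, and the data is only inspected
 * after the matching POSTREAD sync.
 */
#if 0
static void
example_rx_sync(bus_dma_tag_t dmat, bus_dmamap_t rxmap)
{
	/* Before starting the receive DMA. */
	bus_dmamap_sync(dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	/* ... start the DMA and wait for it to complete ... */
	bus_dmamap_sync(dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	/* The CPU may now look at the received data. */
}
#endif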

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start, avail_end;
	paddr_t curaddr, lastaddr, high;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
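
/*
 * A minimal allocation sketch (hypothetical, not referenced anywhere):
 * a driver setting up a page of DMA-safe descriptor memory would chain
 * the allocator and the mapping function roughly as follows, unwinding
 * on failure.
 */
#if 0
static int
example_alloc_descriptors(bus_dma_tag_t dmat, caddr_t *kvap)
{
	bus_dma_segment_t seg;
	int rseg, error;

	error = bus_dmamem_alloc(dmat, NBPG, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error)
		return (error);
	error = bus_dmamem_map(dmat, &seg, rseg, NBPG, kvap,
	    BUS_DMA_NOWAIT);
	if (error)
		bus_dmamem_free(dmat, &seg, rseg);
	return (error);
}
#endif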

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (m68k_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}