xref: /netbsd/sys/arch/mvme68k/mvme68k/bus_dma.c (revision c4a72b64)
1 /* $NetBSD: bus_dma.c,v 1.20 2002/06/02 14:44:37 drochner Exp $	*/
2 
3 /*
4  * This file was taken from next68k/dev/bus_dma.c, which was originally
5  * taken from alpha/common/bus_dma.c.
6  * It should probably be re-synced when needed.
7  * original cvs id: NetBSD: bus_dma.c,v 1.13 1999/11/13 00:30:40 thorpej Exp
8  */
9 
10 /*-
11  * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
12  * All rights reserved.
13  *
14  * This code is derived from software contributed to The NetBSD Foundation
15  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
16  * NASA Ames Research Center.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions
20  * are met:
21  * 1. Redistributions of source code must retain the above copyright
22  *    notice, this list of conditions and the following disclaimer.
23  * 2. Redistributions in binary form must reproduce the above copyright
24  *    notice, this list of conditions and the following disclaimer in the
25  *    documentation and/or other materials provided with the distribution.
26  * 3. All advertising materials mentioning features or use of this software
27  *    must display the following acknowledgement:
28  *	This product includes software developed by the NetBSD
29  *	Foundation, Inc. and its contributors.
30  * 4. Neither the name of The NetBSD Foundation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
35  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
36  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
37  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
38  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
39  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
40  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
41  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
42  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
43  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
44  * POSSIBILITY OF SUCH DAMAGE.
45  */
46 
47 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
48 
49 __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.20 2002/06/02 14:44:37 drochner Exp $");
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/device.h>
55 #include <sys/malloc.h>
56 #include <sys/proc.h>
57 #include <sys/mbuf.h>
58 #include <sys/kcore.h>
59 
60 #include <uvm/uvm_extern.h>
61 
62 #include <machine/cpu.h>
63 #include <machine/pmap.h>
64 #define _MVME68K_BUS_DMA_PRIVATE
65 #include <machine/bus.h>
66 #include <m68k/cacheops.h>
67 
68 extern	phys_ram_seg_t mem_clusters[];
69 
70 int	_bus_dmamap_load_buffer_direct_common __P((bus_dma_tag_t,
71 	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
72 	    paddr_t *, int *, int));
73 
74 /*
75  * Common function for DMA map creation.  May be called by bus-specific
76  * DMA map creation functions.
77  */
78 int
79 _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
80 	bus_dma_tag_t t;
81 	bus_size_t size;
82 	int nsegments;
83 	bus_size_t maxsegsz;
84 	bus_size_t boundary;
85 	int flags;
86 	bus_dmamap_t *dmamp;
87 {
88 	struct mvme68k_bus_dmamap *map;
89 	void *mapstore;
90 	size_t mapsize;
91 
92 	/*
93 	 * Allocate and initialize the DMA map.  The end of the map
94 	 * is a variable-sized array of segments, so we allocate enough
95 	 * room for them in one shot.
96 	 *
97 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
98 	 * of ALLOCNOW notifies others that we've reserved these resources,
99 	 * and they are not to be freed.
100 	 *
101 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
102 	 * the (nsegments - 1).
103 	 */
104 	mapsize = sizeof(struct mvme68k_bus_dmamap) +
105 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
106 	if ((mapstore = malloc(mapsize, M_DMAMAP,
107 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
108 		return (ENOMEM);
109 
110 	memset(mapstore, 0, mapsize);
111 	map = (struct mvme68k_bus_dmamap *)mapstore;
112 	map->_dm_size = size;
113 	map->_dm_segcnt = nsegments;
114 	map->_dm_maxsegsz = maxsegsz;
115 	map->_dm_boundary = boundary;
116 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
117 	map->dm_mapsize = 0;		/* no valid mappings */
118 	map->dm_nsegs = 0;
119 
120 	*dmamp = map;
121 	return (0);
122 }
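/*
 * Illustrative usage sketch (not compiled): how a bus front-end or a
 * driver attach routine might reach the code above through the
 * bus_dmamap_create(9)/bus_dmamap_destroy(9) interface.  The softc and
 * tag field names (`sc', `sc_dmat') are hypothetical.
 */
#if 0
	bus_dmamap_t dmam;
	int error;

	/* One map of up to MAXPHYS bytes, enough segments for one page
	 * per segment, no per-segment limit beyond MAXPHYS, no boundary. */
	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, MAXPHYS / NBPG + 1,
	    MAXPHYS, 0, BUS_DMA_NOWAIT, &dmam);
	if (error)
		return (error);

	/* ... load, sync and unload the map during normal operation ... */

	bus_dmamap_destroy(sc->sc_dmat, dmam);
#endif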
123 
124 /*
125  * Common function for DMA map destruction.  May be called by bus-specific
126  * DMA map destruction functions.
127  */
128 void
129 _bus_dmamap_destroy(t, map)
130 	bus_dma_tag_t t;
131 	bus_dmamap_t map;
132 {
133 
134 	free(map, M_DMAMAP);
135 }
136 
137 /*
138  * Utility function to load a linear buffer.  lastaddrp holds state
139  * between invocations (for multiple-buffer loads).  segp contains
140  * the starting segment on entrance, and the ending segment on exit.
141  * first indicates if this is the first invocation of this function.
142  */
143 int
144 _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen, p, flags,
145     lastaddrp, segp, first)
146 	bus_dma_tag_t t;
147 	bus_dmamap_t map;
148 	void *buf;
149 	bus_size_t buflen;
150 	struct proc *p;
151 	int flags;
152 	paddr_t *lastaddrp;
153 	int *segp;
154 	int first;
155 {
156 	bus_size_t sgsize;
157 	bus_addr_t curaddr, lastaddr, baddr, bmask;
158 	vaddr_t vaddr = (vaddr_t)buf;
159 	int seg, cacheable, coherent = BUS_DMA_COHERENT;
160 
161 	lastaddr = *lastaddrp;
162 	bmask = ~(map->_dm_boundary - 1);
163 
164 	for (seg = *segp; buflen > 0 ; ) {
165 		/*
166 		 * Get the physical address for this segment.
167 		 */
168 		if (p != NULL) {
169 			(void) pmap_extract(p->p_vmspace->vm_map.pmap,
170 			    vaddr, &curaddr);
171 			cacheable =
172 			    _pmap_page_is_cacheable(p->p_vmspace->vm_map.pmap,
173 				vaddr);
174 		} else {
175 			(void) pmap_extract(pmap_kernel(), vaddr, &curaddr);
176 			cacheable =
177 			    _pmap_page_is_cacheable(pmap_kernel(), vaddr);
178 		}
179 
180 		if (cacheable)
181 			coherent = 0;
182 
183 		/*
184 		 * Compute the segment size, and adjust counts.
185 		 */
186 		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
187 		if (buflen < sgsize)
188 			sgsize = buflen;
189 
190 		/*
191 		 * Make sure we don't cross any boundaries.
192 		 */
193 		if (map->_dm_boundary > 0) {
194 			baddr = (curaddr + map->_dm_boundary) & bmask;
195 			if (sgsize > (baddr - curaddr))
196 				sgsize = (baddr - curaddr);
197 		}
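		/*
		 * Worked example with illustrative numbers: for a 2KB
		 * boundary (0x800), bmask is ~0x7ff.  With curaddr =
		 * 0x2f400 and sgsize = 0xc00, baddr = (0x2f400 + 0x800)
		 * & bmask = 0x2f800, so sgsize is clamped to 0x400 and
		 * this chunk stops at the boundary; the rest is picked
		 * up on the next trip through the loop.
		 */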
198 
199 		/*
200 		 * Insert chunk into a segment, coalescing with
201 		 * the previous segment if possible.
202 		 */
203 		if (first) {
204 			map->dm_segs[seg].ds_addr =
205 			    map->dm_segs[seg]._ds_cpuaddr = curaddr;
206 			map->dm_segs[seg].ds_len = sgsize;
207 			map->dm_segs[seg]._ds_flags =
208 			    cacheable ? 0 : BUS_DMA_COHERENT;
209 			first = 0;
210 		} else {
211 			if (curaddr == lastaddr &&
212 			    (map->dm_segs[seg].ds_len + sgsize) <=
213 			     map->_dm_maxsegsz &&
214 			    (map->_dm_boundary == 0 ||
215 			     (map->dm_segs[seg].ds_addr & bmask) ==
216 			     (curaddr & bmask)))
217 				map->dm_segs[seg].ds_len += sgsize;
218 			else {
219 				if (++seg >= map->_dm_segcnt)
220 					break;
221 				map->dm_segs[seg].ds_addr =
222 				    map->dm_segs[seg]._ds_cpuaddr = curaddr;
223 				map->dm_segs[seg].ds_len = sgsize;
224 				map->dm_segs[seg]._ds_flags =
225 				    cacheable ? 0 : BUS_DMA_COHERENT;
226 			}
227 		}
228 
229 		lastaddr = curaddr + sgsize;
230 		vaddr += sgsize;
231 		buflen -= sgsize;
232 	}
233 
234 	*segp = seg;
235 	*lastaddrp = lastaddr;
236 	map->_dm_flags &= ~BUS_DMA_COHERENT;
237 	map->_dm_flags |= coherent;
238 
239 	/*
240 	 * Did we fit?
241 	 */
242 	if (buflen != 0) {
243 		/*
244 		 * If there is a chained window, we will automatically
245 		 * fall back to it.
246 		 */
247 		return (EFBIG);		/* XXX better return value here? */
248 	}
249 
250 	return (0);
251 }
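/*
 * Illustrative sketch (not compiled) of the state-carrying protocol
 * described in the comment above: pass `first' non-zero only for the
 * first buffer and thread `lastaddr' and `seg' through later calls, so
 * physically contiguous buffers can coalesce into a single segment.
 * The buffers, lengths and calling context here are hypothetical; the
 * mbuf and uio loaders below are the real in-tree callers.
 */
#if 0
	paddr_t lastaddr;
	int seg = 0, error;

	error = _bus_dmamap_load_buffer_direct_common(t, map, buf0, len0,
	    NULL, flags, &lastaddr, &seg, 1);		/* first buffer */
	if (error == 0)
		error = _bus_dmamap_load_buffer_direct_common(t, map, buf1,
		    len1, NULL, flags, &lastaddr, &seg, 0);	/* chained */
	if (error == 0) {
		map->dm_mapsize = len0 + len1;
		map->dm_nsegs = seg + 1;
	}
#endif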
252 
253 /*
254  * Common function for loading a direct-mapped DMA map with a linear
255  * buffer.  Called by bus-specific DMA map load functions with the
256  * OR value appropriate for indicating "direct-mapped" for that
257  * chipset.
258  */
259 int
260 _bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
261 	bus_dma_tag_t t;
262 	bus_dmamap_t map;
263 	void *buf;
264 	bus_size_t buflen;
265 	struct proc *p;
266 	int flags;
267 {
268 	paddr_t lastaddr;
269 	int seg, error;
270 
271 	/*
272 	 * Make sure that on error condition we return "no valid mappings".
273 	 */
274 	map->dm_mapsize = 0;
275 	map->dm_nsegs = 0;
276 
277 	if (buflen > map->_dm_size)
278 		return (EINVAL);
279 
280 	seg = 0;
281 	error = _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen,
282 	    p, flags, &lastaddr, &seg, 1);
283 	if (error == 0) {
284 		map->dm_mapsize = buflen;
285 		map->dm_nsegs = seg + 1;
286 	}
287 	return (error);
288 }
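/*
 * Illustrative sketch (not compiled): a driver loading a kernel buffer
 * through bus_dmamap_load(9), which lands in the routine above on this
 * port.  The softc, buffer, length and hardware helper are hypothetical;
 * `p' is NULL because the buffer lives in kernel space.
 */
#if 0
	int error, i;

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, xfer_buf,
	    xfer_len, NULL, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* Program the device from the resulting segment list. */
	for (i = 0; i < sc->sc_dmamap->dm_nsegs; i++)
		hypo_set_seg(sc, i, sc->sc_dmamap->dm_segs[i].ds_addr,
		    sc->sc_dmamap->dm_segs[i].ds_len);
#endif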
289 
290 /*
291  * Like _bus_dmamap_load_direct(), but for mbufs.
292  */
293 int
294 _bus_dmamap_load_mbuf_direct(t, map, m0, flags)
295 	bus_dma_tag_t t;
296 	bus_dmamap_t map;
297 	struct mbuf *m0;
298 	int flags;
299 {
300 	paddr_t lastaddr;
301 	int seg, error, first;
302 	struct mbuf *m;
303 
304 	/*
305 	 * Make sure that on error condition we return "no valid mappings."
306 	 */
307 	map->dm_mapsize = 0;
308 	map->dm_nsegs = 0;
309 
310 #ifdef DIAGNOSTIC
311 	if ((m0->m_flags & M_PKTHDR) == 0)
312 		panic("_bus_dmamap_load_mbuf_direct_common: no packet header");
313 #endif
314 
315 	if (m0->m_pkthdr.len > map->_dm_size)
316 		return (EINVAL);
317 
318 	first = 1;
319 	seg = 0;
320 	error = 0;
321 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
322 		error = _bus_dmamap_load_buffer_direct_common(t, map,
323 		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
324 		first = 0;
325 	}
326 	if (error == 0) {
327 		map->dm_mapsize = m0->m_pkthdr.len;
328 		map->dm_nsegs = seg + 1;
329 	}
330 	return (error);
331 }
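/*
 * Illustrative sketch (not compiled): a network driver's transmit path
 * using bus_dmamap_load_mbuf(9).  The softc, `txmap' and the descriptor
 * hand-off are hypothetical.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0, BUS_DMA_NOWAIT);
	if (error)
		return (error);	/* e.g. EFBIG: chain needs too many segments */

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* ... hand txmap->dm_segs[0 .. dm_nsegs - 1] to the chip ... */
#endif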
332 
333 /*
334  * Like _bus_dmamap_load_direct(), but for uios.
335  */
336 int
337 _bus_dmamap_load_uio_direct(t, map, uio, flags)
338 	bus_dma_tag_t t;
339 	bus_dmamap_t map;
340 	struct uio *uio;
341 	int flags;
342 {
343 	paddr_t lastaddr;
344 	int seg, i, error, first;
345 	bus_size_t minlen, resid;
346 	struct proc *p = NULL;
347 	struct iovec *iov;
348 	caddr_t addr;
349 
350 	/*
351 	 * Make sure that on error condition we return "no valid mappings."
352 	 */
353 	map->dm_mapsize = 0;
354 	map->dm_nsegs = 0;
355 
356 	resid = uio->uio_resid;
357 	iov = uio->uio_iov;
358 
359 	if (uio->uio_segflg == UIO_USERSPACE) {
360 		p = uio->uio_procp;
361 #ifdef DIAGNOSTIC
362 		if (p == NULL)
363 			panic("_bus_dmamap_load_direct_common: USERSPACE but no proc");
364 #endif
365 	}
366 
367 	first = 1;
368 	seg = 0;
369 	error = 0;
370 	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
371 		/*
372 		 * Now at the first iovec to load.  Load each iovec
373 		 * until we have exhausted the residual count.
374 		 */
375 		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
376 		addr = (caddr_t)iov[i].iov_base;
377 
378 		error = _bus_dmamap_load_buffer_direct_common(t, map,
379 		    addr, minlen, p, flags, &lastaddr, &seg, first);
380 		first = 0;
381 
382 		resid -= minlen;
383 	}
384 	if (error == 0) {
385 		map->dm_mapsize = uio->uio_resid;
386 		map->dm_nsegs = seg + 1;
387 	}
388 	return (error);
389 }
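/*
 * Illustrative sketch (not compiled): loading a uio, e.g. from a
 * character device's read/write routine, via bus_dmamap_load_uio(9).
 * The softc and map are hypothetical.
 */
#if 0
	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_dmamap, uio,
	    BUS_DMA_NOWAIT);
#endif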
390 
391 /*
392  * Like _bus_dmamap_load_direct(), but for raw memory.
393  */
394 int
395 _bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
396 	bus_dma_tag_t t;
397 	bus_dmamap_t map;
398 	bus_dma_segment_t *segs;
399 	int nsegs;
400 	bus_size_t size;
401 	int flags;
402 {
403 	/* @@@ This routine doesn't enforce the map boundary requirement;
404 	 * @@@ perhaps it should return an error instead of panicking.
405 	 */
406 
407 #ifdef DIAGNOSTIC
408 	if (map->_dm_size < size) {
409 		panic("_bus_dmamap_load_raw_direct: size is too large for map");
410 	}
411 	if (map->_dm_segcnt < nsegs) {
412 		panic("_bus_dmamap_load_raw_direct: too many segments for map");
413 	}
414 #endif
415 
416 	{
417 		int i;
418 		for (i = 0; i < nsegs; i++) {
419 #ifdef DIAGNOSTIC
420 			if (map->_dm_maxsegsz < segs[i].ds_len) {
421 				panic("_bus_dmamap_load_raw_direct: segment too large for map");
422 			}
423 #endif
424 			map->dm_segs[i] = segs[i];
425 		}
426 	}
427 
428 	map->dm_nsegs   = nsegs;
429 	map->dm_mapsize = size;
430 
431 	return (0);
432 }
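/*
 * Illustrative sketch (not compiled): the usual partner of this routine
 * is bus_dmamem_alloc(9); its segment list can be handed straight to
 * bus_dmamap_load_raw(9) when no kernel mapping of the memory is needed
 * (e.g. a descriptor ring touched only by the device).  The ring size
 * and map are hypothetical.
 */
#if 0
	bus_dma_segment_t seg;
	int rseg, error;

	error = bus_dmamem_alloc(sc->sc_dmat, ring_size, NBPG, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamap_load_raw(sc->sc_dmat, sc->sc_ringmap,
		    &seg, rseg, ring_size, BUS_DMA_NOWAIT);
#endif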
433 
434 /*
435  * Common function for unloading a DMA map.  May be called by
436  * chipset-specific DMA map unload functions.
437  */
438 void
439 _bus_dmamap_unload(t, map)
440 	bus_dma_tag_t t;
441 	bus_dmamap_t map;
442 {
443 
444 	/*
445 	 * No resources to free; just mark the mappings as
446 	 * invalid.
447 	 */
448 	map->dm_mapsize = 0;
449 	map->dm_nsegs = 0;
450 	map->_dm_flags &= ~BUS_DMA_COHERENT;
451 }
452 
453 /*
454  * 68030 DMA map synchronization.  May be called
455  * by chipset-specific DMA map synchronization functions.
456  */
457 void
458 _bus_dmamap_sync_030(t, map, offset, len, ops)
459 	bus_dma_tag_t t;
460 	bus_dmamap_t map;
461 	bus_addr_t offset;
462 	bus_size_t len;
463 	int ops;
464 {
465 	/* Nothing yet */
466 }
467 
468 /*
469  * 68040/68060 DMA map synchronization.  May be called
470  * by chipset-specific DMA map synchronization functions.
471  */
472 void
473 _bus_dmamap_sync_0460(t, map, offset, len, ops)
474 	bus_dma_tag_t t;
475 	bus_dmamap_t map;
476 	bus_addr_t offset;
477 	bus_size_t len;
478 	int ops;
479 {
480 	bus_addr_t p, e, ps, pe;
481 	bus_size_t seglen;
482 	int i;
483 
484 	/* If the whole DMA map is uncached, do nothing.  */
485 	if (map->_dm_flags & BUS_DMA_COHERENT)
486 		return;
487 
488 	/* Short-circuit for unsupported `ops' */
489 	if ((ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) == 0)
490 		return;
491 
492 	for (i = 0; i < map->dm_nsegs && len > 0; i++) {
493 		if (map->dm_segs[i].ds_len <= offset) {
494 			/* Segment irrelevant - before requested offset */
495 			offset -= map->dm_segs[i].ds_len;
496 			continue;
497 		}
498 
499 		seglen = map->dm_segs[i].ds_len - offset;
500 		if (seglen > len)
501 			seglen = len;
502 		len -= seglen;
503 
504 		/* Ignore cache-inhibited segments */
505 		if (map->dm_segs[i]._ds_flags & BUS_DMA_COHERENT)
506 			continue;
507 
508 		ps = map->dm_segs[i]._ds_cpuaddr + offset;
509 		pe = ps + seglen;
510 
511 		if (ops & BUS_DMASYNC_PREWRITE) {
512 			p = ps & ~0xf;
513 			e = (pe + 15) & ~0xf;
514 
515 			/* flush cache line (060 too) */
516 			while((p < e) && (p % NBPG)) {
517 				DCFL_40(p);
518 				p += 16;
519 			}
520 
521 			/* flush page (060 too) */
522 			while((p + NBPG) <= e) {
523 				DCFP_40(p);
524 				p += NBPG;
525 			}
526 
527 			/* flush cache line (060 too) */
528 			while(p < e) {
529 				DCFL_40(p);
530 				p += 16;
531 			}
532 		}
533 
534 		/*
535 		 * Normally, the `PREREAD' flag instructs us to purge the
536 		 * cache for the specified offset and length. However, if
537 		 * the offset/length is not aligned to a cacheline boundary,
538 		 * we may end up purging some legitimate data from the
539 		 * start/end of the cache. In such a case, *flush* the
540 		 * cachelines at the start and end of the required region.
541 		 */
542 		if (ops & BUS_DMASYNC_PREREAD) {
543 			if (ps & 0xf) {
544 				DCFL_40(ps & ~0xf);
545 				ICPL_40(ps & ~0xf);
546 			}
547 			if (pe & 0xf) {
548 				DCFL_40(pe & ~0xf);
549 				ICPL_40(pe & ~0xf);
550 			}
551 
552 			p = (ps + 15) & ~0xf;
553 			e = pe & ~0xf;
554 
555 			/* purge cache line */
556 			while((p < e) && (p % NBPG)) {
557 				DCPL_40(p);
558 				ICPL_40(p);
559 				p += 16;
560 			}
561 
562 			/* purge page */
563 			while((p + NBPG) <= e) {
564 				DCPP_40(p);
565 				ICPP_40(p);
566 				p += NBPG;
567 			}
568 
569 			/* purge cache line */
570 			while(p < e) {
571 				DCPL_40(p);
572 				ICPL_40(p);
573 				p += 16;
574 			}
575 		}
576 	}
577 }
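/*
 * Illustrative sketch (not compiled): the ordering a driver follows
 * around a transfer so the flushes/purges above happen at the right
 * time.  The softc and map are hypothetical; a buffer used in only one
 * direction would pass just PREREAD or just PREWRITE.
 */
#if 0
	/* Before starting DMA: flush CPU writes (PREWRITE) and purge
	 * stale lines the device is about to overwrite (PREREAD). */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* ... start the transfer and wait for it to complete ... */

	/* Matching POST ops; this implementation currently ignores them. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#endif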
578 
579 /*
580  * Back-end for DMA-safe memory allocation within the physical address
581  * range [low, high].  May be called by bus-specific allocation functions.
582  */
583 int
584 _bus_dmamem_alloc_common(t, low, high, size, alignment, boundary,
585     segs, nsegs, rsegs, flags)
586 	bus_dma_tag_t t;
587 	bus_addr_t low, high;
588 	bus_size_t size, alignment, boundary;
589 	bus_dma_segment_t *segs;
590 	int nsegs;
591 	int *rsegs;
592 	int flags;
593 {
594 	paddr_t curaddr, lastaddr;
595 	struct vm_page *m;
596 	struct pglist mlist;
597 	int curseg, error;
598 
599 	/* Always round the size. */
600 	size = round_page(size);
601 	high -= PAGE_SIZE;
602 
603 	/*
604 	 * Allocate pages from the VM system.
605 	 *
606 	 * XXXSCW: This will be sub-optimal if the base-address of offboard
607 	 * RAM is significantly higher than the end-address of onboard RAM.
608 	 * (Due to how uvm_pglistalloc() is implemented.)
609 	 *
610 	 * uvm_pglistalloc() also currently ignores the 'nsegs' parameter,
611 	 * and always returns only one (contiguous) segment.
612 	 */
613 	error = uvm_pglistalloc(size, low, high, alignment, boundary,
614 	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
615 	if (error)
616 		return (error);
617 
618 	/*
619 	 * Compute the location, size, and number of segments actually
620 	 * returned by the VM code.
621 	 */
622 	m = mlist.tqh_first;
623 	curseg = 0;
624 	lastaddr = VM_PAGE_TO_PHYS(m);
625 	segs[curseg].ds_addr = segs[curseg]._ds_cpuaddr = lastaddr;
626 	segs[curseg].ds_len = PAGE_SIZE;
627 	segs[curseg]._ds_flags = 0;
628 	m = m->pageq.tqe_next;
629 
630 	for (; m != NULL; m = m->pageq.tqe_next) {
631 		if (curseg > nsegs) {
632 #ifdef DIAGNOSTIC
633 			printf("_bus_dmamem_alloc_common: too many segments\n");
634 #ifdef DEBUG
635 			panic("_bus_dmamem_alloc_common");
636 #endif
637 #endif
638 			uvm_pglistfree(&mlist);
639 			return (-1);
640 		}
641 
642 		curaddr = VM_PAGE_TO_PHYS(m);
643 #ifdef DIAGNOSTIC
644 		if (curaddr < low || curaddr > high) {
645 			printf("uvm_pglistalloc returned non-sensical"
646 			    " address 0x%lx\n", curaddr);
647 			panic("_bus_dmamem_alloc_common");
648 		}
649 #endif
650 		if (curaddr == (lastaddr + PAGE_SIZE))
651 			segs[curseg].ds_len += PAGE_SIZE;
652 		else {
653 			curseg++;
654 			segs[curseg].ds_addr =
655 			    segs[curseg]._ds_cpuaddr = curaddr;
656 			segs[curseg].ds_len = PAGE_SIZE;
657 			segs[curseg]._ds_flags = 0;
658 		}
659 		lastaddr = curaddr;
660 	}
661 
662 	*rsegs = curseg + 1;
663 
664 	return (0);
665 }

666 /*
667  * Common function for DMA-safe memory allocation.  May be called
668  * by bus-specific DMA memory allocation functions.
669  */
670 int
671 _bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
672 	bus_dma_tag_t t;
673 	bus_size_t size, alignment, boundary;
674 	bus_dma_segment_t *segs;
675 	int nsegs;
676 	int *rsegs;
677 	int flags;
678 {
679 	extern paddr_t avail_start, avail_end;
680 	bus_addr_t high;
681 
682 	/*
683 	 * Assume any memory will do (this includes off-board RAM)
684 	 */
685 	high = avail_end;
686 
687 	if ( (flags & BUS_DMA_ONBOARD_RAM) != 0 ) {
688 		/*
689 		 * Constrain the memory to 'onboard' RAM only
690 		 */
691 		high = mem_clusters[0].size;
692 	}
693 
694 	if ( (flags & BUS_DMA_24BIT) != 0 && (high & 0xff000000u) != 0 ) {
695 		/*
696 		 * We need to constrain the memory to a 24-bit address
697 		 */
698 		high = 0x01000000u;
699 	}
700 
701 	return _bus_dmamem_alloc_common(t, avail_start, high,
702 	    size, alignment, boundary, segs, nsegs, rsegs, flags);
703 }
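/*
 * Illustrative sketch (not compiled): allocating a page of DMA-safe
 * memory that must be reachable through a 24-bit address, using the
 * mvme68k-specific BUS_DMA_24BIT flag handled above.  The softc is
 * hypothetical.
 */
#if 0
	bus_dma_segment_t seg;
	int rseg, error;

	error = bus_dmamem_alloc(sc->sc_dmat, NBPG, NBPG, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT | BUS_DMA_24BIT);
#endif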
704 
705 /*
706  * Common function for freeing DMA-safe memory.  May be called by
707  * bus-specific DMA memory free functions.
708  */
709 void
710 _bus_dmamem_free(t, segs, nsegs)
711 	bus_dma_tag_t t;
712 	bus_dma_segment_t *segs;
713 	int nsegs;
714 {
715 	struct vm_page *m;
716 	bus_addr_t addr;
717 	struct pglist mlist;
718 	int curseg;
719 
720 	/*
721 	 * Build a list of pages to free back to the VM system.
722 	 */
723 	TAILQ_INIT(&mlist);
724 	for (curseg = 0; curseg < nsegs; curseg++) {
725 		for (addr = segs[curseg]._ds_cpuaddr;
726 		    addr < (segs[curseg]._ds_cpuaddr + segs[curseg].ds_len);
727 		    addr += PAGE_SIZE) {
728 			m = PHYS_TO_VM_PAGE(addr);
729 			TAILQ_INSERT_TAIL(&mlist, m, pageq);
730 		}
731 	}
732 
733 	uvm_pglistfree(&mlist);
734 }
735 
736 /*
737  * Common function for mapping DMA-safe memory.  May be called by
738  * bus-specific DMA memory map functions.
739  */
740 int
741 _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
742 	bus_dma_tag_t t;
743 	bus_dma_segment_t *segs;
744 	int nsegs;
745 	size_t size;
746 	caddr_t *kvap;
747 	int flags;
748 {
749 	vaddr_t va;
750 	bus_addr_t addr;
751 	int curseg;
752 
753 	size = round_page(size);
754 
755 	va = uvm_km_valloc(kernel_map, size);
756 
757 	if (va == 0)
758 		return (ENOMEM);
759 
760 	*kvap = (caddr_t)va;
761 
762 	for (curseg = 0; curseg < nsegs; curseg++) {
763 		for (addr = segs[curseg]._ds_cpuaddr;
764 		    addr < (segs[curseg]._ds_cpuaddr + segs[curseg].ds_len);
765 		    addr += NBPG, va += NBPG, size -= NBPG) {
766 			if (size == 0)
767 				panic("_bus_dmamem_map: size botch");
768 
769 			pmap_enter(pmap_kernel(), va, addr,
770 			    VM_PROT_READ | VM_PROT_WRITE,
771 			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
772 
773 			/* Cache-inhibit the page if necessary */
774 			if ((flags & BUS_DMA_COHERENT) != 0)
775 				_pmap_set_page_cacheinhibit(pmap_kernel(), va);
776 
777 			segs[curseg]._ds_flags &= ~BUS_DMA_COHERENT;
778 			segs[curseg]._ds_flags |= (flags & BUS_DMA_COHERENT);
779 		}
780 	}
781 	pmap_update(pmap_kernel());
782 
783 	if ( (flags & BUS_DMA_COHERENT) != 0 )
784 		TBIAS();
785 
786 	return (0);
787 }
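/*
 * Illustrative sketch (not compiled): the usual life cycle of DMA-safe
 * memory around this routine.  BUS_DMA_COHERENT requests the
 * cache-inhibited mapping set up above.  The softc, size and teardown
 * point are hypothetical.
 */
#if 0
	bus_dma_segment_t seg;
	caddr_t kva;
	int rseg, error;

	error = bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
		    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);

	/* ... use the buffer at `kva' ... */

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
#endif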
788 
789 /*
790  * Common function for unmapping DMA-safe memory.  May be called by
791  * bus-specific DMA memory unmapping functions.
792  */
793 void
794 _bus_dmamem_unmap(t, kva, size)
795 	bus_dma_tag_t t;
796 	caddr_t kva;
797 	size_t size;
798 {
799 	caddr_t va;
800 	size_t s;
801 
802 #ifdef DIAGNOSTIC
803 	if ((u_long)kva & PGOFSET)
804 		panic("_bus_dmamem_unmap");
805 #endif
806 
807 	size = round_page(size);
808 
809 	/*
810 	 * Re-enable caching on the range
811 	 * XXXSCW: There should be some way to indicate that the pages
812 	 * were mapped DMA_MAP_COHERENT in the first place...
813 	 */
814 	for (s = 0, va = kva; s < size; s += PAGE_SIZE, va += PAGE_SIZE)
815 		_pmap_set_page_cacheable(pmap_kernel(), (vaddr_t)va);
816 
817 	uvm_km_free(kernel_map, (vaddr_t)kva, size);
818 }
819 
820 /*
821  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
822  * bus-specific DMA mmap(2)'ing functions.
823  */
824 paddr_t
825 _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
826 	bus_dma_tag_t t;
827 	bus_dma_segment_t *segs;
828 	int nsegs;
829 	off_t off;
830 	int prot, flags;
831 {
832 	int i;
833 
834 	for (i = 0; i < nsegs; i++) {
835 #ifdef DIAGNOSTIC
836 		if (off & PGOFSET)
837 			panic("_bus_dmamem_mmap: offset unaligned");
838 		if (segs[i]._ds_cpuaddr & PGOFSET)
839 			panic("_bus_dmamem_mmap: segment unaligned");
840 		if (segs[i].ds_len & PGOFSET)
841 			panic("_bus_dmamem_mmap: segment size not multiple"
842 			    " of page size");
843 #endif
844 		if (off >= segs[i].ds_len) {
845 			off -= segs[i].ds_len;
846 			continue;
847 		}
848 
849 		/*
850 		 * XXXSCW: What about BUS_DMA_COHERENT ??
851 		 */
852 
853 		return (m68k_btop((caddr_t)segs[i]._ds_cpuaddr + off));
854 	}
855 
856 	/* Page not found. */
857 	return (-1);
858 }
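/*
 * Illustrative sketch (not compiled): a hypothetical character driver's
 * mmap entry point returning the value computed above so a user process
 * can mmap(2) the DMA buffer.  `hypo_sc' and its fields are invented
 * names; flags are left at 0 (see the BUS_DMA_COHERENT question above).
 */
#if 0
paddr_t
hypo_mmap(dev, off, prot)
	dev_t dev;
	off_t off;
	int prot;
{
	struct hypo_softc *sc = &hypo_sc[minor(dev)];

	return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs, sc->sc_nsegs,
	    off, prot, 0));
}
#endif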
859