/*	$NetBSD: bus_dma.c,v 1.13 2010/11/06 11:46:01 uebayasi Exp $	*/

/*
 * Copyright (c) 2005 NONAKA Kimihiro
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.13 2010/11/06 11:46:01 uebayasi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <uvm/uvm.h>

#include <sh3/cache.h>

#include <machine/autoconf.h>
#define	_LANDISK_BUS_DMA_PRIVATE
#include <machine/bus.h>

#if defined(DEBUG) && defined(BUSDMA_DEBUG)
int busdma_debug = 0;
#define	DPRINTF(a)	if (busdma_debug) printf a
#else
#define	DPRINTF(a)
#endif
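
/*
 * Note: DPRINTF() output is compiled in only when both "options DEBUG"
 * and "options BUSDMA_DEBUG" are set in the kernel config, and is then
 * still gated at run time by busdma_debug, which defaults to off and
 * can be toggled from a debugger (e.g. ddb).
 */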

struct _bus_dma_tag landisk_bus_dma = {
	._cookie = NULL,

	._dmamap_create = _bus_dmamap_create,
	._dmamap_destroy = _bus_dmamap_destroy,
	._dmamap_load = _bus_dmamap_load,
	._dmamap_load_mbuf = _bus_dmamap_load_mbuf,
	._dmamap_load_uio = _bus_dmamap_load_uio,
	._dmamap_load_raw = _bus_dmamap_load_raw,
	._dmamap_unload = _bus_dmamap_unload,
	._dmamap_sync = _bus_dmamap_sync,

	._dmamem_alloc = _bus_dmamem_alloc,
	._dmamem_free = _bus_dmamem_free,
	._dmamem_map = _bus_dmamem_map,
	._dmamem_unmap = _bus_dmamem_unmap,
	._dmamem_mmap = _bus_dmamem_mmap,
};
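
/*
 * Illustrative only: a typical driver transfer through this tag follows
 * the standard bus_dma(9) sequence (hypothetical buffer "buf"/"len",
 * error handling elided):
 *
 *	bus_dma_tag_t t = &landisk_bus_dma;
 *	bus_dmamap_t map;
 *
 *	(void)bus_dmamap_create(t, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &map);
 *	(void)bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the transfer and wait for it to complete ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */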

/*
 * Create a DMA map.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;

	DPRINTF(("%s: t = %p, size = %ld, nsegments = %d, maxsegsz = %ld,"
		 " boundary = %ld, flags = %x\n",
		 __func__, t, size, nsegments, maxsegsz, boundary, flags));

	/*
	 * Allocate and initialize the DMA map.  The end of the map is
	 * a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.  bus_dmamap_t includes one
	 * bus_dma_segment_t already, hence the (nsegments - 1).
	 *
	 * Note that we don't preserve WAITOK and NOWAIT flags.
	 * Preservation of ALLOCNOW notifies others that we've
	 * reserved these resources, and they are not to be freed.
	 */
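	/*
	 * For example (illustrative arithmetic only): nsegments == 8
	 * allocates the map header plus room for 7 additional
	 * bus_dma_segment_t's beyond the embedded one.
	 */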
	mapsize = sizeof(struct _bus_dmamap)
		+ (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mapstore = malloc(mapsize, M_DMAMAP, M_ZERO
			  | ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (mapstore == NULL)
		return ENOMEM;

	DPRINTF(("%s: dmamp = %p\n", __func__, mapstore));

	map = (bus_dmamap_t)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK | BUS_DMA_NOWAIT);

	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}

/*
 * Destroy a DMA map.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	DPRINTF(("%s: t = %p, map = %p\n", __func__, t, map));

	free(map, M_DMAMAP);
}

static inline int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    paddr_t paddr, vaddr_t vaddr, int size, int *segp, paddr_t *lastaddrp,
    int first)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t lastaddr;
	int nseg;
	int sgsize;

	nseg = *segp;
	lastaddr = *lastaddrp;

	DPRINTF(("%s: t = %p, map = %p, paddr = 0x%08lx,"
		 " vaddr = 0x%08lx, size = %d\n",
		 __func__, t, map, paddr, vaddr, size));
	DPRINTF(("%s: nseg = %d, bmask = 0x%08lx, lastaddr = 0x%08lx\n",
		 __func__, nseg, bmask, lastaddr));

	do {
		sgsize = size;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			bus_addr_t baddr; /* next boundary address */

			baddr = (paddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - paddr))
				sgsize = (baddr - paddr);
		}

		DPRINTF(("%s: sgsize = %d\n", __func__, sgsize));

		/*
		 * Insert the chunk, coalescing it with the previous
		 * segment if possible.
		 */
		if (first) {
			DPRINTF(("%s: first\n", __func__));
			first = 0;

			segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
			segs[nseg].ds_len = sgsize;
			segs[nseg]._ds_vaddr = vaddr;
		}
		else if ((paddr == lastaddr)
			 && (segs[nseg].ds_len + sgsize <= map->_dm_maxsegsz)
			 && (map->_dm_boundary == 0 ||
			     (segs[nseg].ds_addr & bmask) == (paddr & bmask)))
		{
			DPRINTF(("%s: coalesce\n", __func__));

			segs[nseg].ds_len += sgsize;
		}
		else {
			DPRINTF(("%s: new\n", __func__));

			++nseg;
			if (nseg >= map->_dm_segcnt)
				break;

			segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
			segs[nseg].ds_len = sgsize;
			segs[nseg]._ds_vaddr = vaddr;
		}

		paddr += sgsize;
		vaddr += sgsize;
		size -= sgsize;
		lastaddr = paddr;

		DPRINTF(("%s: lastaddr = 0x%08lx, paddr = 0x%08lx,"
			 " vaddr = 0x%08lx, size = %d\n",
			 __func__, lastaddr, paddr, vaddr, size));
	} while (size > 0);

	DPRINTF(("%s: nseg = %d\n", __func__, nseg));

	*segp = nseg;
	*lastaddrp = lastaddr;

	if (size != 0) {
		/*
		 * It didn't fit.  If there is a chained window, we
		 * will automatically fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
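
/*
 * Worked example of the boundary clamp above (illustrative values
 * only): with _dm_boundary == 0x1000 (so bmask == ~0xfff),
 * paddr == 0x4ce0 and size == 0x400, we get
 * baddr == (0x4ce0 + 0x1000) & ~0xfff == 0x5000, so the chunk is
 * clamped to sgsize == 0x5000 - 0x4ce0 == 0x320 and the remaining
 * 0xe0 bytes are handled on the next pass, starting at the boundary.
 */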

static inline int
_bus_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	bus_size_t len;
	paddr_t lastaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	pmap_t pmap;
	int first;
	int error;

	DPRINTF(("%s: t = %p, map = %p, buf = %p, buflen = %ld,"
		 " p = %p, flags = %x\n",
		 __func__, t, map, buf, buflen, p, flags));

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	first = 1;
	lastaddr = 0;

	len = buflen;
	while (len > 0) {
		bool mapped;

		mapped = pmap_extract(pmap, vaddr, &curaddr);
		if (!mapped)
			return EFAULT;

		sgsize = PAGE_SIZE - (vaddr & PGOFSET);
		if (len < sgsize)
			sgsize = len;

		error = _bus_dmamap_load_paddr(t, map, curaddr, vaddr, sgsize,
					       segp, &lastaddr, first);
		if (error)
			return error;

		vaddr += sgsize;
		len -= sgsize;
		first = 0;
	}

	return 0;
}

/*
 * Load a DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	bus_addr_t addr = (bus_addr_t)buf;
	paddr_t lastaddr;
	int seg;
	int first;
	int error;

	DPRINTF(("%s: t = %p, map = %p, buf = %p, buflen = %ld,"
		 " p = %p, flags = %x\n",
		 __func__, t, map, buf, buflen, p, flags));

	/* make sure that on error condition we return "no valid mappings" */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	error = 0;
	seg = 0;

	if (SH3_P1SEG_BASE <= addr && addr + buflen <= SH3_P2SEG_END) {
		bus_addr_t curaddr;
		bus_size_t sgsize;
		bus_size_t len = buflen;

		DPRINTF(("%s: P[12]SEG (0x%08lx)\n", __func__, addr));

		first = 1;
		lastaddr = 0;

		while (len > 0) {
			curaddr = SH3_P1SEG_TO_PHYS(addr);

			sgsize = PAGE_SIZE - ((u_long)addr & PGOFSET);
			if (len < sgsize)
				sgsize = len;

			error = _bus_dmamap_load_paddr(t, map,
						       curaddr, addr, sgsize,
						       &seg, &lastaddr, first);
			if (error)
				break;

			addr += sgsize;
			len -= sgsize;
			first = 0;
		}
	}
	else {
		error = _bus_bus_dmamap_load_buffer(t, map, buf, buflen,
						    p, flags, &seg);
	}

	if (error)
		return (error);

	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;
	return 0;
}
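
/*
 * Illustrative only: a caller that wants to handle the EFBIG case from
 * _bus_dmamap_load_paddr() (too many discontiguous chunks for
 * _dm_segcnt) might do (hypothetical fallback, error handling elided):
 *
 *	error = bus_dmamap_load(t, map, kva, len, NULL, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		... copy into a physically contiguous bounce buffer
 *		    and load that instead ...
 *	}
 */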

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct mbuf *m;
	paddr_t lastaddr;
	int seg;
	int first;
	int error;

	DPRINTF(("%s: t = %p, map = %p, m0 = %p, flags = %x\n",
		 __func__, t, map, m0, flags));

	/* make sure that on error condition we return "no valid mappings" */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	seg = 0;
	first = 1;
	lastaddr = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		paddr_t paddr;
		vaddr_t vaddr;
		int size;

		if (m->m_len == 0)
			continue;

		vaddr = (vaddr_t)m->m_data;
		size = m->m_len;

		if (SH3_P1SEG_BASE <= vaddr && vaddr < SH3_P3SEG_BASE) {
			paddr = (paddr_t)(PMAP_UNMAP_POOLPAGE(vaddr));
			error = _bus_dmamap_load_paddr(t, map,
						       paddr, vaddr, size,
						       &seg, &lastaddr, first);
			if (error)
				return error;
			first = 0;
		}
		else {
			/* XXX: stolen from load_buffer, need to refactor */
			while (size > 0) {
				bus_size_t sgsize;
				bool mapped;

				mapped = pmap_extract(pmap_kernel(), vaddr,
						      &paddr);
				if (!mapped)
					return EFAULT;

				sgsize = PAGE_SIZE - (vaddr & PGOFSET);
				if (size < sgsize)
					sgsize = size;

				error = _bus_dmamap_load_paddr(t, map,
						paddr, vaddr, sgsize,
						&seg, &lastaddr, first);
				if (error)
					return error;

				vaddr += sgsize;
				size -= sgsize;
				first = 0;
			}
		}
	}

	map->dm_nsegs = seg + 1;
	map->dm_mapsize = m0->m_pkthdr.len;
	return 0;
}
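
/*
 * Illustrative only: a typical network driver transmit path using the
 * mbuf loader (hypothetical names, error handling elided):
 *
 *	if (bus_dmamap_load_mbuf(t, map, m0, BUS_DMA_NOWAIT) != 0) {
 *		... compact the mbuf chain or drop the packet ...
 *	}
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);
 *	... hand map->dm_segs[0 .. dm_nsegs - 1] to the device ...
 */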

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	DPRINTF(("%s: t = %p, map = %p\n", __func__, t, map));

	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}

/*
 * Synchronize a DMA map.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr, naddr;
	int i;

	DPRINTF(("%s: t = %p, map = %p, offset = %ld, len = %ld, ops = %x\n",
		 __func__, t, map, offset, len, ops));

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset");
	if ((offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	if (!sh_cache_enable_dcache) {
		/* Nothing to do: the D-cache is disabled. */
		DPRINTF(("%s: D-cache is disabled\n", __func__));
		return;
	}

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;
		naddr = addr + offset;

		if ((naddr >= SH3_P2SEG_BASE)
		 && (naddr + minlen <= SH3_P2SEG_END)) {
			DPRINTF(("%s: P2SEG (0x%08lx)\n", __func__, naddr));
			offset = 0;
			len -= minlen;
			continue;
		}

		DPRINTF(("%s: flushing segment %d "
			 "(0x%lx+%lx, 0x%lx+0x%lx) (remain = %ld)\n",
			 __func__, i,
			 addr, offset, addr, offset + minlen - 1, len));

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			if (SH_HAS_WRITEBACK_CACHE)
				sh_dcache_wbinv_range(naddr, minlen);
			else
				sh_dcache_inv_range(naddr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			if (SH_HAS_WRITEBACK_CACHE &&
			    ((naddr | minlen) & (sh_cache_line_size - 1)) != 0)
				sh_dcache_wbinv_range(naddr, minlen);
			else
				sh_dcache_inv_range(naddr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			if (SH_HAS_WRITEBACK_CACHE)
				sh_dcache_wb_range(naddr, minlen);
			break;

		case BUS_DMASYNC_POSTREAD:
		case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
			sh_dcache_inv_range(naddr, minlen);
			break;
		}
		offset = 0;
		len -= minlen;
	}
}
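
/*
 * Illustrative only: the usual pairing of sync operations around a
 * device-to-memory (read) transfer:
 *
 *	bus_dmamap_sync(t, map, off, len, BUS_DMASYNC_PREREAD);
 *	... the device DMAs into the buffer ...
 *	bus_dmamap_sync(t, map, off, len, BUS_DMASYNC_POSTREAD);
 *
 * On this port POSTREAD invalidates the D-cache so the CPU sees the
 * freshly DMA'd data, PREWRITE writes dirty lines back before the
 * device reads memory, and ranges already in uncached P2 space are
 * skipped entirely (the P2SEG check above).
 */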

/*
 * Allocate memory safe for DMA.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */
	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error;

	DPRINTF(("%s: t = %p, size = %ld, alignment = %ld, boundary = %ld,"
		 " segs = %p, nsegs = %d, rsegs = %p, flags = %x\n",
		 __func__, t, size, alignment, boundary,
		 segs, nsegs, rsegs, flags));
	DPRINTF(("%s: avail_start = 0x%08lx, avail_end = 0x%08lx\n",
		 __func__, avail_start, avail_end));

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;

	DPRINTF(("%s: m = %p, lastaddr = 0x%08lx\n", __func__, m, lastaddr));

	while ((m = TAILQ_NEXT(m, pageq.queue)) != NULL) {
		curaddr = VM_PAGE_TO_PHYS(m);
		DPRINTF(("%s: m = %p, curaddr = 0x%08lx, lastaddr = 0x%08lx\n",
			 __func__, m, curaddr, lastaddr));

		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			DPRINTF(("%s: new segment\n", __func__));
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	DPRINTF(("%s: curseg = %d, *rsegs = %d\n", __func__, curseg, *rsegs));

	return (0);
}
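
/*
 * Illustrative only: since _bus_dmamap_load_raw() just panics on this
 * port, DMA-safe memory is typically mapped and then loaded through
 * its kernel virtual address, using a map created with
 * bus_dmamap_create() (hypothetical caller, error handling elided):
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	(void)bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT);
 *	(void)bus_dmamem_map(t, &seg, rseg, size, &kva,
 *	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 *	(void)bus_dmamap_load(t, map, kva, size, NULL, BUS_DMA_NOWAIT);
 */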

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	DPRINTF(("%s: t = %p, segs = %p, nsegs = %d\n",
		 __func__, t, segs, nsegs));

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("%s: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n",
			 __func__, curseg,
			 segs[curseg].ds_addr, segs[curseg].ds_len));

		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE)
		{
			m = PHYS_TO_VM_PAGE(addr);
			DPRINTF(("%s: m = %p\n", __func__, m));
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va, topva;
	bus_addr_t addr;
	int curseg;

	DPRINTF(("%s: t = %p, segs = %p, nsegs = %d, size = %d,"
		 " kvap = %p, flags = %x\n",
		 __func__, t, segs, nsegs, size, kvap, flags));

	/*
	 * If we're mapping only a single segment, use direct-mapped
	 * va, to avoid thrashing the TLB.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		else
			*kvap = (void *)SH3_PHYS_TO_P1SEG(segs[0].ds_addr);

		DPRINTF(("%s: addr = 0x%08lx, kva = %p\n",
			 __func__, segs[0].ds_addr, *kvap));
		return 0;
	}

	/* Always round the size. */
	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY
			  | ((flags & BUS_DMA_NOWAIT) ? UVM_KMF_NOWAIT : 0));
	if (va == 0)
		return (ENOMEM);
	topva = va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("%s: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n",
			 __func__, curseg,
			 segs[curseg].ds_addr, segs[curseg].ds_len));

		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE)
		{
			if (__predict_false(size == 0))
				panic("_bus_dmamem_map: size botch");

			pmap_kenter_pa(va, addr,
				       VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}

	pmap_update(pmap_kernel());
	*kvap = (void *)topva;
	return (0);
}
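
/*
 * Note (descriptive): in the single-segment case above no kernel
 * virtual address space is consumed at all.  BUS_DMA_COHERENT hands
 * back an uncached P2 address, and the default hands back a cached P1
 * address; both are sh3 direct-mapped windows onto physical memory,
 * which is also why _bus_dmamem_unmap() below can simply return for
 * P1/P2 addresses.
 */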

void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	vaddr_t vaddr = (vaddr_t)kva;

	DPRINTF(("%s: t = %p, kva = %p, size = %d\n",
		 __func__, t, kva, size));

#ifdef DIAGNOSTIC
	if (vaddr & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	/* nothing to do if we mapped it via P1SEG or P2SEG */
	if (SH3_P1SEG_BASE <= vaddr && vaddr <= SH3_P2SEG_END)
		return;

	size = round_page(size);
	pmap_kremove(vaddr, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, vaddr, size, UVM_KMF_VAONLY);
}

paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return (paddr_t)(-1);
}