/*	$OpenBSD: bus_dma.c,v 1.13 2015/01/25 11:36:41 dlg Exp $	*/
/*	$NetBSD: bus_dma.c,v 1.1 2006/09/01 21:26:18 uwe Exp $	*/

/*
 * Copyright (c) 2005 NONAKA Kimihiro
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <sh/cache.h>

#include <machine/autoconf.h>
#define	_LANDISK_BUS_DMA_PRIVATE
#include <machine/bus.h>

#if defined(DEBUG) && defined(BUSDMA_DEBUG)
#define	DPRINTF(a)	printf a
#else
#define	DPRINTF(a)
#endif

struct _bus_dma_tag landisk_bus_dma = {
	._cookie = NULL,

	._dmamap_create = _bus_dmamap_create,
	._dmamap_destroy = _bus_dmamap_destroy,
	._dmamap_load = _bus_dmamap_load,
	._dmamap_load_mbuf = _bus_dmamap_load_mbuf,
	._dmamap_load_uio = _bus_dmamap_load_uio,
	._dmamap_load_raw = _bus_dmamap_load_raw,
	._dmamap_unload = _bus_dmamap_unload,
	._dmamap_sync = _bus_dmamap_sync,

	._dmamem_alloc = _bus_dmamem_alloc,
	._dmamem_free = _bus_dmamem_free,
	._dmamem_map = _bus_dmamem_map,
	._dmamem_unmap = _bus_dmamem_unmap,
	._dmamem_mmap = _bus_dmamem_mmap,
};
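
/*
 * Usage sketch (not part of this file's logic): a landisk driver reaches
 * these functions through the machine-independent bus_dma(9) wrappers,
 * roughly in the order below.  The softc, buffer and length names are
 * hypothetical and only illustrate the expected call sequence against
 * this tag for a memory-to-device transfer.
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) != 0)
 *		goto fail;
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the transfer and wait for completion ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */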

#define DMAMAP_RESET(_m) do { \
	(_m)->dm_mapsize = 0; \
	(_m)->dm_nsegs = 0; \
} while (0)

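/*
 * Internal helpers shared by the map loaders below:
 * _bus_dmamap_load_vaddr() walks a virtual buffer page by page and
 * resolves physical addresses, and _bus_dmamap_load_paddr() appends
 * each physical range to the map's segment array, coalescing where
 * possible.
 */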
int	_bus_dmamap_load_vaddr(bus_dma_tag_t, bus_dmamap_t,
	    void *, bus_size_t, pmap_t);
int	_bus_dmamap_load_paddr(bus_dma_tag_t, bus_dmamap_t,
	    paddr_t, vaddr_t, bus_size_t);

/*
 * Create a DMA map.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;
	int error;

	DPRINTF(("bus_dmamap_create: t = %p, size = %ld, nsegments = %d, maxsegsz = %ld, boundary = %ld, flags = %x\n", t, size, nsegments, maxsegsz, boundary, flags));

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	error = 0;
	mapsize = sizeof(struct _bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	DPRINTF(("bus_dmamap_create: dmamp = %p\n", mapstore));

	map = (bus_dmamap_t)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);

	DMAMAP_RESET(map); /* no valid mappings */

	*dmamp = map;

	return (0);
}

/*
 * Destroy a DMA map.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	DPRINTF(("bus_dmamap_destroy: t = %p, map = %p\n", t, map));

	free(map, M_DEVBUF, 0);
}

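/*
 * Append the physical range [paddr, paddr + size) to the map, splitting
 * it at _dm_boundary crossings and coalescing it with the previous
 * segment when the range is physically contiguous.  Segment addresses
 * are stored as P2SEG (uncached direct-mapped) addresses.  map->dm_nsegs
 * is used here as the index of the segment being filled; the public
 * loaders turn it into a segment count once the whole buffer is loaded.
 */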
int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    paddr_t paddr, vaddr_t vaddr, bus_size_t size)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);

	int first = map->dm_mapsize == 0;
	int nseg = map->dm_nsegs;
	paddr_t lastaddr = SH3_P2SEG_TO_PHYS(segs[nseg].ds_addr);

	map->dm_mapsize += size;

	do {
		bus_size_t sgsize = size;

		/* Make sure we don't cross any boundaries. */
		if (map->_dm_boundary > 0) {
			bus_addr_t baddr; /* next boundary address */

			baddr = (paddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - paddr))
				sgsize = (baddr - paddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			/* first segment */
			segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
			segs[nseg].ds_len = sgsize;
			segs[nseg]._ds_vaddr = vaddr;
			first = 0;
		} else if ((paddr == lastaddr)
		 && (segs[nseg].ds_len + sgsize <= map->_dm_maxsegsz)
		 && (map->_dm_boundary == 0 ||
		     (segs[nseg].ds_addr & bmask) == (paddr & bmask))) {
			/* coalesce */
			segs[nseg].ds_len += sgsize;
		} else {
			if (++nseg >= map->_dm_segcnt)
				return (EFBIG);

			/* new segment */
			segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
			segs[nseg].ds_len = sgsize;
			segs[nseg]._ds_vaddr = vaddr;
		}

		lastaddr = paddr + sgsize;
		paddr += sgsize;
		vaddr += sgsize;
		size -= sgsize;
	} while (size > 0);

	map->dm_nsegs = nseg;

	return (0);
}

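/*
 * Load the virtual range [buf, buf + size) into the map.  Kernel
 * buffers that live entirely in the P1/P2 direct-mapped segments are
 * translated with simple arithmetic; everything else is resolved one
 * page at a time with pmap_extract() (its return value is not checked
 * here; the buffer is assumed to be resident).
 */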
int
_bus_dmamap_load_vaddr(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t size, pmap_t pmap)
{
	vaddr_t vaddr;
	paddr_t paddr;
	vaddr_t next, end;
	int error;

	vaddr = (vaddr_t)buf;
	end = vaddr + size;

	if (pmap == pmap_kernel() &&
	    vaddr >= SH3_P1SEG_BASE && end <= SH3_P2SEG_END)
		paddr = SH3_P1SEG_TO_PHYS(vaddr);
	else {
		for (next = (vaddr + PAGE_SIZE) & ~PAGE_MASK;
		    next < end; next += PAGE_SIZE) {
			pmap_extract(pmap, vaddr, &paddr);
			error = _bus_dmamap_load_paddr(t, map,
			    paddr, vaddr, next - vaddr);
			if (error != 0)
				return (error);

			vaddr = next;
		}

		pmap_extract(pmap, vaddr, &paddr);
		size = end - vaddr;
	}

	return (_bus_dmamap_load_paddr(t, map, paddr, vaddr, size));
}

/*
 * Load a DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error;

	DPRINTF(("bus_dmamap_load: t = %p, map = %p, buf = %p, buflen = %ld, p = %p, flags = %x\n", t, map, buf, buflen, p, flags));

	DMAMAP_RESET(map);

	if (buflen > map->_dm_size)
		return (EINVAL);

	error = _bus_dmamap_load_vaddr(t, map, buf, buflen,
	    p == NULL ? pmap_kernel() : p->p_vmspace->vm_map.pmap);
	if (error != 0) {
		DMAMAP_RESET(map); /* no valid mappings */
		return (error);
	}

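	/* _bus_dmamap_load_paddr() left dm_nsegs as an index; make it a count. */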
	map->dm_nsegs++;

	return (0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct mbuf *m;
	int error;

	DMAMAP_RESET(map);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	for (m = m0; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		error = _bus_dmamap_load_vaddr(t, map, m->m_data, m->m_len,
		    pmap_kernel());
		if (error != 0) {
			DMAMAP_RESET(map);
			return (error);
		}
	}

	map->dm_nsegs++;

	return (0);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	DPRINTF(("bus_dmamap_unload: t = %p, map = %p\n", t, map));

	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}

/*
 * Synchronize a DMA map.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr, naddr;
	int i;

	DPRINTF(("bus_dmamap_sync: t = %p, map = %p, offset = %ld, len = %ld, ops = %x\n", t, map, offset, len, ops));

#ifdef DIAGNOSTIC
	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset");
	if ((offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	if (!sh_cache_enable_dcache) {
		/* Nothing to do */
		DPRINTF(("bus_dmamap_sync: disabled D-Cache\n"));
		return;
	}

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;
		naddr = addr + offset;

		if ((naddr >= SH3_P2SEG_BASE)
		 && (naddr + minlen <= SH3_P2SEG_END)) {
			DPRINTF(("bus_dmamap_sync: P2SEG (0x%08lx)\n", naddr));
			offset = 0;
			len -= minlen;
			continue;
		}

		DPRINTF(("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx+%lx, 0x%lx+0x%lx) (remain = %ld)\n",
		    i, addr, offset, addr, offset + minlen - 1, len));

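		/*
		 * Dirty lines are written back before the device reads
		 * memory (PREWRITE); the cached copy is invalidated
		 * before the device writes memory (PREREAD), using
		 * write-back-invalidate when the range is not cache
		 * line aligned so that neighbouring data is not lost;
		 * after a device write, stale lines are invalidated
		 * again (POSTREAD).
		 */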
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			if (SH_HAS_WRITEBACK_CACHE)
				sh_dcache_wbinv_range(naddr, minlen);
			else
				sh_dcache_inv_range(naddr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			if (SH_HAS_WRITEBACK_CACHE &&
			    ((naddr | minlen) & (sh_cache_line_size - 1)) != 0)
				sh_dcache_wbinv_range(naddr, minlen);
			else
				sh_dcache_inv_range(naddr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			if (SH_HAS_WRITEBACK_CACHE)
				sh_dcache_wb_range(naddr, minlen);
			break;

		case BUS_DMASYNC_POSTREAD:
		case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
			sh_dcache_inv_range(naddr, minlen);
			break;
		}
		offset = 0;
		len -= minlen;
	}
}
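
/*
 * For device-to-memory transfers the complementary pairing is used,
 * e.g. (hypothetical driver code):
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... device writes into the buffer ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 */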

/*
 * Allocate memory safe for DMA.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error, plaflag;

	DPRINTF(("bus_dmamem_alloc: t = %p, size = %ld, alignment = %ld, boundary = %ld, segs = %p, nsegs = %d, rsegs = %p, flags = %x\n", t, size, alignment, boundary, segs, nsegs, rsegs, flags));

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, 0, -1, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;

	DPRINTF(("bus_dmamem_alloc: m = %p, lastaddr = 0x%08lx\n",m,lastaddr));

	while ((m = TAILQ_NEXT(m, pageq)) != NULL) {
		curaddr = VM_PAGE_TO_PHYS(m);
		DPRINTF(("bus_dmamem_alloc: m = %p, curaddr = 0x%08lx, lastaddr = 0x%08lx\n", m, curaddr, lastaddr));
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			DPRINTF(("bus_dmamem_alloc: new segment\n"));
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	DPRINTF(("bus_dmamem_alloc: curseg = %d, *rsegs = %d\n",curseg,*rsegs));

	return (0);
}
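
/*
 * A minimal sketch of the allocation path for device-visible memory
 * such as a descriptor ring (names are hypothetical):
 *
 *	bus_dma_segment_t seg;
 *	caddr_t kva;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, &kva,
 *	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *		return (ENOMEM);
 *	}
 */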

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	DPRINTF(("bus_dmamem_free: t = %p, segs = %p, nsegs = %d\n", t, segs, nsegs));

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("bus_dmamem_free: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n", curseg, segs[curseg].ds_addr, segs[curseg].ds_len));
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			DPRINTF(("bus_dmamem_free: m = %p\n", m));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

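/*
 * Map DMA-safe memory allocated with _bus_dmamem_alloc() into kernel
 * virtual address space.
 */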
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const struct kmem_dyn_mode *kd;

	DPRINTF(("bus_dmamem_map: t = %p, segs = %p, nsegs = %d, size = %zu, kvap = %p, flags = %x\n", t, segs, nsegs, size, kvap, flags));

	/*
	 * If we're only mapping one segment, use the P1 (cached) or,
	 * for BUS_DMA_COHERENT, the P2 (uncached) direct-mapped
	 * segment, to avoid TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT) {
			*kvap = (caddr_t)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		} else {
			*kvap = (caddr_t)SH3_PHYS_TO_P1SEG(segs[0].ds_addr);
		}
		DPRINTF(("bus_dmamem_map: addr = 0x%08lx, kva = %p\n", segs[0].ds_addr, *kvap));
		return 0;
	}

	/* Always round the size. */
	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("bus_dmamem_map: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n", curseg, segs[curseg].ds_addr, segs[curseg].ds_len));
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    PROT_READ | PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}

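/*
 * Unmap DMA-safe memory previously mapped with _bus_dmamem_map().
 */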
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

	DPRINTF(("bus_dmamem_unmap: t = %p, kva = %p, size = %zu\n", t, kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with P[12]SEG.
	 */
	if ((kva >= (caddr_t)SH3_P1SEG_BASE)
	 && (kva <= (caddr_t)SH3_P2SEG_END)) {
		return;
	}

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	km_free(kva, size, &kv_any, &kp_none);
}

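/*
 * Provide an mmap(2) mapping of DMA-safe memory; not supported on
 * landisk.
 */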
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return (paddr_t)(-1);
}