/*	$OpenBSD: iommu.c,v 1.7 2001/12/04 23:22:42 art Exp $	*/
/*	$NetBSD: iommu.c,v 1.42 2001/10/07 20:30:40 eeh Exp $	*/

/*
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: sbus.c,v 1.13 1999/05/23 07:24:02 mrg Exp
 *	from: @(#)sbus.c	8.1 (Berkeley) 6/11/93
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <sparc64/sparc64/cache.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define	IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)   do { if (iommudebug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif

#define iommu_strbuf_flush(i,v) do {				\
	if ((i)->is_sb[0])					\
		bus_space_write_8((i)->is_bustag,		\
			(bus_space_handle_t)(u_long)		\
			&(i)->is_sb[0]->strbuf_pgflush,		\
			0, (v));				\
	if ((i)->is_sb[1])					\
		bus_space_write_8((i)->is_bustag,		\
			(bus_space_handle_t)(u_long)		\
			&(i)->is_sb[1]->strbuf_pgflush,		\
			0, (v));				\
	} while (0)
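
/*
 * Note that iommu_strbuf_flush() only posts a page flush to each
 * present streaming buffer; callers that need the flush to complete
 * must follow it with iommu_strbuf_flush_done().
 */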

static	int iommu_strbuf_flush_done __P((struct iommu_state *));

/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and set up the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(name, is, tsbsize, iovabase)
	char *name;
	struct iommu_state *is;
	int tsbsize;
	u_int32_t iovabase;
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *m;
	struct pglist mlist;

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we
	 * will deal with it here.
	 *
	 * The IOMMU address space always ends at 0xffffe000, but the starting
	 * address depends on the size of the map.  The map size is 1024 * 2 ^
	 * is->is_tsbsize entries, where each entry is 8 bytes.  The start of
	 * the map can be calculated by (0xffffe000 << (10 + is->is_tsbsize)),
	 * truncated to 32 bits, which is what IOTSB_VSTART() computes.
	 */
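	/*
	 * Worked example (a sketch, not from the original comment): with
	 * tsbsize = 0 the TSB holds 1024 eight-byte entries, each mapping
	 * one 8KB page, so the map covers 1024 * 8KB = 8MB of DVMA, from
	 * IOTSB_VSTART(0) = 0xff800000 up to 0xffffe000.
	 */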
	is->is_cr = (tsbsize << 16) | IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	is->is_dvmabase = iovabase;
	if (iovabase == -1)
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	size = NBPG << is->is_tsbsize;
	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
		(paddr_t)NBPG, (paddr_t)0, &mlist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	m = TAILQ_FIRST(&mlist);
	is->is_ptsb = VM_PAGE_TO_PHYS(m);

	/* Map the pages */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa | PMAP_NVC,
			VM_PROT_READ|VM_PROT_WRITE,
			VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		va += NBPG;
	}
	pmap_update(pmap_kernel());
	bzero(is->is_tsb, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO)
	{
		/* Probe the iommu */
		struct iommureg *regs = is->is_iommu;

		printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n",
		    (u_long)&regs->iommu_cr,
		    (u_long)&regs->iommu_tsb,
		    (u_long)&regs->iommu_flush);
		printf("iommu cr=%llx tsb=%llx\n",
		    (unsigned long long)regs->iommu_cr,
		    (unsigned long long)regs->iommu_tsb);
		printf("TSB base %p phys %llx\n",
		    (void *)is->is_tsb, (unsigned long long)is->is_ptsb);
		delay(1000000); /* 1 s */
	}
#endif

	/*
	 * Initialize streaming buffer, if it is there.
	 */
	if (is->is_sb[0] || is->is_sb[1])
		(void)pmap_extract(pmap_kernel(), (vaddr_t)&is->is_flush[0],
		    (paddr_t *)&is->is_flushpa);

	/*
	 * Now actually start up the IOMMU.
	 */
	iommu_reset(is);

	/*
	 * Now that the hardware is working, allocate a DVMA map.
	 */
	printf("DVMA map: %x to %x\n",
		(unsigned int)is->is_dvmabase,
		(unsigned int)(is->is_dvmabase + (size << 10)));
	is->is_dvmamap = extent_create(name,
				       is->is_dvmabase, (u_long)IOTSB_VEND,
				       M_DEVBUF, 0, 0, EX_NOWAIT);
}
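
/*
 * Typical call sequence (a sketch; the exact names and arguments live
 * in the sbus and psycho attach code): the bus driver fills in
 * is_bustag, is_iommu and is_sb[] from its own registers and then
 * calls, for example,
 *
 *	iommu_init("sbus_dvma", is, 0, -1);
 *
 * where tsbsize 0 selects a 1024-entry TSB and iovabase -1 lets the
 * DVMA base be derived from the TSB size.
 */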

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not stay set.
 */
void
iommu_reset(is)
	struct iommu_state *is;
{

	/* Need to do 64-bit stores */
	bus_space_write_8(is->is_bustag,
			  (bus_space_handle_t)(u_long)&is->is_iommu->iommu_tsb,
			  0, is->is_ptsb);
	/* Enable IOMMU in diagnostic mode */
	bus_space_write_8(is->is_bustag,
			  (bus_space_handle_t)(u_long)&is->is_iommu->iommu_cr, 0,
			  is->is_cr|IOMMUCR_DE);

	if (is->is_sb[0]) {

		/* Enable diagnostics mode? */
		bus_space_write_8(is->is_bustag,
			(bus_space_handle_t)(u_long)&is->is_sb[0]->strbuf_ctl,
			0, STRBUF_EN);

		/* No streaming buffers? Disable them */
		if (bus_space_read_8(is->is_bustag,
			(bus_space_handle_t)(u_long)&is->is_sb[0]->strbuf_ctl,
			0) == 0)
			is->is_sb[0] = 0;
	}

	if (is->is_sb[1]) {

		/* Enable diagnostics mode? */
		bus_space_write_8(is->is_bustag,
			(bus_space_handle_t)(u_long)&is->is_sb[1]->strbuf_ctl,
			0, STRBUF_EN);

		/* No streaming buffers? Disable them */
		if (bus_space_read_8(is->is_bustag,
			(bus_space_handle_t)(u_long)&is->is_sb[1]->strbuf_ctl,
			0) == 0)
			is->is_sb[1] = 0;
	}
}

/*
 * Here are the iommu control routines.
 */
void
iommu_enter(is, va, pa, flags)
	struct iommu_state *is;
	vaddr_t va;
	int64_t pa;
	int flags;
{
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
			(flags&BUS_DMA_STREAMING));

	/* Is the streamcache flush really needed? */
	if (is->is_sb[0] || is->is_sb[1]) {
		iommu_strbuf_flush(is, va);
		iommu_strbuf_flush_done(is);
	}
	DPRINTF(IDB_IOMMU, ("Clearing TSB slot %d for va %p\n",
		       (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va));
	is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, (bus_space_handle_t)(u_long)
			  &is->is_iommu->iommu_flush, 0, va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
		       va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
		       (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
		       (u_long)tte));
}


/*
 * Find the value of a DVMA address (debug routine).
 */
paddr_t
iommu_extract(is, dva)
	struct iommu_state *is;
	vaddr_t dva;
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase)
		tte = is->is_tsb[IOTSBSLOT(dva,is->is_tsbsize)];

	if ((tte&IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte&IOTTE_PAMASK);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * The streaming buffers are only flushed if they are present.
 *
 * XXX: this function needs better internal error checking.
 */
void
iommu_remove(is, va, len)
	struct iommu_state *is;
	vaddr_t va;
	size_t len;
{

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
		      (long) va, (long) len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
	    va, (u_long)IOTSBSLOT(va,is->is_tsbsize),
	    &is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d for va %p size %lx\n",
		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va, (u_long)len));
		if (is->is_sb[0] || is->is_sb[1]) {
			DPRINTF(IDB_IOMMU, ("iommu_remove: flushing va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
			       (void *)(u_long)va, (long)IOTSBSLOT(va,is->is_tsbsize),
			       (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
			       (long)(is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]),
			       (u_long)len));
			iommu_strbuf_flush(is, va);
			if (len <= NBPG)
				iommu_strbuf_flush_done(is);
			DPRINTF(IDB_IOMMU, ("iommu_remove: flushed va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
			       (void *)(u_long)va, (long)IOTSBSLOT(va,is->is_tsbsize),
			       (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
			       (long)(is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)]),
			       (u_long)len));
		}

		if (len <= NBPG)
			len = 0;
		else
			len -= NBPG;

		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = 0;
		bus_space_write_8(is->is_bustag, (bus_space_handle_t)(u_long)
				  &is->is_iommu->iommu_flush, 0, va);
		va += NBPG;
	}
}

static int
iommu_strbuf_flush_done(is)
	struct iommu_state *is;
{
	struct timeval cur, flushtimeout;

#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
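
	/*
	 * BUMPTIME advances a timeval by `usec' microseconds, carrying
	 * any overflow into tv_sec; BUMPTIME(&flushtimeout, 500000)
	 * below pushes the timeout half a second into the future.
	 */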

	if (!is->is_sb[0] && !is->is_sb[1])
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something
	 * went wrong.
	 */

	is->is_flush[0] = 1;
	is->is_flush[1] = 1;
	if (is->is_sb[0]) {
		is->is_flush[0] = 0;
		bus_space_write_8(is->is_bustag, (bus_space_handle_t)(u_long)
			&is->is_sb[0]->strbuf_flushsync, 0, is->is_flushpa);
	}
	if (is->is_sb[1]) {
		is->is_flush[1] = 0;
		bus_space_write_8(is->is_bustag, (bus_space_handle_t)(u_long)
			&is->is_sb[1]->strbuf_flushsync, 0, is->is_flushpa + 8);
	}

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flush = %lx at va = %lx pa = %lx now=%lx:%lx until = %lx:%lx\n",
		       (long)is->is_flush, (long)&is->is_flush,
		       (long)is->is_flushpa, cur.tv_sec, cur.tv_usec,
		       flushtimeout.tv_sec, flushtimeout.tv_usec));
	/* Bypass non-coherent D$ */
	while ((!ldxa(is->is_flushpa, ASI_PHYS_CACHED) ||
		!ldxa(is->is_flushpa + 8, ASI_PHYS_CACHED)) &&
		(cur.tv_sec < flushtimeout.tv_sec ||
		 (cur.tv_sec == flushtimeout.tv_sec &&
		  cur.tv_usec <= flushtimeout.tv_usec)))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if ((!ldxa(is->is_flushpa, ASI_PHYS_CACHED) ||
		!ldxa(is->is_flushpa + 8, ASI_PHYS_CACHED))) {
		printf("iommu_strbuf_flush_done: flush timeout %p,%p at %p\n",
			(void *)(u_long)is->is_flush[0],
			(void *)(u_long)is->is_flush[1],
			(void *)(u_long)is->is_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flushed\n"));
	return (is->is_flush[0] && is->is_flush[1]);
}

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
int
iommu_dvmamap_load(t, is, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	int s;
	int err;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr, sgstart, sgend;
	bus_size_t align, boundary;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		     "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = max(map->dm_segs[0]._ds_align, NBPG);
	s = splhigh();
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align, 0,
		(sgsize > boundary) ? 0 : boundary,
		EX_NOWAIT|EX_BOUNDZERO, (u_long *)&dvmaddr);
	splx(s);

#ifdef DEBUG
	if (err || (dvmaddr == (bus_addr_t)-1))
	{
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (bus_addr_t)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
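	/*
	 * Worked example (hypothetical numbers): with boundary = 64KB,
	 * sgstart = 0xff80f000 and buflen = 8KB, sgend = 0xff810fff lies
	 * in the next 64KB block, so the loop below ends seg 0 after
	 * 0x1000 bytes and starts seg 1 at 0xff810000.
	 */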
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary-1 %lx "
		"~(boundary-1) %lx\n", boundary, (boundary-1), ~(boundary-1)));
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[seg].ds_len =
			boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			"seg %d start %lx size %lx\n", seg,
			map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
				"too many segments %d\n", seg));
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
				dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (E2BIG);
		}
		sgstart = roundup(sgstart, boundary);
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		"seg %d start %lx size %lx\n", seg,
		map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg+1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
			bus_dmamap_unload(t, map);
			return (EFAULT);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load: map %p loading va %p "
			    "dva %lx at pa %lx\n",
			    map, (void *)vaddr, (long)dvmaddr,
			    (long)(curaddr&~(NBPG-1))));
		iommu_enter(is, trunc_page(dvmaddr), trunc_page(curaddr),
		    flags);

		dvmaddr += PAGE_SIZE;
		vaddr += sgsize;
		buflen -= sgsize;
	}
	return (0);
}


void
iommu_dvmamap_unload(t, is, map)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dmamap_t map;
{
	int error, s;
	bus_size_t sgsize;

	/* Flush the iommu */
#ifdef DEBUG
	if (!map->_dm_dvmastart) {
		printf("iommu_dvmamap_unload: _dm_dvmastart is zero\n");
#ifdef DDB
		Debugger();
#endif
	}
#endif
	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	/* Flush the caches */
	bus_dmamap_unload(t->_parent, map);

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/* Clear the map and release the DVMA range. */
	s = splhigh();
	sgsize = map->_dm_dvmasize;
	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
		map->_dm_dvmasize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	splx(s);
	if (error != 0)
		printf("warning: %qd of DVMA space lost\n", (long long)sgsize);
}


int
iommu_dvmamap_load_raw(t, is, map, segs, nsegs, flags, size)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	int flags;
	bus_size_t size;
{
	struct vm_page *m;
	int i, j, s;
	int left;
	int err;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = max(segs[0]._ds_align, NBPG);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	/* Count up the total number of pages we need */
	pa = segs[0].ds_addr;
	sgsize = 0;
	left = size;
	for (i = 0; left && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize);
		sgsize += min(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize);
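	/*
	 * Example (hypothetical numbers): two 0x1800-byte segments on
	 * different physical pages accumulate round_page(0x1800) +
	 * 0x1800 = 0x3800 bytes, and the final round_page() asks the
	 * extent for two 8KB pages of DVMA.
	 */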

	s = splhigh();
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align, 0,
		(sgsize > boundary) ? 0 : boundary,
		((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
		EX_BOUNDZERO, (u_long *)&dvmaddr);
	splx(s);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (bus_addr_t)-1)
	{
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#else
		panic("iommu_dvmamap_load_raw: extent_alloc failed");
#endif
	}
#endif
	if (dvmaddr == (bus_addr_t)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	if ((mlist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0;
		/*
		 * This seg list is made up of individual physical
		 * pages, probably built by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
		 * load each segment individually.
		 */

		i = j = 0;
		pa = segs[i].ds_addr;
		dvmaddr += (pa & PGOFSET);
		left = min(size, segs[i].ds_len);

		sgstart = dvmaddr;
		sgend = sgstart + left - 1;

		map->dm_segs[j].ds_addr = dvmaddr;
		map->dm_segs[j].ds_len = left;

		/* Set the size (which we will be destroying below) */
		map->dm_mapsize = size;

		while (size > 0) {
			int incr;

			if (left <= 0) {
				u_long offset;

				/*
				 * If the two segs are on different physical
				 * pages move to a new virtual page.
				 */
				if (trunc_page(pa) !=
					trunc_page(segs[++i].ds_addr))
					dvmaddr += NBPG;

				pa = segs[i].ds_addr;
				left = min(size, segs[i].ds_len);

				offset = (pa & PGOFSET);
				if (dvmaddr == trunc_page(dvmaddr) + offset) {
					/* We can combine segments */
					map->dm_segs[j].ds_len += left;
					sgend += left;
				} else {
					/* Need a new segment */
					dvmaddr = trunc_page(dvmaddr) + offset;
					DPRINTF(IDB_INFO,
						("iommu_dvmamap_load_raw: "
							"seg %d start %lx "
							"size %lx\n", j,
							map->dm_segs[j].ds_addr,
							map->dm_segs[j].
							ds_len));
					if (++j >= map->_dm_segcnt)
						goto fail;
					map->dm_segs[j].ds_addr = dvmaddr;
					map->dm_segs[j].ds_len = left;

					sgstart = dvmaddr;
					sgend = sgstart + left - 1;
				}

			}

			/* Check for boundary issues */
			while ((sgstart & ~(boundary - 1)) !=
				(sgend & ~(boundary - 1))) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
					boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
					"seg %d start %lx size %lx\n", j,
					map->dm_segs[j].ds_addr,
					map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
fail:
					iommu_dvmamap_unload(t, is, map);
					return (E2BIG);
				}
				sgstart = roundup(sgstart, boundary);
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dmamap_load_raw: size botch");

			DPRINTF(IDB_BUSDMA,
				("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
					map, (long)dvmaddr, (long)(pa)));
			/* Enter it if we haven't before. */
			if (prev_va != trunc_page(dvmaddr))
				iommu_enter(is, prev_va = trunc_page(dvmaddr),
					trunc_page(pa), flags);
			incr = min(pagesz, left);
			dvmaddr += incr;
			pa += incr;
			left -= incr;
			size -= incr;
		}
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
			"seg %d start %lx size %lx\n", j,
			map->dm_segs[j].ds_addr, map->dm_segs[j].ds_len));
		map->dm_nsegs = j+1;
		return (0);
	}
	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on an `mlist'.
	 */
	map->dm_mapsize = size;
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len =
			boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
			"seg %d start %lx size %lx\n", i,
			map->dm_segs[i].ds_addr,
			map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
				dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (E2BIG);
		}
		sgstart = roundup(sgstart, boundary);
		map->dm_segs[i].ds_addr = sgstart;
	}
	map->dm_segs[i].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
			"seg %d start %lx size %lx\n", i,
			map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len));

	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
		    map, (long)dvmaddr, (long)(pa)));
		iommu_enter(is, dvmaddr, pa, flags);

		dvmaddr += pagesz;
		sgsize -= pagesz;
	}
	map->dm_mapsize = size;
	map->dm_nsegs = i+1;
	return (0);
}

void
iommu_dvmamap_sync(t, is, map, offset, len, ops)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	vaddr_t va = map->dm_segs[0].ds_addr + offset;

	/*
	 * We only support one DMA segment; supporting more makes this code
	 * too unwieldy.
	 */

	if (ops & BUS_DMASYNC_PREREAD) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync: syncing va %p len %lu "
		     "BUS_DMASYNC_PREREAD\n", (void *)(u_long)va, (u_long)len));

		/* Nothing to do */;
	}
	if (ops & BUS_DMASYNC_POSTREAD) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync: syncing va %p len %lu "
		     "BUS_DMASYNC_POSTREAD\n", (void *)(u_long)va, (u_long)len));
		/* if we have a streaming buffer, flush it here first */
		if (is->is_sb[0] || is->is_sb[1])
			while (len > 0) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_sync: flushing va %p, %lu "
				     "bytes left\n", (void *)(u_long)va, (u_long)len));
				iommu_strbuf_flush(is, va);
				if (len <= NBPG) {
					iommu_strbuf_flush_done(is);
					len = 0;
				} else
					len -= NBPG;
				va += NBPG;
			}
	}
	if (ops & BUS_DMASYNC_PREWRITE) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync: syncing va %p len %lu "
		     "BUS_DMASYNC_PREWRITE\n", (void *)(u_long)va, (u_long)len));
		/* if we have a streaming buffer, flush it here first */
		if (is->is_sb[0] || is->is_sb[1])
			while (len > 0) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_sync: flushing va %p, %lu "
				     "bytes left\n", (void *)(u_long)va, (u_long)len));
				iommu_strbuf_flush(is, va);
				if (len <= NBPG) {
					iommu_strbuf_flush_done(is);
					len = 0;
				} else
					len -= NBPG;
				va += NBPG;
			}
	}
	if (ops & BUS_DMASYNC_POSTWRITE) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync: syncing va %p len %lu "
		     "BUS_DMASYNC_POSTWRITE\n", (void *)(u_long)va, (u_long)len));
		/* Nothing to do */;
	}
}

int
iommu_dvmamem_alloc(t, is, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx bound %llx "
	   "segp %p flags %d\n", (unsigned long long)size,
	   (unsigned long long)alignment, (unsigned long long)boundary,
	   segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
	    segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(t, is, segs, nsegs)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dma_segment_t *segs;
	int nsegs;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
	    segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(t, is, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	struct iommu_state *is;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(m);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va, (unsigned long long)addr | cbit));
		pmap_enter(pmap_kernel(), va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(t, is, kva, size)
	bus_dma_tag_t t;
	struct iommu_state *is;
	caddr_t kva;
	size_t size;
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
#if 0
	/*
	 * XXX ? is this necessary? i think so and i think other
	 * implementations are missing it.
	 */
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
#endif
}