xref: /illumos-gate/usr/src/uts/intel/io/iommulib.c (revision 86ecf0b4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"@(#)iommulib.c	1.6	08/09/07 SMI"
27 
28 #include <sys/sunddi.h>
29 #include <sys/sunndi.h>
30 #include <sys/errno.h>
31 #include <sys/modctl.h>
32 #include <sys/iommulib.h>
33 
34 /* ******** Type definitions private to this file  ********************** */
35 
/* 1 per IOMMU unit. There may be more than one per dip */
typedef struct iommulib_unit {
	kmutex_t ilu_lock;		/* protects all fields below */
	uint64_t ilu_ref;		/* # of open devinfo nodes using this unit */
	uint32_t ilu_unitid;		/* unique non-zero id; 0 once unregistered */
	dev_info_t *ilu_dip;		/* IOMMU driver's dip, held while registered */
	iommulib_ops_t *ilu_ops;	/* vendor ops vector passed at registration */
	void* ilu_data;			/* vendor-private data (ops->ilops_data) */
	struct iommulib_unit *ilu_next;	/* linkage on global iommulib_list */
	struct iommulib_unit *ilu_prev;
} iommulib_unit_t;
47 
/* One per registered nexus driver (typically rootnex) */
typedef struct iommulib_nex {
	dev_info_t *nex_dip;		/* nexus dip, held while registered */
	iommulib_nexops_t nex_ops;	/* by-value copy of the nexus ops vector */
	struct iommulib_nex *nex_next;	/* linkage on global iommulib_nexus_list */
	struct iommulib_nex *nex_prev;
} iommulib_nex_t;
54 
55 /* *********  Globals ************************ */
56 
57 /* IOMMU side: Following data protected by lock */
58 static kmutex_t iommulib_lock;
59 static iommulib_unit_t   *iommulib_list;
60 static uint64_t iommulib_unit_ids = 0;
61 static uint64_t iommulib_num_units = 0;
62 
63 /* rootnex side data */
64 
65 static kmutex_t iommulib_nexus_lock;
66 static iommulib_nex_t *iommulib_nexus_list;
67 
68 /* can be set atomically without lock */
69 static volatile uint32_t iommulib_fini;
70 
71 /* debug flag */
72 static int iommulib_debug;
73 
74 /*
75  * Module linkage information for the kernel.
76  */
/* Misc module (no dev_ops): iommulib is a library used by other drivers. */
static struct modlmisc modlmisc = {
	&mod_miscops, "IOMMU library module"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
84 
85 int
86 _init(void)
87 {
88 	return (mod_install(&modlinkage));
89 }
90 
91 int
92 _fini(void)
93 {
94 	mutex_enter(&iommulib_lock);
95 	if (iommulib_list != NULL || iommulib_nexus_list != NULL) {
96 		mutex_exit(&iommulib_lock);
97 		return (EBUSY);
98 	}
99 	iommulib_fini = 1;
100 
101 	mutex_exit(&iommulib_lock);
102 	return (mod_remove(&modlinkage));
103 }
104 
/* Loadable module info entry point: report module information. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
110 
111 /*
112  * Routines with iommulib_iommu_* are invoked from the
113  * IOMMU driver.
114  * Routines with iommulib_nex* are invoked from the
115  * nexus driver (typically rootnex)
116  */
117 
118 int
119 iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops,
120     iommulib_nexhandle_t *handle)
121 {
122 	iommulib_nex_t *nexp;
123 	int instance = ddi_get_instance(dip);
124 	const char *driver = ddi_driver_name(dip);
125 	dev_info_t *pdip = ddi_get_parent(dip);
126 	const char *f = "iommulib_nexus_register";
127 
128 	ASSERT(nexops);
129 	ASSERT(handle);
130 
131 	*handle = NULL;
132 
133 	/*
134 	 * Root node is never busy held
135 	 */
136 	if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED ||
137 	    !DEVI_BUSY_OWNED(pdip))) {
138 		cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED "
139 		    "or busy held for nexops vector (%p). Failing registration",
140 		    f, (void *)nexops);
141 		return (DDI_FAILURE);
142 	}
143 
144 	if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) {
145 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version "
146 		    "in nexops vector (%p). Failing NEXUS registration",
147 		    f, driver, instance, (void *)nexops);
148 		return (DDI_FAILURE);
149 	}
150 
151 	ASSERT(nexops->nops_data == NULL);
152 
153 	if (nexops->nops_id == NULL) {
154 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
155 		    "Failing registration for nexops vector: %p",
156 		    f, driver, instance, (void *)nexops);
157 		return (DDI_FAILURE);
158 	}
159 
160 	if (nexops->nops_dma_allochdl == NULL) {
161 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. "
162 		    "Failing registration for ops vector: %p", f,
163 		    driver, instance, (void *)nexops);
164 		return (DDI_FAILURE);
165 	}
166 
167 	if (nexops->nops_dma_freehdl == NULL) {
168 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. "
169 		    "Failing registration for ops vector: %p", f,
170 		    driver, instance, (void *)nexops);
171 		return (DDI_FAILURE);
172 	}
173 
174 	if (nexops->nops_dma_bindhdl == NULL) {
175 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. "
176 		    "Failing registration for ops vector: %p", f,
177 		    driver, instance, (void *)nexops);
178 		return (DDI_FAILURE);
179 	}
180 
181 	if (nexops->nops_dma_sync == NULL) {
182 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. "
183 		    "Failing registration for ops vector: %p", f,
184 		    driver, instance, (void *)nexops);
185 		return (DDI_FAILURE);
186 	}
187 
188 	if (nexops->nops_dma_reset_cookies == NULL) {
189 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. "
190 		    "Failing registration for ops vector: %p", f,
191 		    driver, instance, (void *)nexops);
192 		return (DDI_FAILURE);
193 	}
194 
195 	if (nexops->nops_dma_get_cookies == NULL) {
196 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. "
197 		    "Failing registration for ops vector: %p", f,
198 		    driver, instance, (void *)nexops);
199 		return (DDI_FAILURE);
200 	}
201 
202 	if (nexops->nops_dma_set_cookies == NULL) {
203 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_set_cookies op. "
204 		    "Failing registration for ops vector: %p", f,
205 		    driver, instance, (void *)nexops);
206 		return (DDI_FAILURE);
207 	}
208 
209 	if (nexops->nops_dma_clear_cookies == NULL) {
210 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_clear_cookies op. "
211 		    "Failing registration for ops vector: %p", f,
212 		    driver, instance, (void *)nexops);
213 		return (DDI_FAILURE);
214 	}
215 
216 	if (nexops->nops_dma_get_sleep_flags == NULL) {
217 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_sleep_flags op. "
218 		    "Failing registration for ops vector: %p", f,
219 		    driver, instance, (void *)nexops);
220 		return (DDI_FAILURE);
221 	}
222 
223 	if (nexops->nops_dma_win == NULL) {
224 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. "
225 		    "Failing registration for ops vector: %p", f,
226 		    driver, instance, (void *)nexops);
227 		return (DDI_FAILURE);
228 	}
229 
230 	/* Check for legacy ops */
231 	if (nexops->nops_dma_map == NULL) {
232 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_map op. "
233 		    "Failing registration for ops vector: %p", f,
234 		    driver, instance, (void *)nexops);
235 		return (DDI_FAILURE);
236 	}
237 
238 	if (nexops->nops_dma_mctl == NULL) {
239 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy nops_dma_mctl op. "
240 		    "Failing registration for ops vector: %p", f,
241 		    driver, instance, (void *)nexops);
242 		return (DDI_FAILURE);
243 	}
244 
245 	nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP);
246 
247 	mutex_enter(&iommulib_lock);
248 	if (iommulib_fini == 1) {
249 		mutex_exit(&iommulib_lock);
250 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. "
251 		    "Failing NEXUS register.", f);
252 		kmem_free(nexp, sizeof (iommulib_nex_t));
253 		return (DDI_FAILURE);
254 	}
255 
256 	/*
257 	 * fini/register race conditions have been handled. Now create the
258 	 * nexus struct
259 	 */
260 	ndi_hold_devi(dip);
261 	nexp->nex_dip = dip;
262 	nexp->nex_ops = *nexops;
263 
264 	mutex_enter(&iommulib_nexus_lock);
265 	nexp->nex_next = iommulib_nexus_list;
266 	iommulib_nexus_list = nexp;
267 	nexp->nex_prev = NULL;
268 
269 	if (nexp->nex_next != NULL)
270 		nexp->nex_next->nex_prev = nexp;
271 
272 	mutex_exit(&iommulib_nexus_lock);
273 	mutex_exit(&iommulib_lock);
274 
275 	cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered NEXUS %s "
276 	    "nexops=%p", f, driver, instance, ddi_node_name(dip),
277 	    (void *)nexops);
278 
279 	*handle = nexp;
280 
281 	return (DDI_SUCCESS);
282 }
283 
/*
 * iommulib_nexus_unregister:
 *	Unlink and free the nexus registration identified by handle.
 *	The caller's handle is invalid on return. The devinfo hold taken
 *	at registration is released last, after the final message that
 *	uses the driver name derived from the dip.
 */
int
iommulib_nexus_unregister(iommulib_nexhandle_t handle)
{
	dev_info_t *dip;
	int instance;
	const char *driver;
	iommulib_nex_t *nexp = (iommulib_nex_t *)handle;
	const char *f = "iommulib_nexus_unregister";

	ASSERT(nexp);

	mutex_enter(&iommulib_nexus_lock);

	/* Capture identity before the structure is freed below. */
	dip = nexp->nex_dip;
	driver = ddi_driver_name(dip);
	instance = ddi_get_instance(dip);

	/* A future enhancement would be to add ref-counts */

	/* Unlink from the doubly-linked nexus list. */
	if (nexp->nex_prev == NULL) {
		iommulib_nexus_list = nexp->nex_next;
	} else {
		nexp->nex_prev->nex_next = nexp->nex_next;
	}

	if (nexp->nex_next != NULL)
		nexp->nex_next->nex_prev = nexp->nex_prev;

	mutex_exit(&iommulib_nexus_lock);

	kmem_free(nexp, sizeof (iommulib_nex_t));

	cmn_err(CE_NOTE, "!%s: %s%d: NEXUS (%s) handle successfully "
	    "unregistered from IOMMULIB", f, driver, instance,
	    ddi_node_name(dip));

	/* Drop the hold taken by iommulib_nexus_register(). */
	ndi_rele_devi(dip);

	return (DDI_SUCCESS);
}
324 
325 static iommulib_nexops_t *
326 lookup_nexops(dev_info_t *dip)
327 {
328 	iommulib_nex_t  *nexp;
329 
330 	mutex_enter(&iommulib_nexus_lock);
331 	nexp = iommulib_nexus_list;
332 	while (nexp) {
333 		if (nexp->nex_dip == dip)
334 			break;
335 		nexp = nexp->nex_next;
336 	}
337 	mutex_exit(&iommulib_nexus_lock);
338 
339 	return (nexp ? &nexp->nex_ops : NULL);
340 }
341 
342 int
343 iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops,
344     iommulib_handle_t *handle)
345 {
346 	const char *vendor;
347 	iommulib_unit_t *unitp;
348 	int instance = ddi_get_instance(dip);
349 	const char *driver = ddi_driver_name(dip);
350 	dev_info_t *pdip = ddi_get_parent(dip);
351 	const char *f = "iommulib_register";
352 
353 	ASSERT(ops);
354 	ASSERT(handle);
355 
356 	if (i_ddi_node_state(dip) < DS_PROBED || !DEVI_BUSY_OWNED(pdip)) {
357 		cmn_err(CE_WARN, "%s: devinfo node not in DS_PROBED or "
358 		    "busy held for ops vector (%p). Failing registration",
359 		    f, (void *)ops);
360 		return (DDI_FAILURE);
361 	}
362 
363 
364 	if (ops->ilops_vers != IOMMU_OPS_VERSION) {
365 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version "
366 		    "in ops vector (%p). Failing registration", f, driver,
367 		    instance, (void *)ops);
368 		return (DDI_FAILURE);
369 	}
370 
371 	switch (ops->ilops_vendor) {
372 	case AMD_IOMMU:
373 		vendor = "AMD";
374 		break;
375 	case INTEL_IOMMU:
376 		vendor = "Intel";
377 		break;
378 	case INVALID_VENDOR:
379 		cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. "
380 		    "Failing registration for ops vector: %p", f,
381 		    driver, instance, ops->ilops_vendor, (void *)ops);
382 		return (DDI_FAILURE);
383 	default:
384 		cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). "
385 		    "Failing registration for ops vector: %p", f,
386 		    driver, instance, ops->ilops_vendor, (void *)ops);
387 		return (DDI_FAILURE);
388 	}
389 
390 	cmn_err(CE_NOTE, "!%s: %s%d: Detected IOMMU registration from vendor"
391 	    " %s", f, driver, instance, vendor);
392 
393 	if (ops->ilops_data == NULL) {
394 		cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. "
395 		    "Failing registration for ops vector: %p", f,
396 		    driver, instance, (void *)ops);
397 		return (DDI_FAILURE);
398 	}
399 
400 	if (ops->ilops_id == NULL) {
401 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
402 		    "Failing registration for ops vector: %p", f,
403 		    driver, instance, (void *)ops);
404 		return (DDI_FAILURE);
405 	}
406 
407 	if (ops->ilops_probe == NULL) {
408 		cmn_err(CE_WARN, "%s: %s%d: NULL probe op. "
409 		    "Failing registration for ops vector: %p", f,
410 		    driver, instance, (void *)ops);
411 		return (DDI_FAILURE);
412 	}
413 
414 	if (ops->ilops_dma_allochdl == NULL) {
415 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. "
416 		    "Failing registration for ops vector: %p", f,
417 		    driver, instance, (void *)ops);
418 		return (DDI_FAILURE);
419 	}
420 
421 	if (ops->ilops_dma_freehdl == NULL) {
422 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. "
423 		    "Failing registration for ops vector: %p", f,
424 		    driver, instance, (void *)ops);
425 		return (DDI_FAILURE);
426 	}
427 
428 	if (ops->ilops_dma_bindhdl == NULL) {
429 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. "
430 		    "Failing registration for ops vector: %p", f,
431 		    driver, instance, (void *)ops);
432 		return (DDI_FAILURE);
433 	}
434 
435 	if (ops->ilops_dma_sync == NULL) {
436 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. "
437 		    "Failing registration for ops vector: %p", f,
438 		    driver, instance, (void *)ops);
439 		return (DDI_FAILURE);
440 	}
441 
442 	if (ops->ilops_dma_win == NULL) {
443 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. "
444 		    "Failing registration for ops vector: %p", f,
445 		    driver, instance, (void *)ops);
446 		return (DDI_FAILURE);
447 	}
448 
449 	/* Check for legacy ops */
450 	if (ops->ilops_dma_map == NULL) {
451 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_map op. "
452 		    "Failing registration for ops vector: %p", f,
453 		    driver, instance, (void *)ops);
454 		return (DDI_FAILURE);
455 	}
456 
457 	if (ops->ilops_dma_mctl == NULL) {
458 		cmn_err(CE_WARN, "%s: %s%d: NULL legacy dma_mctl op. "
459 		    "Failing registration for ops vector: %p", f,
460 		    driver, instance, (void *)ops);
461 		return (DDI_FAILURE);
462 	}
463 
464 	unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP);
465 	mutex_enter(&iommulib_lock);
466 	if (iommulib_fini == 1) {
467 		mutex_exit(&iommulib_lock);
468 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.",
469 		    f);
470 		kmem_free(unitp, sizeof (iommulib_unit_t));
471 		return (DDI_FAILURE);
472 	}
473 
474 	/*
475 	 * fini/register race conditions have been handled. Now create the
476 	 * IOMMU unit
477 	 */
478 	mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL);
479 
480 	mutex_enter(&unitp->ilu_lock);
481 	unitp->ilu_unitid = ++iommulib_unit_ids;
482 	unitp->ilu_ref = 0;
483 	ndi_hold_devi(dip);
484 	unitp->ilu_dip = dip;
485 	unitp->ilu_ops = ops;
486 	unitp->ilu_data = ops->ilops_data;
487 
488 	unitp->ilu_next = iommulib_list;
489 	iommulib_list = unitp;
490 	unitp->ilu_prev = NULL;
491 	if (unitp->ilu_next)
492 		unitp->ilu_next->ilu_prev = unitp;
493 
494 	mutex_exit(&unitp->ilu_lock);
495 
496 	iommulib_num_units++;
497 
498 	*handle = unitp;
499 
500 	mutex_exit(&iommulib_lock);
501 
502 	cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered IOMMU unit "
503 	    "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u",
504 	    f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data,
505 	    unitp->ilu_unitid);
506 
507 	return (DDI_SUCCESS);
508 }
509 
510 int
511 iommulib_iommu_unregister(iommulib_handle_t handle)
512 {
513 	uint32_t unitid;
514 	dev_info_t *dip;
515 	int instance;
516 	const char *driver;
517 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
518 	const char *f = "iommulib_unregister";
519 
520 	ASSERT(unitp);
521 
522 	mutex_enter(&iommulib_lock);
523 	mutex_enter(&unitp->ilu_lock);
524 
525 	unitid = unitp->ilu_unitid;
526 	dip = unitp->ilu_dip;
527 	driver = ddi_driver_name(dip);
528 	instance = ddi_get_instance(dip);
529 
530 	if (unitp->ilu_ref != 0) {
531 		mutex_exit(&unitp->ilu_lock);
532 		mutex_exit(&iommulib_lock);
533 		cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot "
534 		    "unregister IOMMULIB unitid %u",
535 		    f, driver, instance, unitid);
536 		return (DDI_FAILURE);
537 	}
538 	unitp->ilu_unitid = 0;
539 	ASSERT(unitp->ilu_ref == 0);
540 
541 	if (unitp->ilu_prev == NULL) {
542 		iommulib_list = unitp->ilu_next;
543 		unitp->ilu_next->ilu_prev = NULL;
544 	} else {
545 		unitp->ilu_prev->ilu_next = unitp->ilu_next;
546 		unitp->ilu_next->ilu_prev = unitp->ilu_prev;
547 	}
548 
549 	iommulib_num_units--;
550 
551 	mutex_exit(&unitp->ilu_lock);
552 
553 	mutex_destroy(&unitp->ilu_lock);
554 	kmem_free(unitp, sizeof (iommulib_unit_t));
555 
556 	mutex_exit(&iommulib_lock);
557 
558 	cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle (unitid=%u) successfully "
559 	    "unregistered", f, driver, instance, unitid);
560 
561 	ndi_rele_devi(dip);
562 
563 	return (DDI_SUCCESS);
564 }
565 
/*
 * iommulib_nex_open:
 *	Called by the nexus for a device (rdip) about to perform DMA.
 *	Probes each registered IOMMU unit; the first unit whose
 *	ilops_probe accepts rdip becomes that node's IOMMU. On success
 *	the unit's ref count is bumped and the unit handle is stashed in
 *	DEVI(rdip)->devi_iommulib_handle. Returns DDI_FAILURE with
 *	*errorp = ENOTSUP when no IOMMU controls rdip (or rdip is the
 *	AMD IOMMU driver itself).
 */
int
iommulib_nex_open(dev_info_t *rdip, uint_t *errorp)
{
	iommulib_unit_t *unitp;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "iommulib_nex_open";

	*errorp = 0;

	/* Already opened for this devinfo node: nothing to do. */
	if (IOMMU_USED(rdip))
		return (DDI_SUCCESS);

	ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);

	/* prevent use of IOMMU for AMD IOMMU's DMA */
	if (strcmp(driver, "amd_iommu") == 0) {
		*errorp = ENOTSUP;
		return (DDI_FAILURE);
	}

	/*
	 * Use the probe entry point to determine in a hardware specific
	 * manner whether this dip is controlled by an IOMMU. If yes,
	 * return the handle corresponding to the IOMMU unit.
	 */

	mutex_enter(&iommulib_lock);
	for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
		if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
			break;
	}

	if (unitp == NULL) {
		mutex_exit(&iommulib_lock);
		if (iommulib_debug) {
			char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not "
			    "controlled by an IOMMU: path=%s", f, driver,
			    instance, (void *)rdip, ddi_pathname(rdip, buf));
			kmem_free(buf, MAXPATHLEN);
		}
		*errorp = ENOTSUP;
		return (DDI_FAILURE);
	}

	/* Take a reference while still under iommulib_lock so the unit */
	/* cannot be unregistered between probe and this point. */
	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_ref++;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	DEVI(rdip)->devi_iommulib_handle = unitp;

	return (DDI_SUCCESS);
}
621 
/*
 * iommulib_nex_close:
 *	Counterpart of iommulib_nex_open(). Clears rdip's IOMMU handle
 *	and drops the reference taken at open time. No-op if rdip never
 *	had a handle.
 */
void
iommulib_nex_close(dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	const char *driver;
	int instance;
	uint32_t unitid;
	const char *f = "iommulib_nex_close";

	unitp = (iommulib_unit_t *)DEVI(rdip)->devi_iommulib_handle;
	if (unitp == NULL)
		return;

	DEVI(rdip)->devi_iommulib_handle = NULL;

	/* Record identity under the locks before dropping the reference. */
	mutex_enter(&iommulib_lock);
	mutex_enter(&unitp->ilu_lock);
	unitid = unitp->ilu_unitid;
	driver = ddi_driver_name(unitp->ilu_dip);
	instance = ddi_get_instance(unitp->ilu_dip);
	unitp->ilu_ref--;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	if (iommulib_debug) {
		char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(rdip, buf);
		cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), "
		    "unitid=%u rdip path = %s", f, driver, instance,
		    (void *)rdip, unitid, buf);
		kmem_free(buf, MAXPATHLEN);
	}
}
655 
656 int
657 iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip,
658     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t),
659     caddr_t arg, ddi_dma_handle_t *dma_handlep)
660 {
661 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
662 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
663 
664 	ASSERT(unitp);
665 
666 	/* No need to grab lock - the handle is reference counted */
667 	return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip,
668 	    attr, waitfp, arg, dma_handlep));
669 }
670 
671 int
672 iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip,
673     ddi_dma_handle_t dma_handle)
674 {
675 	int error;
676 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
677 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
678 
679 	ASSERT(unitp);
680 
681 	/* No need to grab lock - the handle is reference counted */
682 	error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip,
683 	    rdip, dma_handle);
684 
685 	return (error);
686 }
687 
688 int
689 iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
690     ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
691     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
692 {
693 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
694 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
695 
696 	ASSERT(unitp);
697 
698 	/* No need to grab lock - the handle is reference counted */
699 	return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle,
700 	    dmareq, cookiep, ccountp));
701 }
702 
703 int
704 iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
705     ddi_dma_handle_t dma_handle)
706 {
707 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
708 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
709 
710 	ASSERT(unitp);
711 
712 	/* No need to grab lock - the handle is reference counted */
713 	return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip,
714 	    dma_handle));
715 }
716 
717 int
718 iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip,
719     ddi_dma_handle_t dma_handle, off_t off, size_t len,
720     uint_t cache_flags)
721 {
722 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
723 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
724 
725 	ASSERT(unitp);
726 
727 	/* No need to grab lock - the handle is reference counted */
728 	return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle,
729 	    off, len, cache_flags));
730 }
731 
732 int
733 iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip,
734     ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp,
735     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
736 {
737 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
738 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
739 
740 	ASSERT(unitp);
741 
742 	/* No need to grab lock - the handle is reference counted */
743 	return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle,
744 	    win, offp, lenp, cookiep, ccountp));
745 }
746 
747 /* Obsolete DMA routines */
748 
749 int
750 iommulib_nexdma_map(dev_info_t *dip, dev_info_t *rdip,
751     struct ddi_dma_req *dmareq, ddi_dma_handle_t *dma_handle)
752 {
753 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
754 	iommulib_unit_t *unitp = handle;
755 
756 	ASSERT(unitp);
757 
758 	/* No need to grab lock - the handle is reference counted */
759 	return (unitp->ilu_ops->ilops_dma_map(handle, dip, rdip, dmareq,
760 	    dma_handle));
761 }
762 
763 int
764 iommulib_nexdma_mctl(dev_info_t *dip, dev_info_t *rdip,
765     ddi_dma_handle_t dma_handle, enum ddi_dma_ctlops request,
766     off_t *offp, size_t *lenp, caddr_t *objpp, uint_t cache_flags)
767 {
768 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
769 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
770 
771 	ASSERT(unitp);
772 
773 	/* No need to grab lock - the handle is reference counted */
774 	return (unitp->ilu_ops->ilops_dma_mctl(handle, dip, rdip, dma_handle,
775 	    request, offp, lenp, objpp, cache_flags));
776 }
777 
778 /* Utility routines invoked by IOMMU drivers */
779 int
780 iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
781     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
782     ddi_dma_handle_t *handlep)
783 {
784 	iommulib_nexops_t *nexops = lookup_nexops(dip);
785 	if (nexops == NULL)
786 		return (DDI_FAILURE);
787 	return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg,
788 	    handlep));
789 }
790 
791 int
792 iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
793     ddi_dma_handle_t handle)
794 {
795 	iommulib_nexops_t *nexops = lookup_nexops(dip);
796 	if (nexops == NULL)
797 		return (DDI_FAILURE);
798 	return (nexops->nops_dma_freehdl(dip, rdip, handle));
799 }
800 
801 int
802 iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
803     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
804     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
805 {
806 	iommulib_nexops_t *nexops = lookup_nexops(dip);
807 	if (nexops == NULL)
808 		return (DDI_FAILURE);
809 	return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq,
810 	    cookiep, ccountp));
811 }
812 
813 int
814 iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
815     ddi_dma_handle_t handle)
816 {
817 	iommulib_nexops_t *nexops = lookup_nexops(dip);
818 	if (nexops == NULL)
819 		return (DDI_FAILURE);
820 	return (nexops->nops_dma_unbindhdl(dip, rdip, handle));
821 }
822 
823 void
824 iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
825 {
826 	iommulib_nexops_t *nexops = lookup_nexops(dip);
827 	nexops->nops_dma_reset_cookies(dip, handle);
828 }
829 
830 int
831 iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
832     ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
833 {
834 	iommulib_nexops_t *nexops = lookup_nexops(dip);
835 	if (nexops == NULL)
836 		return (DDI_FAILURE);
837 	return (nexops->nops_dma_get_cookies(dip, handle, cookiepp, ccountp));
838 }
839 
840 int
841 iommulib_iommu_dma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
842     ddi_dma_cookie_t *cookiep, uint_t ccount)
843 {
844 	iommulib_nexops_t *nexops = lookup_nexops(dip);
845 	if (nexops == NULL)
846 		return (DDI_FAILURE);
847 	return (nexops->nops_dma_set_cookies(dip, handle, cookiep, ccount));
848 }
849 
850 int
851 iommulib_iommu_dma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
852 {
853 	iommulib_nexops_t *nexops = lookup_nexops(dip);
854 	if (nexops == NULL)
855 		return (DDI_FAILURE);
856 	return (nexops->nops_dma_clear_cookies(dip, handle));
857 }
858 
859 int
860 iommulib_iommu_dma_get_sleep_flags(dev_info_t *dip, ddi_dma_handle_t handle)
861 {
862 	iommulib_nexops_t *nexops = lookup_nexops(dip);
863 	if (nexops == NULL)
864 		return (DDI_FAILURE);
865 	return (nexops->nops_dma_get_sleep_flags(handle));
866 }
867 
868 int
869 iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip,
870     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags)
871 {
872 	iommulib_nexops_t *nexops = lookup_nexops(dip);
873 	if (nexops == NULL)
874 		return (DDI_FAILURE);
875 	return (nexops->nops_dma_sync(dip, rdip, handle, off, len,
876 	    cache_flags));
877 }
878 
879 int
880 iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
881     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
882     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
883 {
884 	iommulib_nexops_t *nexops = lookup_nexops(dip);
885 	if (nexops == NULL)
886 		return (DDI_FAILURE);
887 	return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp,
888 	    cookiep, ccountp));
889 }
890 
891 int
892 iommulib_iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
893     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
894 {
895 	iommulib_nexops_t *nexops = lookup_nexops(dip);
896 	if (nexops == NULL)
897 		return (DDI_FAILURE);
898 	return (nexops->nops_dma_map(dip, rdip, dmareq, handlep));
899 }
900 
901 int
902 iommulib_iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
903     ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp,
904     size_t *lenp, caddr_t *objpp, uint_t cache_flags)
905 {
906 	iommulib_nexops_t *nexops = lookup_nexops(dip);
907 	if (nexops == NULL)
908 		return (DDI_FAILURE);
909 	return (nexops->nops_dma_mctl(dip, rdip, handle, request, offp, lenp,
910 	    objpp, cache_flags));
911 }
912 
913 int
914 iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp)
915 {
916 	iommulib_unit_t *unitp;
917 	uint64_t unitid;
918 
919 	unitp = (iommulib_unit_t *)handle;
920 
921 	ASSERT(unitp);
922 	ASSERT(unitidp);
923 
924 	mutex_enter(&unitp->ilu_lock);
925 	unitid = unitp->ilu_unitid;
926 	mutex_exit(&unitp->ilu_lock);
927 
928 	ASSERT(unitid > 0);
929 	*unitidp = (uint64_t)unitid;
930 
931 	return (DDI_SUCCESS);
932 }
933 
934 dev_info_t *
935 iommulib_iommu_getdip(iommulib_handle_t handle)
936 {
937 	iommulib_unit_t *unitp;
938 	dev_info_t *dip;
939 
940 	unitp = (iommulib_unit_t *)handle;
941 
942 	ASSERT(unitp);
943 
944 	mutex_enter(&unitp->ilu_lock);
945 	dip = unitp->ilu_dip;
946 	ASSERT(dip);
947 	ndi_hold_devi(dip);
948 	mutex_exit(&unitp->ilu_lock);
949 
950 	return (dip);
951 }
952 
953 iommulib_ops_t *
954 iommulib_iommu_getops(iommulib_handle_t handle)
955 {
956 	iommulib_unit_t *unitp;
957 	iommulib_ops_t *ops;
958 
959 	unitp = (iommulib_unit_t *)handle;
960 
961 	ASSERT(unitp);
962 
963 	mutex_enter(&unitp->ilu_lock);
964 	ops = unitp->ilu_ops;
965 	mutex_exit(&unitp->ilu_lock);
966 
967 	ASSERT(ops);
968 
969 	return (ops);
970 }
971 
972 void *
973 iommulib_iommu_getdata(iommulib_handle_t handle)
974 {
975 	iommulib_unit_t *unitp;
976 	void *data;
977 
978 	unitp = (iommulib_unit_t *)handle;
979 
980 	ASSERT(unitp);
981 
982 	mutex_enter(&unitp->ilu_lock);
983 	data = unitp->ilu_data;
984 	mutex_exit(&unitp->ilu_lock);
985 
986 	ASSERT(data);
987 
988 	return (data);
989 }
990