1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 #include <sys/sunddi.h>
27 #include <sys/sunndi.h>
28 #include <sys/iommulib.h>
29 #include <sys/amd_iommu.h>
30 #include <sys/pci_cap.h>
31 #include <sys/bootconf.h>
32 #include <sys/ddidmareq.h>
33 
34 #include "amd_iommu_impl.h"
35 #include "amd_iommu_acpi.h"
36 #include "amd_iommu_page_tables.h"
37 
38 static int amd_iommu_fini(amd_iommu_t *iommu, int type);
39 static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
40 static void amd_iommu_stop(amd_iommu_t *iommu);
41 
42 static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
43 static int amd_iommu_allochdl(iommulib_handle_t handle,
44     dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
45     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
46 static int amd_iommu_freehdl(iommulib_handle_t handle,
47     dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
48 static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
49     dev_info_t *rdip, ddi_dma_handle_t dma_handle,
50     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
51     uint_t *ccountp);
52 static int amd_iommu_unbindhdl(iommulib_handle_t handle,
53     dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
54 static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
55     dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
56     size_t len, uint_t cache_flags);
57 static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
58     dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
59     off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
60     uint_t *ccountp);
61 static int amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
62     dev_info_t *rdip, struct ddi_dma_req *dmareq,
63     ddi_dma_handle_t *dma_handle);
64 static int amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
65     dev_info_t *rdip, ddi_dma_handle_t dma_handle,
66     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
67     caddr_t *objpp, uint_t cache_flags);
68 
69 static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
70     ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);
71 
72 extern void *device_arena_alloc(size_t size, int vm_flag);
73 extern void device_arena_free(void * vaddr, size_t size);
74 
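/*
 * DMA attributes used to allocate the IOMMU's own tables and buffers
 * (device table, command buffer, event log); dma_attr_align of 4096
 * gives the 4K alignment the IOMMU base address registers require.
 */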
75 ddi_dma_attr_t amd_iommu_dma_attr = {
76 	DMA_ATTR_V0,
77 	0U,				/* dma_attr_addr_lo */
78 	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
79 	0xffffffffU,			/* dma_attr_count_max */
80 	(uint64_t)4096,			/* dma_attr_align */
81 	1,				/* dma_attr_burstsizes */
82 	64,				/* dma_attr_minxfer */
83 	0xffffffffU,			/* dma_attr_maxxfer */
84 	0xffffffffU,			/* dma_attr_seg */
85 	1,				/* dma_attr_sgllen, variable */
86 	64,				/* dma_attr_granular */
87 	0				/* dma_attr_flags */
88 };
89 
90 ddi_device_acc_attr_t amd_iommu_devacc = {
91 	DDI_DEVICE_ATTR_V0,
92 	DDI_NEVERSWAP_ACC,
93 	DDI_STRICTORDER_ACC
94 };
95 
96 struct iommulib_ops amd_iommulib_ops = {
97 	IOMMU_OPS_VERSION,
98 	AMD_IOMMU,
99 	"AMD IOMMU Vers. 1",
100 	NULL,
101 	amd_iommu_probe,
102 	amd_iommu_allochdl,
103 	amd_iommu_freehdl,
104 	amd_iommu_bindhdl,
105 	amd_iommu_unbindhdl,
106 	amd_iommu_sync,
107 	amd_iommu_win,
108 	amd_iommu_map,
109 	amd_iommu_mctl
110 };
111 
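/* Serializes IOMMU page table updates across map/unmap operations */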
112 static kmutex_t amd_iommu_pgtable_lock;
113 
114 static int
115 amd_iommu_register(amd_iommu_t *iommu)
116 {
117 	dev_info_t *dip = iommu->aiomt_dip;
118 	const char *driver = ddi_driver_name(dip);
119 	int instance = ddi_get_instance(dip);
120 	iommulib_ops_t *iommulib_ops;
121 	iommulib_handle_t handle;
122 	const char *f = "amd_iommu_register";
123 
124 	iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);
125 
126 	*iommulib_ops = amd_iommulib_ops;
127 
128 	iommulib_ops->ilops_data = (void *)iommu;
129 	iommu->aiomt_iommulib_ops = iommulib_ops;
130 
131 	if (iommulib_iommu_register(dip, iommulib_ops, &handle)
132 	    != DDI_SUCCESS) {
133 		cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
134 		    "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
135 		kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
136 		return (DDI_FAILURE);
137 	}
138 
139 	iommu->aiomt_iommulib_handle = handle;
140 
141 	return (DDI_SUCCESS);
142 }
143 
144 static int
145 amd_iommu_unregister(amd_iommu_t *iommu)
146 {
147 	if (iommu->aiomt_iommulib_handle == NULL) {
148 		/* we never registered */
149 		return (DDI_SUCCESS);
150 	}
151 
152 	if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
153 	    != DDI_SUCCESS) {
154 		return (DDI_FAILURE);
155 	}
156 
157 	kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
158 	iommu->aiomt_iommulib_ops = NULL;
159 	iommu->aiomt_iommulib_handle = NULL;
160 
161 	return (DDI_SUCCESS);
162 }
163 
164 static int
165 amd_iommu_setup_passthru(amd_iommu_t *iommu)
166 {
167 	gfx_entry_t *gfxp;
168 	dev_info_t *dip;
169 
170 	/*
171 	 * Setup passthru mapping for "special" devices
172 	 */
173 	amd_iommu_set_passthru(iommu, NULL);
174 
175 	for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
176 		gfxp->g_ref++;
177 		dip = gfxp->g_dip;
178 		if (dip) {
179 			amd_iommu_set_passthru(iommu, dip);
180 		}
181 		gfxp->g_ref--;
182 	}
183 
184 	return (DDI_SUCCESS);
185 }
186 
187 static int
188 amd_iommu_start(amd_iommu_t *iommu)
189 {
190 	dev_info_t *dip = iommu->aiomt_dip;
191 	int instance = ddi_get_instance(dip);
192 	const char *driver = ddi_driver_name(dip);
193 	amd_iommu_acpi_ivhd_t *hinfop;
194 	const char *f = "amd_iommu_start";
195 
196 	hinfop = amd_iommu_lookup_all_ivhd();
197 
198 	/*
199 	 * Disable HT tunnel translation.
200 	 * XXX use ACPI
201 	 */
202 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
203 	    AMD_IOMMU_HT_TUN_ENABLE, 0);
204 
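	/*
	 * Prefer control register settings supplied by ACPI (IVRS/IVHD)
	 * for this unit, when present
	 */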
205 	if (hinfop) {
206 		if (amd_iommu_debug) {
207 			cmn_err(CE_NOTE,
208 			    "amd_iommu: using ACPI for CTRL registers");
209 		}
210 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
211 		    AMD_IOMMU_ISOC, hinfop->ach_Isoc);
212 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
213 		    AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
214 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
215 		    AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
216 	}
217 
218 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
219 	    AMD_IOMMU_INVTO, 5);
220 
221 
	/*
	 * Device table entry bit 0 (V) controls whether the entry is
	 * valid for address translation and bit 128 (IV) controls
	 * whether interrupt remapping is valid. With both clear an
	 * entry acts as pass-thru. Note, however, that
	 * amd_iommu_setup_tables_and_buffers() sets V=1 (with TV=0)
	 * in every entry, so stray DMA faults instead of passing
	 * through once the IOMMU is enabled.
	 */
230 
231 	/* Finally enable the IOMMU ... */
232 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
233 	    AMD_IOMMU_ENABLE, 1);
234 
235 	if (amd_iommu_debug) {
236 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
237 		    "Successfully started AMD IOMMU", f, driver, instance,
238 		    iommu->aiomt_idx);
239 	}
240 	cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
241 	    instance, iommu->aiomt_idx);
242 
243 	return (DDI_SUCCESS);
244 }
245 
246 static void
247 amd_iommu_stop(amd_iommu_t *iommu)
248 {
249 	dev_info_t *dip = iommu->aiomt_dip;
250 	int instance = ddi_get_instance(dip);
251 	const char *driver = ddi_driver_name(dip);
252 	const char *f = "amd_iommu_stop";
253 
254 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
255 	    AMD_IOMMU_ENABLE, 0);
256 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
257 	    AMD_IOMMU_EVENTINT_ENABLE, 0);
258 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
259 	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
260 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
261 	    AMD_IOMMU_EVENTLOG_ENABLE, 0);
262 
263 	/*
264 	 * Disable translation on HT tunnel traffic
265 	 */
266 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
267 	    AMD_IOMMU_HT_TUN_ENABLE, 0);
268 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
269 	    AMD_IOMMU_CMDBUF_ENABLE, 0);
270 
	cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
272 	    "Successfully stopped AMD IOMMU", f, driver, instance,
273 	    iommu->aiomt_idx);
274 }
275 
276 static int
277 amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
278 {
279 	dev_info_t *dip = iommu->aiomt_dip;
280 	int instance = ddi_get_instance(dip);
281 	const char *driver = ddi_driver_name(dip);
282 	uint32_t dma_bufsz;
283 	caddr_t addr;
284 	uint32_t sz;
285 	uint32_t p2sz;
286 	int i;
287 	uint64_t *dentry;
288 	int err;
289 	const char *f = "amd_iommu_setup_tables_and_buffers";
290 
	/*
	 * We will put the Device Table, Command Buffer and
	 * Event Log in contiguous memory. Allocate the maximum
	 * size allowed for such structures:
	 * Device Table:   256 bits (32 bytes) per entry * 64K entries
	 * Command Buffer: 128 bits (16 bytes) per entry * 32K entries
	 * Event Log:      128 bits (16 bytes) per entry * 32K entries
	 */
299 	iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
300 	iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
301 	iommu->aiomt_eventlog_sz =
302 	    (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;
303 
304 	dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
305 	    + iommu->aiomt_eventlog_sz;
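
	/*
	 * With the maximum sizes above this works out to 2MB (device
	 * table) + 512KB (command buffer) + 512KB (event log) = 3MB of
	 * contiguous memory
	 */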
306 
307 	/*
308 	 * Alloc a DMA handle.
309 	 */
310 	err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
311 	    DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
312 	if (err != DDI_SUCCESS) {
313 		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
314 		    "AMD IOMMU tables and buffers", f, driver, instance);
315 		return (DDI_FAILURE);
316 	}
317 
318 	/*
319 	 * Alloc memory for tables and buffers
320 	 * XXX remove cast to size_t
321 	 */
322 	err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
323 	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
324 	    DDI_DMA_SLEEP,  NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
325 	    (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
326 	if (err != DDI_SUCCESS) {
327 		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
328 		    "to AMD IOMMU tables and buffers", f, driver, instance);
329 		iommu->aiomt_dma_bufva = NULL;
330 		iommu->aiomt_dma_mem_realsz = 0;
331 		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
332 		iommu->aiomt_dmahdl = NULL;
333 		return (DDI_FAILURE);
334 	}
335 
	/*
	 * The VA must be 4K aligned and the allocation must be at
	 * least as large as the tables
	 */
339 	ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
340 	    AMD_IOMMU_TABLE_ALIGN) == 0);
341 	ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);
342 
343 	/*
344 	 * Now bind the handle
345 	 */
346 	err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
347 	    iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
348 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
349 	    NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
350 	if (err != DDI_DMA_MAPPED) {
351 		cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
352 		    "to AMD IOMMU tables and buffers. bufrealsz=%p",
353 		    f, driver, instance,
354 		    (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
355 		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
356 		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
357 		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
358 		iommu->aiomt_buf_dma_ncookie = 0;
359 		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
360 		iommu->aiomt_dma_mem_hdl = NULL;
361 		iommu->aiomt_dma_bufva = NULL;
362 		iommu->aiomt_dma_mem_realsz = 0;
363 		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
364 		iommu->aiomt_dmahdl = NULL;
365 		return (DDI_FAILURE);
366 	}
367 
368 	/*
369 	 * We assume the DMA engine on the IOMMU is capable of handling the
370 	 * whole table buffer in a single cookie. If not and multiple cookies
371 	 * are needed we fail.
372 	 */
373 	if (iommu->aiomt_buf_dma_ncookie != 1) {
374 		cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
375 		    "cookies for DMA to AMD IOMMU tables and buffers. "
376 		    "#cookies=%u", f, driver, instance,
377 		    iommu->aiomt_buf_dma_ncookie);
378 		(void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
379 		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
380 		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
381 		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
382 		iommu->aiomt_buf_dma_ncookie = 0;
383 		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
384 		iommu->aiomt_dma_mem_hdl = NULL;
385 		iommu->aiomt_dma_bufva = NULL;
386 		iommu->aiomt_dma_mem_realsz = 0;
387 		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
388 		iommu->aiomt_dmahdl = NULL;
389 		return (DDI_FAILURE);
390 	}
391 
	/*
	 * The address in the cookie must be 4K aligned and the cookie
	 * must be large enough to cover the tables
	 */
395 	ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
396 	    & AMD_IOMMU_TABLE_ALIGN) == 0);
397 	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
398 	    <= iommu->aiomt_dma_mem_realsz);
399 	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);
400 
401 	/*
402 	 * Setup the device table pointers in the iommu struct as
403 	 * well as the IOMMU device table register
404 	 */
405 	iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
406 	bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);
407 
	/*
	 * Set V=1 and TV=0, so any inadvertent pass-thrus cause
	 * page faults. Also set the SE bit so we aren't swamped with
	 * page fault messages
	 */
413 	for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
414 		/*LINTED*/
415 		dentry = (uint64_t *)&iommu->aiomt_devtbl
416 		    [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
417 		AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
418 		AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
419 	}
420 
421 	addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
422 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
423 	    AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
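	/* DEVTABSIZE encodes the table size in 4K pages, minus one */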
424 	sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
425 	ASSERT(sz <= ((1 << 9) - 1));
426 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
427 	    AMD_IOMMU_DEVTABSIZE, sz);
428 
429 	/*
430 	 * Setup the command buffer pointers
431 	 */
432 	iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
433 	    iommu->aiomt_devtbl_sz;
434 	bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
435 	addr += iommu->aiomt_devtbl_sz;
436 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
437 	    AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);
438 
439 	p2sz = AMD_IOMMU_CMDBUF_SZ;
440 	ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
441 	    p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
442 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
443 	    AMD_IOMMU_COMLEN, p2sz);
444 	/*LINTED*/
445 	iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
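	/*
	 * Head == tail means the command buffer is empty: the driver
	 * produces commands at the tail, the IOMMU consumes at the head
	 */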
446 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
447 	    AMD_IOMMU_CMDHEADPTR, 0);
448 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
449 	    AMD_IOMMU_CMDTAILPTR, 0);
450 
451 	/*
452 	 * Setup the event log pointers
453 	 */
	iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
	    iommu->aiomt_cmdbuf_sz;
456 	bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
457 	addr += iommu->aiomt_cmdbuf_sz;
458 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
459 	    AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
460 	p2sz = AMD_IOMMU_EVENTLOG_SZ;
461 	ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
462 	    p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
463 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTLEN, p2sz);
465 	/*LINTED*/
466 	iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
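	/*
	 * For the event log the roles are reversed: the IOMMU produces
	 * entries at the tail and the driver consumes them at the head
	 */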
467 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
468 	    AMD_IOMMU_EVENTHEADPTR, 0);
469 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
470 	    AMD_IOMMU_EVENTTAILPTR, 0);
471 
472 	/* dma sync so device sees this init */
473 	SYNC_FORDEV(iommu->aiomt_dmahdl);
474 
475 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
476 		cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
477 		    "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
478 	}
479 
480 	return (DDI_SUCCESS);
481 }
482 
483 static void
484 amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
485 {
486 	dev_info_t *dip = iommu->aiomt_dip;
487 	int instance = ddi_get_instance(dip);
488 	const char *driver = ddi_driver_name(dip);
489 	const char *f = "amd_iommu_teardown_tables_and_buffers";
490 
491 	iommu->aiomt_eventlog = NULL;
492 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
493 	    AMD_IOMMU_EVENTBASE, 0);
494 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
495 	    AMD_IOMMU_EVENTLEN, 0);
496 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
497 	    AMD_IOMMU_EVENTHEADPTR, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
	    AMD_IOMMU_EVENTTAILPTR, 0);
500 
501 
502 	iommu->aiomt_cmdbuf = NULL;
503 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
504 	    AMD_IOMMU_COMBASE, 0);
505 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
506 	    AMD_IOMMU_COMLEN, 0);
507 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
508 	    AMD_IOMMU_CMDHEADPTR, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
	    AMD_IOMMU_CMDTAILPTR, 0);
511 
512 
513 	iommu->aiomt_devtbl = NULL;
514 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
515 	    AMD_IOMMU_DEVTABBASE, 0);
516 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
517 	    AMD_IOMMU_DEVTABSIZE, 0);
518 
519 	if (iommu->aiomt_dmahdl == NULL || type == AMD_IOMMU_QUIESCE)
520 		return;
521 
522 	/* Unbind the handle */
523 	if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
524 		cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
525 		    "%p for IOMMU idx=%d", f, driver, instance,
526 		    (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
527 	}
528 	iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
529 	iommu->aiomt_buf_dma_cookie.dmac_size = 0;
530 	iommu->aiomt_buf_dma_cookie.dmac_type = 0;
531 	iommu->aiomt_buf_dma_ncookie = 0;
532 
533 	/* Free the table memory allocated for DMA */
534 	ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
535 	iommu->aiomt_dma_mem_hdl = NULL;
536 	iommu->aiomt_dma_bufva = NULL;
537 	iommu->aiomt_dma_mem_realsz = 0;
538 
539 	/* Free the DMA handle */
540 	ddi_dma_free_handle(&iommu->aiomt_dmahdl);
541 	iommu->aiomt_dmahdl = NULL;
542 }
543 
544 static void
545 amd_iommu_enable_interrupts(amd_iommu_t *iommu)
546 {
547 	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
548 	    AMD_IOMMU_CMDBUF_RUN) == 0);
549 	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
550 	    AMD_IOMMU_EVENT_LOG_RUN) == 0);
551 
552 	/* Must be set prior to enabling command buffer */
553 	/* Must be set prior to enabling event logging */
554 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
555 	    AMD_IOMMU_CMDBUF_ENABLE, 1);
	/* No interrupts for completion wait - too heavyweight; use polling */
557 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
558 	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
559 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
560 	    AMD_IOMMU_EVENTLOG_ENABLE, 1);
561 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
562 	    AMD_IOMMU_EVENTINT_ENABLE, 1);
563 }
564 
565 static int
566 amd_iommu_setup_exclusion(amd_iommu_t *iommu)
567 {
568 	amd_iommu_acpi_ivmd_t *minfop;
569 
570 	minfop = amd_iommu_lookup_all_ivmd();
571 
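	/*
	 * An ACPI IVMD entry may define an exclusion range: DMA accesses
	 * falling within [base, limit] bypass translation entirely while
	 * the EXEN bit is set
	 */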
572 	if (minfop && minfop->acm_ExclRange == 1) {
573 		cmn_err(CE_NOTE, "Programming exclusion range");
574 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
575 		    AMD_IOMMU_EXCL_BASE_ADDR,
576 		    minfop->acm_ivmd_phys_start >> 12);
577 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
578 		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
579 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
580 		    AMD_IOMMU_EXCL_BASE_EXEN, 1);
581 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
582 		    AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
583 		    minfop->acm_ivmd_phys_len) >> 12);
584 	} else {
585 		if (amd_iommu_debug) {
586 			cmn_err(CE_NOTE, "Skipping exclusion range");
587 		}
588 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
589 		    AMD_IOMMU_EXCL_BASE_ADDR, 0);
590 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
591 		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
592 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
593 		    AMD_IOMMU_EXCL_BASE_EXEN, 0);
594 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
595 		    AMD_IOMMU_EXCL_LIM, 0);
596 	}
597 
598 	return (DDI_SUCCESS);
599 }
600 
601 static void
602 amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
603 {
604 	(void) amd_iommu_setup_exclusion(iommu);
605 }
606 
607 static uint_t
608 amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
609 {
610 	/*LINTED*/
611 	amd_iommu_t *iommu = (amd_iommu_t *)arg1;
612 	dev_info_t *dip = iommu->aiomt_dip;
613 	int instance = ddi_get_instance(dip);
614 	const char *driver = ddi_driver_name(dip);
615 	const char *f = "amd_iommu_intr_handler";
616 
617 	ASSERT(arg1);
618 	ASSERT(arg2 == NULL);
619 
620 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
621 		cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
622 		    f, driver, instance, iommu->aiomt_idx);
623 	}
624 
625 	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
626 	    AMD_IOMMU_EVENT_LOG_INT) == 1) {
627 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
628 			cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
629 			    "Event Log Interrupt", f, driver, instance,
630 			    iommu->aiomt_idx);
631 		}
632 		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
633 		WAIT_SEC(1);
634 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
635 		    AMD_IOMMU_EVENT_LOG_INT, 1);
636 		return (DDI_INTR_CLAIMED);
637 	}
638 
639 	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
640 	    AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
641 		cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
642 		    "Event Overflow Interrupt", f, driver, instance,
643 		    iommu->aiomt_idx);
644 		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
645 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
646 		    AMD_IOMMU_EVENT_LOG_INT, 1);
647 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
648 		    AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
649 		return (DDI_INTR_CLAIMED);
650 	}
651 
652 	return (DDI_INTR_UNCLAIMED);
653 }
654 
655 
656 static int
657 amd_iommu_setup_interrupts(amd_iommu_t *iommu)
658 {
659 	dev_info_t *dip = iommu->aiomt_dip;
660 	int instance = ddi_get_instance(dip);
661 	const char *driver = ddi_driver_name(dip);
662 	int intrcap0;
663 	int intrcapN;
664 	int type;
665 	int err;
666 	int req;
667 	int avail;
668 	int p2req;
669 	int actual;
670 	int i;
671 	int j;
672 	const char *f = "amd_iommu_setup_interrupts";
673 
674 	if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
675 		cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
676 		    "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
677 		return (DDI_FAILURE);
678 	}
679 
680 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
681 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
682 		    "Interrupt types supported = 0x%x", f, driver, instance,
683 		    iommu->aiomt_idx, type);
684 	}
685 
686 	/*
687 	 * for now we only support MSI
688 	 */
689 	if ((type & DDI_INTR_TYPE_MSI) == 0) {
690 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
691 		    "MSI interrupts not supported. Failing init.",
692 		    f, driver, instance, iommu->aiomt_idx);
693 		return (DDI_FAILURE);
694 	}
695 
696 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
697 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
698 		    f, driver, instance, iommu->aiomt_idx);
699 	}
700 
701 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
702 	if (err != DDI_SUCCESS) {
703 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
704 		    "ddi_intr_get_nintrs failed err = %d",
705 		    f, driver, instance, iommu->aiomt_idx, err);
706 		return (DDI_FAILURE);
707 	}
708 
709 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
710 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
711 		    "MSI number of interrupts requested: %d",
712 		    f, driver, instance, iommu->aiomt_idx, req);
713 	}
714 
715 	if (req == 0) {
716 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
717 		    "interrupts requested. Failing init", f,
718 		    driver, instance, iommu->aiomt_idx);
719 		return (DDI_FAILURE);
720 	}
721 
722 	err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
723 	if (err != DDI_SUCCESS) {
724 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
725 		    "ddi_intr_get_navail failed err = %d", f,
726 		    driver, instance, iommu->aiomt_idx, err);
727 		return (DDI_FAILURE);
728 	}
729 
730 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
731 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
732 		    "MSI number of interrupts available: %d",
733 		    f, driver, instance, iommu->aiomt_idx, avail);
734 	}
735 
736 	if (avail == 0) {
737 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
738 		    "interrupts available. Failing init", f,
739 		    driver, instance, iommu->aiomt_idx);
740 		return (DDI_FAILURE);
741 	}
742 
743 	if (avail < req) {
744 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
745 		    "interrupts: requested (%d) > available (%d). "
746 		    "Failing init", f, driver, instance, iommu->aiomt_idx,
747 		    req, avail);
748 		return (DDI_FAILURE);
749 	}
750 
751 	/* Allocate memory for DDI interrupt handles */
752 	iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
753 	iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
754 	    KM_SLEEP);
755 
756 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;
757 
758 	/* Convert req to a power of two as required by ddi_intr_alloc */
759 	p2req = 0;
760 	while (1<<p2req <= req)
761 		p2req++;
762 	p2req--;
763 	req = 1<<p2req;
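	/* e.g. req == 3 rounds down to p2req == 1, req == 2 */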
764 
765 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
766 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
767 		    "MSI power of 2 number of interrupts: %d,%d",
768 		    f, driver, instance, iommu->aiomt_idx, p2req, req);
769 	}
770 
771 	err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
772 	    DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
773 	if (err != DDI_SUCCESS) {
774 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
775 		    "ddi_intr_alloc failed: err = %d",
776 		    f, driver, instance, iommu->aiomt_idx, err);
777 		amd_iommu_teardown_interrupts(iommu);
778 		return (DDI_FAILURE);
779 	}
780 
781 	iommu->aiomt_actual_intrs = actual;
782 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;
783 
784 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
785 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
786 		    "number of interrupts actually allocated %d",
787 		    f, driver, instance, iommu->aiomt_idx, actual);
788 	}
789 
790 	if (iommu->aiomt_actual_intrs < req) {
791 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
792 		    "ddi_intr_alloc failed: actual (%d) < req (%d)",
793 		    f, driver, instance, iommu->aiomt_idx,
794 		    iommu->aiomt_actual_intrs, req);
795 		amd_iommu_teardown_interrupts(iommu);
796 		return (DDI_FAILURE);
797 	}
798 
	for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
		err = ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
		    amd_iommu_intr_handler, (void *)iommu, NULL);
		if (err != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
			    "ddi_intr_add_handler failed: intr = %d, err = %d",
			    f, driver, instance, iommu->aiomt_idx, i, err);
806 			for (j = 0; j < i; j++) {
807 				(void) ddi_intr_remove_handler(
808 				    iommu->aiomt_intr_htable[j]);
809 			}
810 			amd_iommu_teardown_interrupts(iommu);
811 			return (DDI_FAILURE);
812 		}
813 	}
814 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;
815 
816 	intrcap0 = intrcapN = -1;
817 	if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
818 	    != DDI_SUCCESS ||
819 	    ddi_intr_get_cap(
820 	    iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
821 	    != DDI_SUCCESS || intrcap0 != intrcapN) {
822 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
823 		    "ddi_intr_get_cap failed or inconsistent cap among "
		    "interrupts: intrcap0 (%d) != intrcapN (%d)",
825 		    f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
826 		amd_iommu_teardown_interrupts(iommu);
827 		return (DDI_FAILURE);
828 	}
829 	iommu->aiomt_intr_cap = intrcap0;
830 
831 	if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
832 		/* Need to call block enable */
833 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
834 			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
835 			    "Need to call block enable",
836 			    f, driver, instance, iommu->aiomt_idx);
837 		}
838 		if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
839 		    iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
840 			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
			    "ddi_intr_block_enable failed", f, driver,
842 			    instance, iommu->aiomt_idx);
843 			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
844 			    iommu->aiomt_actual_intrs);
845 			amd_iommu_teardown_interrupts(iommu);
846 			return (DDI_FAILURE);
847 		}
848 	} else {
849 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
850 			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
851 			    "Need to call individual enable",
852 			    f, driver, instance, iommu->aiomt_idx);
853 		}
854 		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
855 			if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
856 			    != DDI_SUCCESS) {
857 				cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
858 				    "ddi_intr_enable failed: intr = %d", f,
859 				    driver, instance, iommu->aiomt_idx, i);
860 				for (j = 0; j < i; j++) {
861 					(void) ddi_intr_disable(
862 					    iommu->aiomt_intr_htable[j]);
863 				}
864 				amd_iommu_teardown_interrupts(iommu);
865 				return (DDI_FAILURE);
866 			}
867 		}
868 	}
869 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;
870 
871 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
872 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
873 		    "Interrupts successfully %s enabled. # of interrupts = %d",
874 		    f, driver, instance, iommu->aiomt_idx,
875 		    (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
876 		    "(individually)", iommu->aiomt_actual_intrs);
877 	}
878 
879 	return (DDI_SUCCESS);
880 }
881 
882 static void
883 amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
884 {
885 	int i;
886 
887 	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
888 		if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
889 			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
890 			    iommu->aiomt_actual_intrs);
891 		} else {
892 			for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
893 				(void) ddi_intr_disable(
894 				    iommu->aiomt_intr_htable[i]);
895 			}
896 		}
897 	}
898 
899 	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
900 		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
901 			(void) ddi_intr_remove_handler(
902 			    iommu->aiomt_intr_htable[i]);
903 		}
904 	}
905 
906 	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
907 		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
908 			(void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
909 		}
910 	}
911 	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
912 		kmem_free(iommu->aiomt_intr_htable,
913 		    iommu->aiomt_intr_htable_sz);
914 	}
915 	iommu->aiomt_intr_htable = NULL;
916 	iommu->aiomt_intr_htable_sz = 0;
917 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
918 }
919 
920 static amd_iommu_t *
921 amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
922     uint16_t cap_base)
923 {
924 	amd_iommu_t *iommu;
925 	int instance = ddi_get_instance(dip);
926 	const char *driver = ddi_driver_name(dip);
927 	uint32_t caphdr;
928 	uint32_t low_addr32;
929 	uint32_t hi_addr32;
930 	uint32_t range;
931 	uint32_t misc;
932 	uint64_t pgoffset;
933 	amd_iommu_acpi_global_t *global;
934 	amd_iommu_acpi_ivhd_t *hinfop;
935 	int bus, device, func;
936 	const char *f = "amd_iommu_init";
937 
938 	low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
939 	    AMD_IOMMU_CAP_ADDR_LOW_OFF);
940 	if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
941 		cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
942 		    "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
943 		    instance, idx);
944 		return (NULL);
945 	}
946 
947 	iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
948 	mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
949 	mutex_enter(&iommu->aiomt_mutex);
950 
951 	mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
952 	mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);
953 
954 	iommu->aiomt_dip = dip;
955 	iommu->aiomt_idx = idx;
956 
	if (acpica_get_bdf(iommu->aiomt_dip, &bus, &device, &func)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Failed to get BDF. "
		    "Unable to use IOMMU unit idx=%d - skipping ...",
		    f, driver, instance, idx);
		mutex_exit(&iommu->aiomt_mutex);
		mutex_destroy(&iommu->aiomt_eventlock);
		mutex_destroy(&iommu->aiomt_cmdlock);
		mutex_destroy(&iommu->aiomt_mutex);
		kmem_free(iommu, sizeof (amd_iommu_t));
		return (NULL);
	}
964 
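	/* BDF layout: bus [15:8], device [7:3], function [2:0] */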
965 	iommu->aiomt_bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) |
966 	    (uint8_t)func;
967 
968 	/*
969 	 * Since everything in the capability block is locked and RO at this
970 	 * point, copy everything into the IOMMU struct
971 	 */
972 
973 	/* Get cap header */
974 	caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
975 	iommu->aiomt_cap_hdr = caphdr;
976 	iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
977 	    AMD_IOMMU_CAP_NPCACHE);
978 	iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);
979 
980 	global = amd_iommu_lookup_acpi_global();
981 	hinfop = amd_iommu_lookup_any_ivhd(iommu);
982 
983 	if (hinfop)
984 		iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
985 	else
986 		iommu->aiomt_iotlb =
987 		    AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);
988 
989 	iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
990 	iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
991 
992 	/*
993 	 * Get address of IOMMU control registers
994 	 */
995 	hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
996 	    AMD_IOMMU_CAP_ADDR_HI_OFF);
997 	iommu->aiomt_low_addr32 = low_addr32;
998 	iommu->aiomt_hi_addr32 = hi_addr32;
999 	low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;
1000 
	if (hinfop) {
		iommu->aiomt_reg_pa = hinfop->ach_IOMMU_reg_base;
		ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
	} else {
		iommu->aiomt_reg_pa = ((uint64_t)hi_addr32 << 32) | low_addr32;
	}
1007 
1008 	/*
1009 	 * Get cap range reg
1010 	 */
1011 	range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
1012 	iommu->aiomt_range = range;
1013 	iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
1014 	    AMD_IOMMU_RNG_VALID);
1015 	if (iommu->aiomt_rng_valid) {
1016 		iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
1017 		    AMD_IOMMU_RNG_BUS);
1018 		iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
1019 		    AMD_IOMMU_FIRST_DEVFN);
1020 		iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
1021 		    AMD_IOMMU_LAST_DEVFN);
1022 	} else {
1023 		iommu->aiomt_rng_bus = 0;
1024 		iommu->aiomt_first_devfn = 0;
1025 		iommu->aiomt_last_devfn = 0;
1026 	}
1027 
1028 	if (hinfop)
1029 		iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
1030 	else
1031 		iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
1032 		    AMD_IOMMU_HT_UNITID);
1033 
1034 	/*
1035 	 * Get cap misc reg
1036 	 */
1037 	misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
1038 	iommu->aiomt_misc = misc;
1039 
1040 	if (global) {
1041 		iommu->aiomt_htatsresv = global->acg_HtAtsResv;
1042 		iommu->aiomt_vasize = global->acg_VAsize;
1043 		iommu->aiomt_pasize = global->acg_PAsize;
1044 	} else {
1045 		iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
1046 		    AMD_IOMMU_HT_ATSRSV);
1047 		iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
1048 		    AMD_IOMMU_VA_SIZE);
1049 		iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
1050 		    AMD_IOMMU_PA_SIZE);
1051 	}
1052 
1053 	if (hinfop) {
1054 		iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
1055 	} else {
1056 		iommu->aiomt_msinum =
1057 		    AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
1058 	}
1059 
1060 	/*
1061 	 * Set up mapping between control registers PA and VA
1062 	 */
1063 	pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
1064 	ASSERT(pgoffset == 0);
1065 	iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
1066 	iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);
1067 
1068 	iommu->aiomt_va = (uintptr_t)device_arena_alloc(
1069 	    ptob(iommu->aiomt_reg_pages), VM_SLEEP);
1070 	if (iommu->aiomt_va == 0) {
1071 		cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
1072 		    "control regs. Skipping IOMMU idx=%d", f, driver,
1073 		    instance, idx);
1074 		mutex_exit(&iommu->aiomt_mutex);
1075 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1076 		return (NULL);
1077 	}
1078 
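	/*
	 * Map the IOMMU control registers into kernel virtual address
	 * space; HAT_STRICTORDER is used since these are MMIO device
	 * registers
	 */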
1079 	hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
1080 	    iommu->aiomt_reg_size,
1081 	    mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
1082 	    | HAT_STRICTORDER, HAT_LOAD_LOCK);
1083 
1084 	iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;
1085 
1086 	/*
1087 	 * Setup the various control register's VA
1088 	 */
1089 	iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
1090 	    AMD_IOMMU_DEVTBL_REG_OFF;
1091 	iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
1092 	    AMD_IOMMU_CMDBUF_REG_OFF;
1093 	iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
1094 	    AMD_IOMMU_EVENTLOG_REG_OFF;
1095 	iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
1096 	    AMD_IOMMU_CTRL_REG_OFF;
1097 	iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
1098 	    AMD_IOMMU_EXCL_BASE_REG_OFF;
1099 	iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
1100 	    AMD_IOMMU_EXCL_LIM_REG_OFF;
1101 	iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
1102 	    AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
1103 	iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
1104 	    AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
1105 	iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
1106 	    AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
1107 	iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
1108 	    AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
1109 	iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
1110 	    AMD_IOMMU_STATUS_REG_OFF;
1111 
1112 
1113 	/*
1114 	 * Setup the DEVICE table, CMD buffer, and LOG buffer in
1115 	 * memory and setup DMA access to this memory location
1116 	 */
1117 	if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
1118 		mutex_exit(&iommu->aiomt_mutex);
1119 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1120 		return (NULL);
1121 	}
1122 
1123 	if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
1124 		mutex_exit(&iommu->aiomt_mutex);
1125 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1126 		return (NULL);
1127 	}
1128 
1129 	amd_iommu_enable_interrupts(iommu);
1130 
1131 	if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
1132 		mutex_exit(&iommu->aiomt_mutex);
1133 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1134 		return (NULL);
1135 	}
1136 
1137 	/*
1138 	 * need to setup domain table before gfx bypass
1139 	 */
1140 	amd_iommu_init_page_tables(iommu);
1141 
1142 	/*
1143 	 * Set pass-thru for special devices like IOAPIC and HPET
1144 	 *
1145 	 * Also, gfx devices don't use DDI for DMA. No need to register
1146 	 * before setting up gfx passthru
1147 	 */
1148 	if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
1149 		mutex_exit(&iommu->aiomt_mutex);
1150 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1151 		return (NULL);
1152 	}
1153 
1154 	/* Initialize device table entries based on ACPI settings */
1155 	if (amd_iommu_acpi_init_devtbl(iommu) !=  DDI_SUCCESS) {
1156 		cmn_err(CE_WARN, "%s: %s%d: Can't initialize device table",
1157 		    f, driver, instance);
1158 		mutex_exit(&iommu->aiomt_mutex);
1159 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1160 		return (NULL);
1161 	}
1162 
1163 	if (amd_iommu_start(iommu) != DDI_SUCCESS) {
1164 		mutex_exit(&iommu->aiomt_mutex);
1165 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1166 		return (NULL);
1167 	}
1168 
	/* XXX register/start race */
1170 	if (amd_iommu_register(iommu) != DDI_SUCCESS) {
1171 		mutex_exit(&iommu->aiomt_mutex);
1172 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1173 		return (NULL);
1174 	}
1175 
1176 	if (amd_iommu_debug) {
1177 		cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
1178 		    instance, idx);
1179 	}
1180 
1181 	return (iommu);
1182 }
1183 
1184 static int
1185 amd_iommu_fini(amd_iommu_t *iommu, int type)
1186 {
1187 	int idx = iommu->aiomt_idx;
1188 	dev_info_t *dip = iommu->aiomt_dip;
1189 	int instance = ddi_get_instance(dip);
1190 	const char *driver = ddi_driver_name(dip);
1191 	const char *f = "amd_iommu_fini";
1192 
1193 	if (type == AMD_IOMMU_TEARDOWN) {
1194 		mutex_enter(&iommu->aiomt_mutex);
1195 		if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
1196 			cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
1197 			    "idx = %d", f, driver, instance, idx);
1198 			return (DDI_FAILURE);
1199 		}
1200 	}
1201 
1202 	amd_iommu_stop(iommu);
1203 
1204 	if (type == AMD_IOMMU_TEARDOWN) {
1205 		amd_iommu_fini_page_tables(iommu);
1206 		amd_iommu_teardown_interrupts(iommu);
1207 		amd_iommu_teardown_exclusion(iommu);
1208 	}
1209 
1210 	amd_iommu_teardown_tables_and_buffers(iommu, type);
1211 
1212 	if (type == AMD_IOMMU_QUIESCE)
1213 		return (DDI_SUCCESS);
1214 
1215 	if (iommu->aiomt_va != NULL) {
1216 		hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
1217 		    iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
1218 		device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
1219 		    ptob(iommu->aiomt_reg_pages));
1220 		iommu->aiomt_va = NULL;
1221 		iommu->aiomt_reg_va = NULL;
1222 	}
1223 	mutex_destroy(&iommu->aiomt_eventlock);
1224 	mutex_destroy(&iommu->aiomt_cmdlock);
1225 	mutex_exit(&iommu->aiomt_mutex);
1226 	mutex_destroy(&iommu->aiomt_mutex);
1227 	kmem_free(iommu, sizeof (amd_iommu_t));
1228 
1229 	cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
1230 	    f, driver, instance, idx);
1231 
1232 	return (DDI_SUCCESS);
1233 }
1234 
1235 int
1236 amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
1237 {
1238 	int instance = ddi_get_instance(dip);
1239 	const char *driver = ddi_driver_name(dip);
1240 	ddi_acc_handle_t handle;
1241 	uint8_t base_class;
1242 	uint8_t sub_class;
1243 	uint8_t prog_class;
1244 	int idx;
1245 	uint32_t id;
1246 	uint16_t cap_base;
1247 	uint32_t caphdr;
1248 	uint8_t cap_type;
1249 	uint8_t cap_id;
1250 	amd_iommu_t *iommu;
1251 	const char *f = "amd_iommu_setup";
1252 
1253 	ASSERT(instance >= 0);
1254 	ASSERT(driver);
1255 
1256 	/* First setup PCI access to config space */
1257 
1258 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
1259 		cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
1260 		    f, driver, instance);
1261 		return (DDI_FAILURE);
1262 	}
1263 
1264 	/*
1265 	 * The AMD IOMMU is part of an independent PCI function. There may be
1266 	 * more than one IOMMU in that PCI function
1267 	 */
1268 	base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
1269 	sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
1270 	prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);
1271 
1272 	if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
1273 	    prog_class != AMD_IOMMU_PCI_PROG_IF) {
1274 		cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
1275 		    "subclass(0x%x)/programming interface(0x%x)", f, driver,
1276 		    instance, base_class, sub_class, prog_class);
1277 		pci_config_teardown(&handle);
1278 		return (DDI_FAILURE);
1279 	}
1280 
1281 	/*
1282 	 * Find and initialize all IOMMU units in this function
1283 	 */
1284 	for (idx = 0; ; idx++) {
1285 		if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
1286 			break;
1287 
1288 		/* check if cap ID is secure device cap id */
1289 		if (id != PCI_CAP_ID_SECURE_DEV) {
1290 			if (amd_iommu_debug) {
1291 				cmn_err(CE_NOTE,
1292 				    "%s: %s%d: skipping IOMMU: idx(0x%x) "
1293 				    "cap ID (0x%x) != secure dev capid (0x%x)",
1294 				    f, driver, instance, idx, id,
1295 				    PCI_CAP_ID_SECURE_DEV);
1296 			}
1297 			continue;
1298 		}
1299 
1300 		/* check if cap type is IOMMU cap type */
1301 		caphdr = PCI_CAP_GET32(handle, 0, cap_base,
1302 		    AMD_IOMMU_CAP_HDR_OFF);
1303 		cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
1304 		cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
1305 
1306 		if (cap_type != AMD_IOMMU_CAP) {
1307 			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
1308 			    "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
1309 			    driver, instance, idx, cap_type, AMD_IOMMU_CAP);
1310 			continue;
1311 		}
1312 		ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
1313 		ASSERT(cap_id == id);
1314 
1315 		iommu = amd_iommu_init(dip, handle, idx, cap_base);
1316 		if (iommu == NULL) {
1317 			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
1318 			    "failed to init IOMMU", f,
1319 			    driver, instance, idx);
1320 			continue;
1321 		}
1322 
1323 		if (statep->aioms_iommu_start == NULL) {
1324 			statep->aioms_iommu_start = iommu;
1325 		} else {
1326 			statep->aioms_iommu_end->aiomt_next = iommu;
1327 		}
1328 		statep->aioms_iommu_end = iommu;
1329 
1330 		statep->aioms_nunits++;
1331 	}
1332 
1333 	pci_config_teardown(&handle);
1334 
1335 	if (amd_iommu_debug) {
1336 		cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
1337 		    f, driver, instance, (void *)statep, statep->aioms_nunits);
1338 	}
1339 
1340 	return (DDI_SUCCESS);
1341 }
1342 
1343 int
1344 amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type)
1345 {
1346 	int instance = ddi_get_instance(dip);
1347 	const char *driver = ddi_driver_name(dip);
1348 	amd_iommu_t *iommu, *next_iommu;
1349 	int teardown;
1350 	int error = DDI_SUCCESS;
1351 	const char *f = "amd_iommu_teardown";
1352 
1353 	teardown = 0;
1354 	for (iommu = statep->aioms_iommu_start; iommu;
1355 	    iommu = next_iommu) {
1356 		ASSERT(statep->aioms_nunits > 0);
1357 		next_iommu = iommu->aiomt_next;
1358 		if (amd_iommu_fini(iommu, type) != DDI_SUCCESS) {
1359 			error = DDI_FAILURE;
1360 			continue;
1361 		}
1362 		statep->aioms_nunits--;
1363 		teardown++;
1364 	}
1365 
	cmn_err(CE_NOTE, "%s: %s%d: state=%p: tore down %d units. "
1367 	    "%d units left", f, driver, instance, (void *)statep,
1368 	    teardown, statep->aioms_nunits);
1369 
1370 	return (error);
1371 }
1372 
1373 dev_info_t *
1374 amd_iommu_pci_dip(dev_info_t *rdip, const char *path)
1375 {
1376 	dev_info_t *pdip;
1377 	const char *driver = ddi_driver_name(rdip);
1378 	int instance = ddi_get_instance(rdip);
1379 	const char *f = "amd_iommu_pci_dip";
1380 
1381 	/* Hold rdip so it and its parents don't go away */
1382 	ndi_hold_devi(rdip);
1383 
1384 	if (ddi_is_pci_dip(rdip))
1385 		return (rdip);
1386 
1387 	pdip = rdip;
	while ((pdip = ddi_get_parent(pdip)) != NULL) {
1389 		if (ddi_is_pci_dip(pdip)) {
1390 			ndi_hold_devi(pdip);
1391 			ndi_rele_devi(rdip);
1392 			return (pdip);
1393 		}
1394 	}
1395 
1396 	cmn_err(
1397 #ifdef	DEBUG
1398 	    CE_PANIC,
1399 #else
1400 	    CE_WARN,
1401 #endif	/* DEBUG */
1402 	    "%s: %s%d dip = %p has no PCI parent, path = %s",
1403 	    f, driver, instance, (void *)rdip, path);
1404 
1405 	ndi_rele_devi(rdip);
1406 
1407 	return (NULL);
1408 }
1409 
1410 /* Interface with IOMMULIB */
1411 /*ARGSUSED*/
1412 static int
1413 amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
1414 {
1415 	const char *driver = ddi_driver_name(rdip);
1416 	char *s;
1417 	int bus, device, func, bdf;
1418 	amd_iommu_acpi_ivhd_t *hinfop;
1419 	dev_info_t *pci_dip;
1420 	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1421 	const char *f = "amd_iommu_probe";
1422 	int instance = ddi_get_instance(iommu->aiomt_dip);
1423 	const char *idriver = ddi_driver_name(iommu->aiomt_dip);
1424 	char *path, *pathp;
1425 
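	/*
	 * amd_iommu_disable_list is a ':'-separated list of driver names
	 * whose devices should bypass translation (pass-thru). As a
	 * hypothetical example, it might be set via /etc/system with
	 * something like: set amd_iommu:amd_iommu_disable_list = "xyzdrv"
	 * (the exact module prefix depends on where the variable lives).
	 */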
1426 	if (amd_iommu_disable_list) {
1427 		s = strstr(amd_iommu_disable_list, driver);
1428 		if (s == NULL)
1429 			return (DDI_SUCCESS);
1430 		if (s == amd_iommu_disable_list || *(s - 1) == ':') {
1431 			s += strlen(driver);
1432 			if (*s == '\0' || *s == ':') {
1433 				amd_iommu_set_passthru(iommu, rdip);
1434 				return (DDI_FAILURE);
1435 			}
1436 		}
1437 	}
1438 
1439 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1440 	if ((pathp = ddi_pathname(rdip, path)) == NULL)
1441 		pathp = "<unknown>";
1442 
1443 	pci_dip = amd_iommu_pci_dip(rdip, path);
1444 	if (pci_dip == NULL) {
1445 		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
1446 		    "for rdip=%p, path = %s",
1447 		    f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
1448 		    pathp);
1449 		kmem_free(path, MAXPATHLEN);
1450 		return (DDI_FAILURE);
1451 	}
1452 
1453 	if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
1454 		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get BDF "
1455 		    "for rdip=%p, path = %s",
1456 		    f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
1457 		    pathp);
1458 		kmem_free(path, MAXPATHLEN);
1459 		return (DDI_FAILURE);
1460 	}
1461 	kmem_free(path, MAXPATHLEN);
1462 
1463 	/*
1464 	 * See whether device is described by IVRS as being managed
1465 	 * by this IOMMU
1466 	 */
1467 	bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;
1468 	hinfop = amd_iommu_lookup_ivhd(bdf);
1469 	if (hinfop && hinfop->ach_IOMMU_deviceid == iommu->aiomt_bdf)
1470 		return (DDI_SUCCESS);
1471 
1472 	return (DDI_FAILURE);
1473 }
1474 
1475 /*ARGSUSED*/
1476 static int
1477 amd_iommu_allochdl(iommulib_handle_t handle,
1478     dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1479     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
1480 {
1481 	return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
1482 	    arg, dma_handlep));
1483 }
1484 
1485 /*ARGSUSED*/
1486 static int
1487 amd_iommu_freehdl(iommulib_handle_t handle,
1488     dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
1489 {
1490 	return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
1491 }
1492 
1493 /*ARGSUSED*/
1494 static int
1495 map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
1496     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
1497     int km_flags)
1498 {
1499 	const char *driver = ddi_driver_name(iommu->aiomt_dip);
1500 	int instance = ddi_get_instance(iommu->aiomt_dip);
1501 	int idx = iommu->aiomt_idx;
1502 	int i;
1503 	uint64_t start_va;
1504 	char *path;
1505 	int error = DDI_FAILURE;
1506 	const char *f = "map_current_window";
1507 
1508 	path = kmem_alloc(MAXPATHLEN, km_flags);
1509 	if (path == NULL) {
1510 		return (DDI_DMA_NORESOURCES);
1511 	}
1512 
1513 	(void) ddi_pathname(rdip, path);
1514 	mutex_enter(&amd_iommu_pgtable_lock);
1515 
	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
		cmn_err(CE_NOTE, "%s: %s%d: idx=%d Attempting to map cookies "
1518 		    "from handle for device %s",
1519 		    f, driver, instance, idx, path);
1520 	}
1521 
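	/*
	 * Map each cookie's physical address range into the IOMMU's
	 * virtual address space and rewrite the cookie to carry the VA
	 * the device will actually use for DMA
	 */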
1522 	start_va = 0;
1523 	for (i = 0; i < ccount; i++) {
1524 		if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
1525 		    cookie_array[i].dmac_cookie_addr,
1526 		    cookie_array[i].dmac_size,
1527 		    AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
1528 			break;
1529 		}
1530 		cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
1531 		cookie_array[i].dmac_type = 0;
1532 	}
1533 
1534 	if (i != ccount) {
1535 		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
1536 		    "for device %s", f, driver, instance, idx, i, path);
1537 		(void) unmap_current_window(iommu, rdip, cookie_array,
1538 		    ccount, i, 1);
1539 		goto out;
1540 	}
1541 
1542 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
1543 		cmn_err(CE_NOTE, "%s: return SUCCESS", f);
1544 	}
1545 
1546 	error = DDI_DMA_MAPPED;
1547 out:
1548 	mutex_exit(&amd_iommu_pgtable_lock);
1549 	kmem_free(path, MAXPATHLEN);
1550 	return (error);
1551 }
1552 
1553 /*ARGSUSED*/
1554 static int
1555 unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
1556     ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
1557 {
1558 	const char *driver = ddi_driver_name(iommu->aiomt_dip);
1559 	int instance = ddi_get_instance(iommu->aiomt_dip);
1560 	int idx = iommu->aiomt_idx;
1561 	int i;
1562 	int error = DDI_FAILURE;
1563 	char *path;
1564 	int pathfree;
1565 	const char *f = "unmap_current_window";
1566 
1567 	if (!locked)
1568 		mutex_enter(&amd_iommu_pgtable_lock);
1569 
1570 	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
1571 	if (path) {
1572 		(void) ddi_pathname(rdip, path);
1573 		pathfree = 1;
1574 	} else {
1575 		path = "<path-mem-alloc-failed>";
1576 		pathfree = 0;
1577 	}
1578 
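	/* A caller passes ncookies == -1 to request unmapping all cookies */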
1579 	if (ncookies == -1)
1580 		ncookies = ccount;
1581 
1582 	for (i = 0; i < ncookies; i++) {
1583 		if (amd_iommu_unmap_va(iommu, rdip,
1584 		    cookie_array[i].dmac_cookie_addr,
1585 		    cookie_array[i].dmac_size,
1586 		    AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
1587 			break;
1588 		}
1589 	}
1590 
1591 	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
1592 	    != DDI_SUCCESS) {
1593 		cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
1594 		    f, path);
1595 	}
1596 
1597 	if (i != ncookies) {
1598 		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
1599 		    "for device %s", f, driver, instance, idx, i, path);
1600 		error = DDI_FAILURE;
1601 		goto out;
1602 	}
1603 
1604 	error = DDI_SUCCESS;
1605 
1606 out:
1607 	if (pathfree)
1608 		kmem_free(path, MAXPATHLEN);
1609 	if (!locked)
1610 		mutex_exit(&amd_iommu_pgtable_lock);
1611 	return (error);
1612 }
1613 
1614 /*ARGSUSED*/
1615 static int
1616 amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
1617     dev_info_t *rdip, ddi_dma_handle_t dma_handle,
1618     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
1619     uint_t *ccountp)
1620 {
1621 	int dma_error = DDI_DMA_NOMAPPING;
1622 	int error;
1623 	char *path;
1624 	ddi_dma_cookie_t *cookie_array = NULL;
1625 	uint_t ccount = 0;
1626 	ddi_dma_impl_t *hp;
1627 	ddi_dma_attr_t *attrp;
1628 	int km_flags;
1629 	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1630 	int instance = ddi_get_instance(rdip);
1631 	const char *driver = ddi_driver_name(rdip);
1632 	const char *f = "amd_iommu_bindhdl";
1633 
1634 	dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
1635 	    dmareq, cookiep, ccountp);
1636 
1637 	if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
1638 		return (dma_error);
1639 
1640 	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
1641 
1642 	path = kmem_alloc(MAXPATHLEN, km_flags);
1643 	if (path) {
1644 		(void) ddi_pathname(rdip, path);
1645 	} else {
1646 		dma_error = DDI_DMA_NORESOURCES;
1647 		goto unbind;
1648 	}
1649 
1650 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
1651 		cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
1652 		    f, path,
		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
1654 		    *ccountp);
1655 	}
1656 
1657 	cookie_array = NULL;
1658 	ccount = 0;
1659 	if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
1660 	    &cookie_array, &ccount)) != DDI_SUCCESS) {
1661 		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1662 		    "for device %s", f, driver, instance, path);
1663 		dma_error = error;
1664 		goto unbind;
1665 	}
1666 
1667 	hp = (ddi_dma_impl_t *)dma_handle;
1668 	attrp = &hp->dmai_attr;
1669 
1670 	error = map_current_window(iommu, rdip, attrp, dmareq,
1671 	    cookie_array, ccount, km_flags);
1672 	if (error != DDI_SUCCESS) {
1673 		dma_error = error;
1674 		goto unbind;
1675 	}
1676 
1677 	if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
1678 	    cookie_array, ccount)) != DDI_SUCCESS) {
1679 		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
1680 		    "for device %s", f, driver, instance, path);
1681 		dma_error = error;
1682 		goto unbind;
1683 	}
1684 
1685 	*cookiep = cookie_array[0];
1686 
1687 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
1688 		cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
1689 		    f, path,
1690 		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
1691 		    *ccountp);
1692 	}
1693 
1694 	kmem_free(path, MAXPATHLEN);
1695 	ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
1696 	return (dma_error);
1697 unbind:
1698 	kmem_free(path, MAXPATHLEN);
1699 	(void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
1700 	return (dma_error);
1701 }
1702 
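/*
 * DMA unbind: fetch and clear the remapped cookie list from the
 * framework, unbind the handle, then tear down the IOMMU mappings
 * for those cookies via unmap_current_window().
 */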
1703 /*ARGSUSED*/
1704 static int
1705 amd_iommu_unbindhdl(iommulib_handle_t handle,
1706     dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
1707 {
1708 	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1709 	ddi_dma_cookie_t *cookie_array = NULL;
1710 	uint_t ccount = 0;
1711 	int error = DDI_FAILURE;
1712 	int instance = ddi_get_instance(rdip);
1713 	const char *driver = ddi_driver_name(rdip);
1714 	const char *f = "amd_iommu_unbindhdl";
1715 
1718 	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1719 	    &ccount) != DDI_SUCCESS) {
1720 		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1721 		    "for device %p", f, driver, instance, (void *)rdip);
1722 		error = DDI_FAILURE;
1723 		goto out;
1724 	}
1725 
1726 	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1727 		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
1728 		    "for device %p", f, driver, instance, (void *)rdip);
1729 		error = DDI_FAILURE;
1730 		goto out;
1731 	}
1732 
1733 	if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
1734 	    != DDI_SUCCESS) {
1735 		cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
1736 		    f, driver, instance, (void *)rdip);
1737 		error = DDI_FAILURE;
1738 		goto out;
1739 	}
1740 
1741 	if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
1742 	    != DDI_SUCCESS) {
1743 		cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
1744 		    "for dip=%p", f, driver, instance, (void *)rdip);
1745 		error = DDI_FAILURE;
1746 	} else {
1747 		error = DDI_SUCCESS;
1748 	}
1749 out:
1750 	if (cookie_array)
1751 		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1752 	return (error);
1753 }
1754 
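/*
 * DMA sync: temporarily detach the IOMMU-remapped cookie list from
 * the handle, invoke the framework's sync routine, then reattach the
 * cookie list.
 */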
1755 /*ARGSUSED*/
1756 static int
1757 amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
1758     dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
1759     size_t len, uint_t cache_flags)
1760 {
1761 	ddi_dma_cookie_t *cookie_array = NULL;
1762 	uint_t ccount = 0;
1763 	int error;
1764 	const char *f = "amd_iommu_sync";
1765 
1768 	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1769 	    &ccount) != DDI_SUCCESS) {
1770 		ASSERT(cookie_array == NULL);
1771 		cmn_err(CE_WARN, "%s: Cannot get cookies "
1772 		    "for device %p", f, (void *)rdip);
1773 		error = DDI_FAILURE;
1774 		goto out;
1775 	}
1776 
1777 	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1778 		cmn_err(CE_WARN, "%s: Cannot clear cookies "
1779 		    "for device %p", f, (void *)rdip);
1780 		error = DDI_FAILURE;
1781 		goto out;
1782 	}
1783 
1784 	error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
1785 	    len, cache_flags);
1786 
1787 	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
1788 	    ccount) != DDI_SUCCESS) {
1789 		cmn_err(CE_WARN, "%s: Cannot set cookies "
1790 		    "for device %p", f, (void *)rdip);
1791 		error = DDI_FAILURE;
1792 	} else {
1793 		cookie_array = NULL;
1794 		ccount = 0;
1795 	}
1796 
1797 out:
1798 	if (cookie_array)
1799 		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1800 	return (error);
1801 }
1802 
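/*
 * DMA window switch: fetch the current window's remapped cookies,
 * switch to the requested window via the framework, unmap the old
 * window's IOMMU mappings, then remap and publish the new window's
 * cookies.  The synthesized DMA request is marked DDI_DMA_RDWR since
 * the original request's flags are not available at this entry point.
 */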
1803 /*ARGSUSED*/
1804 static int
1805 amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
1806     dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
1807     off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
1808     uint_t *ccountp)
1809 {
1810 	int error = DDI_FAILURE;
1811 	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1812 	ddi_dma_cookie_t *cookie_array = NULL;
1813 	uint_t ccount = 0;
1814 	int km_flags;
1815 	ddi_dma_impl_t *hp;
1816 	ddi_dma_attr_t *attrp;
1817 	struct ddi_dma_req sdmareq = {0};
1818 	int instance = ddi_get_instance(rdip);
1819 	const char *driver = ddi_driver_name(rdip);
1820 	const char *f = "amd_iommu_win";
1821 
1822 	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
1823 
1826 	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1827 	    &ccount) != DDI_SUCCESS) {
1828 		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1829 		    "for device %p", f, driver, instance, (void *)rdip);
1830 		error = DDI_FAILURE;
1831 		goto out;
1832 	}
1833 
1834 	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1835 		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
1836 		    "for device %p", f, driver, instance, (void *)rdip);
1837 		error = DDI_FAILURE;
1838 		goto out;
1839 	}
1840 
1841 	if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
1842 	    offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
1843 		cmn_err(CE_WARN, "%s: %s%d: failed switch windows for dip=%p",
1844 		    f, driver, instance, (void *)rdip);
1845 		error = DDI_FAILURE;
1846 		goto out;
1847 	}
1848 
1849 	(void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);
1850 
	if (cookie_array) {
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
	}

	cookie_array = NULL;
	ccount = 0;
1859 	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1860 	    &ccount) != DDI_SUCCESS) {
1861 		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1862 		    "for device %p", f, driver, instance, (void *)rdip);
1863 		error = DDI_FAILURE;
1864 		goto out;
1865 	}
1866 
1867 	hp = (ddi_dma_impl_t *)dma_handle;
1868 	attrp = &hp->dmai_attr;
1869 
1870 	sdmareq.dmar_flags = DDI_DMA_RDWR;
1871 	error = map_current_window(iommu, rdip, attrp, &sdmareq,
1872 	    cookie_array, ccount, km_flags);
1873 
1874 	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
1875 	    ccount) != DDI_SUCCESS) {
1876 		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
1877 		    "for device %p", f, driver, instance, (void *)rdip);
1878 		error = DDI_FAILURE;
1879 		goto out;
1880 	}
1881 
1882 	*cookiep = cookie_array[0];
1883 
1884 	return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
1885 out:
1886 	if (cookie_array)
1887 		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1888 
1889 	return (error);
1890 }
1891 
/*
 * Obsoleted DMA routines.  These entry points should never be called;
 * they trip an assertion on DEBUG kernels and otherwise just pass
 * through to the framework implementations.
 */
1893 
1894 /*ARGSUSED*/
1895 static int
1896 amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
1897     dev_info_t *rdip, struct ddi_dma_req *dmareq,
1898     ddi_dma_handle_t *dma_handle)
1899 {
1900 	ASSERT(0);
1901 	return (iommulib_iommu_dma_map(dip, rdip, dmareq, dma_handle));
1902 }
1903 
1904 /*ARGSUSED*/
1905 static int
1906 amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
1907     dev_info_t *rdip, ddi_dma_handle_t dma_handle,
1908     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
1909     caddr_t *objpp, uint_t cache_flags)
1910 {
1911 	ASSERT(0);
1912 	return (iommulib_iommu_dma_mctl(dip, rdip, dma_handle,
1913 	    request, offp, lenp, objpp, cache_flags));
1914 }
1915 
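/*
 * Register access workarounds: read each 64-bit IOMMU register as two
 * 32-bit loads, reassembled through a split_t union (a uint64_t
 * overlaid with two uint32_t halves; see amd_iommu_impl.h), before
 * applying the bit-field get/set macros.  This presumably avoids
 * single 64-bit MMIO loads from the IOMMU register space, which not
 * all hardware handles reliably.
 */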
1916 uint64_t
1917 amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
1918 {
1919 	split_t s;
1920 	uint32_t *ptr32 = (uint32_t *)regp;
1921 	uint64_t *s64p = &(s.u64);
1922 
1923 	s.u32[0] = ptr32[0];
1924 	s.u32[1] = ptr32[1];
1925 
1926 	return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
1927 }
1928 
1929 uint64_t
1930 amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
1931 {
1932 	split_t s;
1933 	uint32_t *ptr32 = (uint32_t *)regp;
1934 	uint64_t *s64p = &(s.u64);
1935 
1936 	s.u32[0] = ptr32[0];
1937 	s.u32[1] = ptr32[1];
1938 
1939 	AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);
1940 
1941 	*regp = s.u64;
1942 
1943 	return (s.u64);
1944 }
1945 
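/*
 * Read the boot properties that control the AMD IOMMU.  These are
 * typically passed to the kernel by the boot loader; for example
 * (illustrative, the exact syntax depends on the boot environment):
 *
 *	kernel$ /platform/i86pc/kernel/$ISADIR/unix -B amd-iommu=no
 */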
1946 void
1947 amd_iommu_read_boot_props(void)
1948 {
1949 	char *propval;
1950 
1951 	/*
1952 	 * if "amd-iommu = no/false" boot property is set,
1953 	 * ignore AMD iommu
1954 	 */
1955 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
1956 	    DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
1957 		if (strcmp(propval, "no") == 0 ||
1958 		    strcmp(propval, "false") == 0) {
1959 			amd_iommu_disable = 1;
1960 		}
1961 		ddi_prop_free(propval);
1962 	}
1963 
1964 	/*
1965 	 * Copy the list of drivers for which IOMMU is disabled by user.
1966 	 */
1967 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
1968 	    DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
	    == DDI_PROP_SUCCESS) {
1970 		amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
1971 		    KM_SLEEP);
1972 		(void) strcpy(amd_iommu_disable_list, propval);
1973 		ddi_prop_free(propval);
1974 	}
1975 
1976 }
1977 
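/*
 * Look up the same controls in the driver's .conf file.  For example
 * (illustrative lines, assuming standard driver.conf syntax):
 *
 *	amd-iommu="no";
 *	amd-iommu-disable-list="drivera driverb";
 *
 * The disable list is only copied here; it is parsed elsewhere, so
 * the exact separator format is not assumed.
 */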
1978 void
1979 amd_iommu_lookup_conf_props(dev_info_t *dip)
1980 {
1981 	char *disable;
1982 
1983 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1984 	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
1985 	    == DDI_PROP_SUCCESS) {
1986 		if (strcmp(disable, "no") == 0) {
1987 			amd_iommu_disable = 1;
1988 		}
1989 		ddi_prop_free(disable);
1990 	}
1991 
1992 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1993 	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
1994 	    &disable) == DDI_PROP_SUCCESS) {
1995 		amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
1996 		    KM_SLEEP);
1997 		(void) strcpy(amd_iommu_disable_list, disable);
1998 		ddi_prop_free(disable);
1999 	}
2000 }
2001