xref: /illumos-gate/usr/src/uts/i86pc/io/immu_qinv.c (revision 50200e77)
13a634bfcSVikram Hegde /*
23a634bfcSVikram Hegde  * CDDL HEADER START
33a634bfcSVikram Hegde  *
43a634bfcSVikram Hegde  * The contents of this file are subject to the terms of the
53a634bfcSVikram Hegde  * Common Development and Distribution License (the "License").
63a634bfcSVikram Hegde  * You may not use this file except in compliance with the License.
73a634bfcSVikram Hegde  *
83a634bfcSVikram Hegde  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93a634bfcSVikram Hegde  * or http://www.opensolaris.org/os/licensing.
103a634bfcSVikram Hegde  * See the License for the specific language governing permissions
113a634bfcSVikram Hegde  * and limitations under the License.
123a634bfcSVikram Hegde  *
133a634bfcSVikram Hegde  * When distributing Covered Code, include this CDDL HEADER in each
143a634bfcSVikram Hegde  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153a634bfcSVikram Hegde  * If applicable, add the following below this CDDL HEADER, with the
163a634bfcSVikram Hegde  * fields enclosed by brackets "[]" replaced with your own identifying
173a634bfcSVikram Hegde  * information: Portions Copyright [yyyy] [name of copyright owner]
183a634bfcSVikram Hegde  *
193a634bfcSVikram Hegde  * CDDL HEADER END
203a634bfcSVikram Hegde  */
213a634bfcSVikram Hegde /*
22c94adbf9SFrank Van Der Linden  * Portions Copyright (c) 2010, Oracle and/or its affiliates.
23c94adbf9SFrank Van Der Linden  * All rights reserved.
243a634bfcSVikram Hegde  */
253a634bfcSVikram Hegde 
263a634bfcSVikram Hegde /*
273a634bfcSVikram Hegde  * Copyright (c) 2009, Intel Corporation.
283a634bfcSVikram Hegde  * All rights reserved.
293a634bfcSVikram Hegde  */
303a634bfcSVikram Hegde 
313a634bfcSVikram Hegde #include <sys/ddi.h>
323a634bfcSVikram Hegde #include <sys/archsystm.h>
333a634bfcSVikram Hegde #include <vm/hat_i86.h>
343a634bfcSVikram Hegde #include <sys/types.h>
35*50200e77SFrank Van Der Linden #include <sys/cpu.h>
363a634bfcSVikram Hegde #include <sys/sysmacros.h>
373a634bfcSVikram Hegde #include <sys/immu.h>
383a634bfcSVikram Hegde 
/* size in bytes of one invalidation queue descriptor */
#define	QINV_ENTRY_SIZE		0x10

/* max value of the Queue Size field of the Invalidation Queue Address Register */
#define	QINV_MAX_QUEUE_SIZE	0x7

/* size in bytes of the status data written by an invalidation wait descriptor */
#define	QINV_SYNC_DATA_SIZE	0x4

/* extract the queue head index from the Invalidation Queue Head register */
#define	QINV_IQA_HEAD(QH)	BITX((QH), 18, 4)
/* shift to convert a tail index into an Invalidation Queue Tail register value */
#define	QINV_IQA_TAIL_SHIFT	4
513a634bfcSVikram Hegde 
/*
 * One 128-bit invalidation queue descriptor, stored as two 64-bit
 * halves. The low qword carries the descriptor type (bits 3:0) and
 * most operands; the high qword carries address/status operands.
 */
typedef struct qinv_inv_dsc {
	uint64_t	lo;
	uint64_t	hi;
} qinv_dsc_t;
573a634bfcSVikram Hegde 
583a634bfcSVikram Hegde /* physical contigous pages for invalidation queue */
593a634bfcSVikram Hegde typedef struct qinv_mem {
603a634bfcSVikram Hegde 	kmutex_t	   qinv_mem_lock;
613a634bfcSVikram Hegde 	ddi_dma_handle_t   qinv_mem_dma_hdl;
623a634bfcSVikram Hegde 	ddi_acc_handle_t   qinv_mem_acc_hdl;
633a634bfcSVikram Hegde 	caddr_t		   qinv_mem_vaddr;
643a634bfcSVikram Hegde 	paddr_t		   qinv_mem_paddr;
653a634bfcSVikram Hegde 	uint_t		   qinv_mem_size;
663a634bfcSVikram Hegde 	uint16_t	   qinv_mem_head;
673a634bfcSVikram Hegde 	uint16_t	   qinv_mem_tail;
683a634bfcSVikram Hegde } qinv_mem_t;
693a634bfcSVikram Hegde 
703a634bfcSVikram Hegde 
713a634bfcSVikram Hegde /*
723a634bfcSVikram Hegde  * invalidation queue state
733a634bfcSVikram Hegde  *   This structure describes the state information of the
743a634bfcSVikram Hegde  *   invalidation queue table and related status memeory for
753a634bfcSVikram Hegde  *   invalidation wait descriptor
763a634bfcSVikram Hegde  *
773a634bfcSVikram Hegde  * qinv_table		- invalidation queue table
783a634bfcSVikram Hegde  * qinv_sync		- sync status memory for invalidation wait descriptor
793a634bfcSVikram Hegde  */
803a634bfcSVikram Hegde typedef struct qinv {
813a634bfcSVikram Hegde 	qinv_mem_t		qinv_table;
823a634bfcSVikram Hegde 	qinv_mem_t		qinv_sync;
833a634bfcSVikram Hegde } qinv_t;
843a634bfcSVikram Hegde 
85*50200e77SFrank Van Der Linden static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);
86*50200e77SFrank Van Der Linden 
87c94adbf9SFrank Van Der Linden static struct immu_flushops immu_qinv_flushops = {
88c94adbf9SFrank Van Der Linden 	immu_qinv_context_fsi,
89c94adbf9SFrank Van Der Linden 	immu_qinv_context_dsi,
90c94adbf9SFrank Van Der Linden 	immu_qinv_context_gbl,
91c94adbf9SFrank Van Der Linden 	immu_qinv_iotlb_psi,
92c94adbf9SFrank Van Der Linden 	immu_qinv_iotlb_dsi,
93*50200e77SFrank Van Der Linden 	immu_qinv_iotlb_gbl,
94*50200e77SFrank Van Der Linden 	immu_qinv_inv_wait
95c94adbf9SFrank Van Der Linden };
963a634bfcSVikram Hegde 
/*
 * Helper macros for building queued invalidation descriptors.
 * The low 4 bits of the low qword encode the descriptor type.
 */
#define	INV_DSC_TYPE(dsc)	((dsc)->lo & 0xF)

/* context cache invalidate descriptor (type 1) */
#define	CC_INV_DSC_HIGH		(0)
#define	CC_INV_DSC_LOW(fm, sid, did, g)	(((uint64_t)(fm) << 48) | \
	((uint64_t)(sid) << 32) | \
	((uint64_t)(did) << 16) | \
	((uint64_t)(g) << 4) | \
	1)

/* IOTLB invalidate descriptor (type 2) */
#define	IOTLB_INV_DSC_HIGH(addr, ih, am) (((uint64_t)(addr)) | \
	((uint64_t)(ih) << 6) |	\
	((uint64_t)(am)))

#define	IOTLB_INV_DSC_LOW(did, dr, dw, g) (((uint64_t)(did) << 16) | \
	((uint64_t)(dr) << 7) | \
	((uint64_t)(dw) << 6) | \
	((uint64_t)(g) << 4) | \
	2)

/* device-IOTLB invalidate descriptor (type 3) */
#define	DEV_IOTLB_INV_DSC_HIGH(addr, s) (((uint64_t)(addr)) | (s))

#define	DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd) ( \
	((uint64_t)(sid) << 32) | \
	((uint64_t)(max_invs_pd) << 16) | \
	3)

/* interrupt entry cache invalidate descriptor (type 4) */
#define	IEC_INV_DSC_HIGH (0)
#define	IEC_INV_DSC_LOW(idx, im, g) (((uint64_t)(idx) << 32) | \
	((uint64_t)(im) << 27) | \
	((uint64_t)(g) << 4) | \
	4)

/* invalidation wait descriptor (type 5) */
#define	INV_WAIT_DSC_HIGH(saddr) ((uint64_t)(saddr))

#define	INV_WAIT_DSC_LOW(sdata, fn, sw, iflag) (((uint64_t)(sdata) << 32) | \
	((uint64_t)(fn) << 6) | \
	((uint64_t)(sw) << 5) | \
	((uint64_t)(iflag) << 4) | \
	5)
1363a634bfcSVikram Hegde 
1373a634bfcSVikram Hegde /*
1383a634bfcSVikram Hegde  * QS field of Invalidation Queue Address Register
1393a634bfcSVikram Hegde  * the size of invalidation queue is 1 << (qinv_iqa_qs + 8)
1403a634bfcSVikram Hegde  */
1413a634bfcSVikram Hegde static uint_t qinv_iqa_qs = 6;
1423a634bfcSVikram Hegde 
/*
 * Human-readable names for the queued invalidation descriptor types,
 * indexed by the descriptor type field (see INV_DSC_TYPE).
 */
static char *qinv_dsc_type[] = {
	"Reserved",
	"Context Cache Invalidate Descriptor",
	"IOTLB Invalidate Descriptor",
	"Device-IOTLB Invalidate Descriptor",
	"Interrupt Entry Cache Invalidate Descriptor",
	"Invalidation Wait Descriptor",
	"Incorrect queue invalidation type"
};

#define	QINV_MAX_DSC_TYPE	(sizeof (qinv_dsc_type) / sizeof (char *))
1573a634bfcSVikram Hegde 
1583a634bfcSVikram Hegde /*
1593a634bfcSVikram Hegde  * the queued invalidation interface functions
1603a634bfcSVikram Hegde  */
1613a634bfcSVikram Hegde static void qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc);
1623a634bfcSVikram Hegde static void qinv_context_common(immu_t *immu, uint8_t function_mask,
1633a634bfcSVikram Hegde     uint16_t source_id, uint_t domain_id, ctt_inv_g_t type);
1643a634bfcSVikram Hegde static void qinv_iotlb_common(immu_t *immu, uint_t domain_id,
1653a634bfcSVikram Hegde     uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type);
1663a634bfcSVikram Hegde static void qinv_iec_common(immu_t *immu, uint_t iidx,
1673a634bfcSVikram Hegde     uint_t im, uint_t g);
168*50200e77SFrank Van Der Linden static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);
169*50200e77SFrank Van Der Linden static void qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp);
1703a634bfcSVikram Hegde /*LINTED*/
1713a634bfcSVikram Hegde static void qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
1723a634bfcSVikram Hegde     uint64_t addr, uint_t size, uint_t max_invs_pd);
1733a634bfcSVikram Hegde 
1743a634bfcSVikram Hegde 
1753a634bfcSVikram Hegde /* submit invalidation request descriptor to invalidation queue */
1763a634bfcSVikram Hegde static void
qinv_submit_inv_dsc(immu_t * immu,qinv_dsc_t * dsc)1773a634bfcSVikram Hegde qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc)
1783a634bfcSVikram Hegde {
1793a634bfcSVikram Hegde 	qinv_t *qinv;
1803a634bfcSVikram Hegde 	qinv_mem_t *qinv_table;
1813a634bfcSVikram Hegde 	uint_t tail;
182*50200e77SFrank Van Der Linden #ifdef DEBUG
183*50200e77SFrank Van Der Linden 	uint_t count = 0;
184*50200e77SFrank Van Der Linden #endif
1853a634bfcSVikram Hegde 
1863a634bfcSVikram Hegde 	qinv = (qinv_t *)immu->immu_qinv;
1873a634bfcSVikram Hegde 	qinv_table = &(qinv->qinv_table);
1883a634bfcSVikram Hegde 
1893a634bfcSVikram Hegde 	mutex_enter(&qinv_table->qinv_mem_lock);
1903a634bfcSVikram Hegde 	tail = qinv_table->qinv_mem_tail;
1913a634bfcSVikram Hegde 	qinv_table->qinv_mem_tail++;
1923a634bfcSVikram Hegde 
1933a634bfcSVikram Hegde 	if (qinv_table->qinv_mem_tail == qinv_table->qinv_mem_size)
1943a634bfcSVikram Hegde 		qinv_table->qinv_mem_tail = 0;
1953a634bfcSVikram Hegde 
1963a634bfcSVikram Hegde 	while (qinv_table->qinv_mem_head == qinv_table->qinv_mem_tail) {
197*50200e77SFrank Van Der Linden #ifdef DEBUG
198*50200e77SFrank Van Der Linden 		count++;
199*50200e77SFrank Van Der Linden #endif
2003a634bfcSVikram Hegde 		/*
2013a634bfcSVikram Hegde 		 * inv queue table exhausted, wait hardware to fetch
2023a634bfcSVikram Hegde 		 * next descriptor
2033a634bfcSVikram Hegde 		 */
2043a634bfcSVikram Hegde 		qinv_table->qinv_mem_head = QINV_IQA_HEAD(
2053a634bfcSVikram Hegde 		    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
2063a634bfcSVikram Hegde 	}
2073a634bfcSVikram Hegde 
208*50200e77SFrank Van Der Linden 	IMMU_DPROBE3(immu__qinv__sub, uint64_t, dsc->lo, uint64_t, dsc->hi,
209*50200e77SFrank Van Der Linden 	    uint_t, count);
210*50200e77SFrank Van Der Linden 
2113a634bfcSVikram Hegde 	bcopy(dsc, qinv_table->qinv_mem_vaddr + tail * QINV_ENTRY_SIZE,
2123a634bfcSVikram Hegde 	    QINV_ENTRY_SIZE);
2133a634bfcSVikram Hegde 
2143a634bfcSVikram Hegde 	immu_regs_put64(immu, IMMU_REG_INVAL_QT,
2153a634bfcSVikram Hegde 	    qinv_table->qinv_mem_tail << QINV_IQA_TAIL_SHIFT);
2163a634bfcSVikram Hegde 
2173a634bfcSVikram Hegde 	mutex_exit(&qinv_table->qinv_mem_lock);
2183a634bfcSVikram Hegde }
2193a634bfcSVikram Hegde 
2203a634bfcSVikram Hegde /* queued invalidation interface -- invalidate context cache */
2213a634bfcSVikram Hegde static void
qinv_context_common(immu_t * immu,uint8_t function_mask,uint16_t source_id,uint_t domain_id,ctt_inv_g_t type)2223a634bfcSVikram Hegde qinv_context_common(immu_t *immu, uint8_t function_mask,
2233a634bfcSVikram Hegde     uint16_t source_id, uint_t domain_id, ctt_inv_g_t type)
2243a634bfcSVikram Hegde {
2253a634bfcSVikram Hegde 	qinv_dsc_t dsc;
2263a634bfcSVikram Hegde 
2273a634bfcSVikram Hegde 	dsc.lo = CC_INV_DSC_LOW(function_mask, source_id, domain_id, type);
2283a634bfcSVikram Hegde 	dsc.hi = CC_INV_DSC_HIGH;
2293a634bfcSVikram Hegde 
2303a634bfcSVikram Hegde 	qinv_submit_inv_dsc(immu, &dsc);
2313a634bfcSVikram Hegde }
2323a634bfcSVikram Hegde 
2333a634bfcSVikram Hegde /* queued invalidation interface -- invalidate iotlb */
2343a634bfcSVikram Hegde static void
qinv_iotlb_common(immu_t * immu,uint_t domain_id,uint64_t addr,uint_t am,uint_t hint,tlb_inv_g_t type)2353a634bfcSVikram Hegde qinv_iotlb_common(immu_t *immu, uint_t domain_id,
2363a634bfcSVikram Hegde     uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type)
2373a634bfcSVikram Hegde {
2383a634bfcSVikram Hegde 	qinv_dsc_t dsc;
2393a634bfcSVikram Hegde 	uint8_t dr = 0;
2403a634bfcSVikram Hegde 	uint8_t dw = 0;
2413a634bfcSVikram Hegde 
2423a634bfcSVikram Hegde 	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap))
2433a634bfcSVikram Hegde 		dr = 1;
2443a634bfcSVikram Hegde 	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap))
2453a634bfcSVikram Hegde 		dw = 1;
2463a634bfcSVikram Hegde 
2473a634bfcSVikram Hegde 	switch (type) {
2483a634bfcSVikram Hegde 	case TLB_INV_G_PAGE:
2493a634bfcSVikram Hegde 		if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap) ||
2503a634bfcSVikram Hegde 		    am > IMMU_CAP_GET_MAMV(immu->immu_regs_cap) ||
2513a634bfcSVikram Hegde 		    addr & IMMU_PAGEOFFSET) {
2523a634bfcSVikram Hegde 			type = TLB_INV_G_DOMAIN;
2533a634bfcSVikram Hegde 			goto qinv_ignore_psi;
2543a634bfcSVikram Hegde 		}
2553a634bfcSVikram Hegde 		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
2563a634bfcSVikram Hegde 		dsc.hi = IOTLB_INV_DSC_HIGH(addr, hint, am);
2573a634bfcSVikram Hegde 		break;
2583a634bfcSVikram Hegde 
2593a634bfcSVikram Hegde 	qinv_ignore_psi:
2603a634bfcSVikram Hegde 	case TLB_INV_G_DOMAIN:
2613a634bfcSVikram Hegde 		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
2623a634bfcSVikram Hegde 		dsc.hi = 0;
2633a634bfcSVikram Hegde 		break;
2643a634bfcSVikram Hegde 
2653a634bfcSVikram Hegde 	case TLB_INV_G_GLOBAL:
2663a634bfcSVikram Hegde 		dsc.lo = IOTLB_INV_DSC_LOW(0, dr, dw, type);
2673a634bfcSVikram Hegde 		dsc.hi = 0;
2683a634bfcSVikram Hegde 		break;
2693a634bfcSVikram Hegde 	default:
2703a634bfcSVikram Hegde 		ddi_err(DER_WARN, NULL, "incorrect iotlb flush type");
2713a634bfcSVikram Hegde 		return;
2723a634bfcSVikram Hegde 	}
2733a634bfcSVikram Hegde 
2743a634bfcSVikram Hegde 	qinv_submit_inv_dsc(immu, &dsc);
2753a634bfcSVikram Hegde }
2763a634bfcSVikram Hegde 
2773a634bfcSVikram Hegde /* queued invalidation interface -- invalidate dev_iotlb */
2783a634bfcSVikram Hegde static void
qinv_dev_iotlb_common(immu_t * immu,uint16_t sid,uint64_t addr,uint_t size,uint_t max_invs_pd)2793a634bfcSVikram Hegde qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
2803a634bfcSVikram Hegde     uint64_t addr, uint_t size, uint_t max_invs_pd)
2813a634bfcSVikram Hegde {
2823a634bfcSVikram Hegde 	qinv_dsc_t dsc;
2833a634bfcSVikram Hegde 
2843a634bfcSVikram Hegde 	dsc.lo = DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd);
2853a634bfcSVikram Hegde 	dsc.hi = DEV_IOTLB_INV_DSC_HIGH(addr, size);
2863a634bfcSVikram Hegde 
2873a634bfcSVikram Hegde 	qinv_submit_inv_dsc(immu, &dsc);
2883a634bfcSVikram Hegde }
2893a634bfcSVikram Hegde 
2903a634bfcSVikram Hegde /* queued invalidation interface -- invalidate interrupt entry cache */
2913a634bfcSVikram Hegde static void
qinv_iec_common(immu_t * immu,uint_t iidx,uint_t im,uint_t g)2923a634bfcSVikram Hegde qinv_iec_common(immu_t *immu, uint_t iidx, uint_t im, uint_t g)
2933a634bfcSVikram Hegde {
2943a634bfcSVikram Hegde 	qinv_dsc_t dsc;
2953a634bfcSVikram Hegde 
2963a634bfcSVikram Hegde 	dsc.lo = IEC_INV_DSC_LOW(iidx, im, g);
2973a634bfcSVikram Hegde 	dsc.hi = IEC_INV_DSC_HIGH;
2983a634bfcSVikram Hegde 
2993a634bfcSVikram Hegde 	qinv_submit_inv_dsc(immu, &dsc);
3003a634bfcSVikram Hegde }
3013a634bfcSVikram Hegde 
3023a634bfcSVikram Hegde /*
3033a634bfcSVikram Hegde  * queued invalidation interface -- invalidation wait descriptor
3043a634bfcSVikram Hegde  *   wait until the invalidation request finished
3053a634bfcSVikram Hegde  */
3063a634bfcSVikram Hegde static void
qinv_wait_sync(immu_t * immu,immu_inv_wait_t * iwp)307*50200e77SFrank Van Der Linden qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp)
3083a634bfcSVikram Hegde {
3093a634bfcSVikram Hegde 	qinv_dsc_t dsc;
3103a634bfcSVikram Hegde 	volatile uint32_t *status;
311*50200e77SFrank Van Der Linden 	uint64_t paddr;
312*50200e77SFrank Van Der Linden #ifdef DEBUG
313*50200e77SFrank Van Der Linden 	uint_t count;
314*50200e77SFrank Van Der Linden #endif
3153a634bfcSVikram Hegde 
316*50200e77SFrank Van Der Linden 	status = &iwp->iwp_vstatus;
317*50200e77SFrank Van Der Linden 	paddr = iwp->iwp_pstatus;
318*50200e77SFrank Van Der Linden 
319*50200e77SFrank Van Der Linden 	*status = IMMU_INV_DATA_PENDING;
320*50200e77SFrank Van Der Linden 	membar_producer();
3213a634bfcSVikram Hegde 
3223a634bfcSVikram Hegde 	/*
323*50200e77SFrank Van Der Linden 	 * sdata = IMMU_INV_DATA_DONE, fence = 1, sw = 1, if = 0
3243a634bfcSVikram Hegde 	 * indicate the invalidation wait descriptor completion by
3253a634bfcSVikram Hegde 	 * performing a coherent DWORD write to the status address,
3263a634bfcSVikram Hegde 	 * not by generating an invalidation completion event
3273a634bfcSVikram Hegde 	 */
328*50200e77SFrank Van Der Linden 	dsc.lo = INV_WAIT_DSC_LOW(IMMU_INV_DATA_DONE, 1, 1, 0);
329*50200e77SFrank Van Der Linden 	dsc.hi = INV_WAIT_DSC_HIGH(paddr);
3303a634bfcSVikram Hegde 
3313a634bfcSVikram Hegde 	qinv_submit_inv_dsc(immu, &dsc);
3323a634bfcSVikram Hegde 
333*50200e77SFrank Van Der Linden 	if (iwp->iwp_sync) {
334*50200e77SFrank Van Der Linden #ifdef DEBUG
335*50200e77SFrank Van Der Linden 		count = 0;
336*50200e77SFrank Van Der Linden 		while (*status != IMMU_INV_DATA_DONE) {
337*50200e77SFrank Van Der Linden 			count++;
338*50200e77SFrank Van Der Linden 			ht_pause();
339*50200e77SFrank Van Der Linden 		}
340*50200e77SFrank Van Der Linden 		DTRACE_PROBE2(immu__wait__sync, const char *, iwp->iwp_name,
341*50200e77SFrank Van Der Linden 		    uint_t, count);
342*50200e77SFrank Van Der Linden #else
343*50200e77SFrank Van Der Linden 		while (*status != IMMU_INV_DATA_DONE)
344*50200e77SFrank Van Der Linden 			ht_pause();
345*50200e77SFrank Van Der Linden #endif
346*50200e77SFrank Van Der Linden 	}
3473a634bfcSVikram Hegde }
3483a634bfcSVikram Hegde 
349*50200e77SFrank Van Der Linden static void
immu_qinv_inv_wait(immu_inv_wait_t * iwp)350*50200e77SFrank Van Der Linden immu_qinv_inv_wait(immu_inv_wait_t *iwp)
3513a634bfcSVikram Hegde {
352*50200e77SFrank Van Der Linden 	volatile uint32_t *status = &iwp->iwp_vstatus;
353*50200e77SFrank Van Der Linden #ifdef DEBUG
354*50200e77SFrank Van Der Linden 	uint_t count;
3553a634bfcSVikram Hegde 
356*50200e77SFrank Van Der Linden 	count = 0;
357*50200e77SFrank Van Der Linden 	while (*status != IMMU_INV_DATA_DONE) {
358*50200e77SFrank Van Der Linden 		count++;
359*50200e77SFrank Van Der Linden 		ht_pause();
3603a634bfcSVikram Hegde 	}
361*50200e77SFrank Van Der Linden 	DTRACE_PROBE2(immu__wait__async, const char *, iwp->iwp_name,
362*50200e77SFrank Van Der Linden 	    uint_t, count);
363*50200e77SFrank Van Der Linden #else
3643a634bfcSVikram Hegde 
365*50200e77SFrank Van Der Linden 	while (*status != IMMU_INV_DATA_DONE)
366*50200e77SFrank Van Der Linden 		ht_pause();
367*50200e77SFrank Van Der Linden #endif
3683a634bfcSVikram Hegde }
3693a634bfcSVikram Hegde 
3703a634bfcSVikram Hegde /*
3713a634bfcSVikram Hegde  * call ddi_dma_mem_alloc to allocate physical contigous
3723a634bfcSVikram Hegde  * pages for invalidation queue table
3733a634bfcSVikram Hegde  */
3743a634bfcSVikram Hegde static int
qinv_setup(immu_t * immu)3753a634bfcSVikram Hegde qinv_setup(immu_t *immu)
3763a634bfcSVikram Hegde {
3773a634bfcSVikram Hegde 	qinv_t *qinv;
3783a634bfcSVikram Hegde 	size_t size;
3793a634bfcSVikram Hegde 
3803a634bfcSVikram Hegde 	ddi_dma_attr_t qinv_dma_attr = {
3813a634bfcSVikram Hegde 		DMA_ATTR_V0,
3823a634bfcSVikram Hegde 		0U,
383d2256d26SFrank Van Der Linden 		0xffffffffffffffffULL,
3843a634bfcSVikram Hegde 		0xffffffffU,
3853a634bfcSVikram Hegde 		MMU_PAGESIZE, /* page aligned */
3863a634bfcSVikram Hegde 		0x1,
3873a634bfcSVikram Hegde 		0x1,
3883a634bfcSVikram Hegde 		0xffffffffU,
389d2256d26SFrank Van Der Linden 		0xffffffffffffffffULL,
3903a634bfcSVikram Hegde 		1,
3913a634bfcSVikram Hegde 		4,
3923a634bfcSVikram Hegde 		0
3933a634bfcSVikram Hegde 	};
3943a634bfcSVikram Hegde 
3953a634bfcSVikram Hegde 	ddi_device_acc_attr_t qinv_acc_attr = {
3963a634bfcSVikram Hegde 		DDI_DEVICE_ATTR_V0,
3973a634bfcSVikram Hegde 		DDI_NEVERSWAP_ACC,
3983a634bfcSVikram Hegde 		DDI_STRICTORDER_ACC
3993a634bfcSVikram Hegde 	};
4003a634bfcSVikram Hegde 
4013a634bfcSVikram Hegde 	mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DRIVER, NULL);
4023a634bfcSVikram Hegde 
4033a634bfcSVikram Hegde 
4043a634bfcSVikram Hegde 	mutex_enter(&(immu->immu_qinv_lock));
4053a634bfcSVikram Hegde 
4063a634bfcSVikram Hegde 	immu->immu_qinv = NULL;
4073a634bfcSVikram Hegde 	if (!IMMU_ECAP_GET_QI(immu->immu_regs_excap) ||
4083a634bfcSVikram Hegde 	    immu_qinv_enable == B_FALSE) {
4093a634bfcSVikram Hegde 		mutex_exit(&(immu->immu_qinv_lock));
4103a634bfcSVikram Hegde 		return (DDI_SUCCESS);
4113a634bfcSVikram Hegde 	}
4123a634bfcSVikram Hegde 
4133a634bfcSVikram Hegde 	if (qinv_iqa_qs > QINV_MAX_QUEUE_SIZE)
4143a634bfcSVikram Hegde 		qinv_iqa_qs = QINV_MAX_QUEUE_SIZE;
4153a634bfcSVikram Hegde 
4163a634bfcSVikram Hegde 	qinv = kmem_zalloc(sizeof (qinv_t), KM_SLEEP);
4173a634bfcSVikram Hegde 
4183a634bfcSVikram Hegde 	if (ddi_dma_alloc_handle(root_devinfo,
4193a634bfcSVikram Hegde 	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
4203a634bfcSVikram Hegde 	    &(qinv->qinv_table.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
4213a634bfcSVikram Hegde 		ddi_err(DER_WARN, root_devinfo,
4223a634bfcSVikram Hegde 		    "alloc invalidation queue table handler failed");
4233a634bfcSVikram Hegde 		goto queue_table_handle_failed;
4243a634bfcSVikram Hegde 	}
4253a634bfcSVikram Hegde 
4263a634bfcSVikram Hegde 	if (ddi_dma_alloc_handle(root_devinfo,
4273a634bfcSVikram Hegde 	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
4283a634bfcSVikram Hegde 	    &(qinv->qinv_sync.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
4293a634bfcSVikram Hegde 		ddi_err(DER_WARN, root_devinfo,
4303a634bfcSVikram Hegde 		    "alloc invalidation queue sync mem handler failed");
4313a634bfcSVikram Hegde 		goto sync_table_handle_failed;
4323a634bfcSVikram Hegde 	}
4333a634bfcSVikram Hegde 
4343a634bfcSVikram Hegde 	qinv->qinv_table.qinv_mem_size = (1 << (qinv_iqa_qs + 8));
4353a634bfcSVikram Hegde 	size = qinv->qinv_table.qinv_mem_size * QINV_ENTRY_SIZE;
4363a634bfcSVikram Hegde 
4373a634bfcSVikram Hegde 	/* alloc physical contiguous pages for invalidation queue */
4383a634bfcSVikram Hegde 	if (ddi_dma_mem_alloc(qinv->qinv_table.qinv_mem_dma_hdl,
4393a634bfcSVikram Hegde 	    size,
4403a634bfcSVikram Hegde 	    &qinv_acc_attr,
4413a634bfcSVikram Hegde 	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
4423a634bfcSVikram Hegde 	    DDI_DMA_SLEEP,
4433a634bfcSVikram Hegde 	    NULL,
4443a634bfcSVikram Hegde 	    &(qinv->qinv_table.qinv_mem_vaddr),
4453a634bfcSVikram Hegde 	    &size,
4463a634bfcSVikram Hegde 	    &(qinv->qinv_table.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
4473a634bfcSVikram Hegde 		ddi_err(DER_WARN, root_devinfo,
4483a634bfcSVikram Hegde 		    "alloc invalidation queue table failed");
4493a634bfcSVikram Hegde 		goto queue_table_mem_failed;
4503a634bfcSVikram Hegde 	}
4513a634bfcSVikram Hegde 
4523a634bfcSVikram Hegde 	ASSERT(!((uintptr_t)qinv->qinv_table.qinv_mem_vaddr & MMU_PAGEOFFSET));
4533a634bfcSVikram Hegde 	bzero(qinv->qinv_table.qinv_mem_vaddr, size);
4543a634bfcSVikram Hegde 
4553a634bfcSVikram Hegde 	/* get the base physical address of invalidation request queue */
4563a634bfcSVikram Hegde 	qinv->qinv_table.qinv_mem_paddr = pfn_to_pa(
4573a634bfcSVikram Hegde 	    hat_getpfnum(kas.a_hat, qinv->qinv_table.qinv_mem_vaddr));
4583a634bfcSVikram Hegde 
4593a634bfcSVikram Hegde 	qinv->qinv_table.qinv_mem_head = qinv->qinv_table.qinv_mem_tail = 0;
4603a634bfcSVikram Hegde 
4613a634bfcSVikram Hegde 	qinv->qinv_sync.qinv_mem_size = qinv->qinv_table.qinv_mem_size;
4623a634bfcSVikram Hegde 	size = qinv->qinv_sync.qinv_mem_size * QINV_SYNC_DATA_SIZE;
4633a634bfcSVikram Hegde 
4643a634bfcSVikram Hegde 	/* alloc status memory for invalidation wait descriptor */
4653a634bfcSVikram Hegde 	if (ddi_dma_mem_alloc(qinv->qinv_sync.qinv_mem_dma_hdl,
4663a634bfcSVikram Hegde 	    size,
4673a634bfcSVikram Hegde 	    &qinv_acc_attr,
4683a634bfcSVikram Hegde 	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
4693a634bfcSVikram Hegde 	    DDI_DMA_SLEEP,
4703a634bfcSVikram Hegde 	    NULL,
4713a634bfcSVikram Hegde 	    &(qinv->qinv_sync.qinv_mem_vaddr),
4723a634bfcSVikram Hegde 	    &size,
4733a634bfcSVikram Hegde 	    &(qinv->qinv_sync.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
4743a634bfcSVikram Hegde 		ddi_err(DER_WARN, root_devinfo,
4753a634bfcSVikram Hegde 		    "alloc invalidation queue sync mem failed");
4763a634bfcSVikram Hegde 		goto sync_table_mem_failed;
4773a634bfcSVikram Hegde 	}
4783a634bfcSVikram Hegde 
4793a634bfcSVikram Hegde 	ASSERT(!((uintptr_t)qinv->qinv_sync.qinv_mem_vaddr & MMU_PAGEOFFSET));
4803a634bfcSVikram Hegde 	bzero(qinv->qinv_sync.qinv_mem_vaddr, size);
4813a634bfcSVikram Hegde 	qinv->qinv_sync.qinv_mem_paddr = pfn_to_pa(
4823a634bfcSVikram Hegde 	    hat_getpfnum(kas.a_hat, qinv->qinv_sync.qinv_mem_vaddr));
4833a634bfcSVikram Hegde 
4843a634bfcSVikram Hegde 	qinv->qinv_sync.qinv_mem_head = qinv->qinv_sync.qinv_mem_tail = 0;
4853a634bfcSVikram Hegde 
4863a634bfcSVikram Hegde 	mutex_init(&(qinv->qinv_table.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
4873a634bfcSVikram Hegde 	mutex_init(&(qinv->qinv_sync.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
4883a634bfcSVikram Hegde 
4893a634bfcSVikram Hegde 	immu->immu_qinv = qinv;
4903a634bfcSVikram Hegde 
4913a634bfcSVikram Hegde 	mutex_exit(&(immu->immu_qinv_lock));
4923a634bfcSVikram Hegde 
4933a634bfcSVikram Hegde 	return (DDI_SUCCESS);
4943a634bfcSVikram Hegde 
4953a634bfcSVikram Hegde sync_table_mem_failed:
4963a634bfcSVikram Hegde 	ddi_dma_mem_free(&(qinv->qinv_table.qinv_mem_acc_hdl));
4973a634bfcSVikram Hegde 
4983a634bfcSVikram Hegde queue_table_mem_failed:
4993a634bfcSVikram Hegde 	ddi_dma_free_handle(&(qinv->qinv_sync.qinv_mem_dma_hdl));
5003a634bfcSVikram Hegde 
5013a634bfcSVikram Hegde sync_table_handle_failed:
5023a634bfcSVikram Hegde 	ddi_dma_free_handle(&(qinv->qinv_table.qinv_mem_dma_hdl));
5033a634bfcSVikram Hegde 
5043a634bfcSVikram Hegde queue_table_handle_failed:
5053a634bfcSVikram Hegde 	kmem_free(qinv, sizeof (qinv_t));
5063a634bfcSVikram Hegde 
5073a634bfcSVikram Hegde 	mutex_exit(&(immu->immu_qinv_lock));
5083a634bfcSVikram Hegde 
5093a634bfcSVikram Hegde 	return (DDI_FAILURE);
5103a634bfcSVikram Hegde }
5113a634bfcSVikram Hegde 
5123a634bfcSVikram Hegde /*
5133a634bfcSVikram Hegde  * ###########################################################################
5143a634bfcSVikram Hegde  *
5153a634bfcSVikram Hegde  * Functions exported by immu_qinv.c
5163a634bfcSVikram Hegde  *
5173a634bfcSVikram Hegde  * ###########################################################################
5183a634bfcSVikram Hegde  */
5193a634bfcSVikram Hegde 
5203a634bfcSVikram Hegde /*
5213a634bfcSVikram Hegde  * initialize invalidation request queue structure.
5223a634bfcSVikram Hegde  */
523d2256d26SFrank Van Der Linden int
immu_qinv_setup(list_t * listp)5243a634bfcSVikram Hegde immu_qinv_setup(list_t *listp)
5253a634bfcSVikram Hegde {
5263a634bfcSVikram Hegde 	immu_t *immu;
527d2256d26SFrank Van Der Linden 	int nerr;
5283a634bfcSVikram Hegde 
5293a634bfcSVikram Hegde 	if (immu_qinv_enable == B_FALSE) {
530d2256d26SFrank Van Der Linden 		return (DDI_FAILURE);
5313a634bfcSVikram Hegde 	}
5323a634bfcSVikram Hegde 
533d2256d26SFrank Van Der Linden 	nerr = 0;
5343a634bfcSVikram Hegde 	immu = list_head(listp);
5353a634bfcSVikram Hegde 	for (; immu; immu = list_next(listp, immu)) {
5363a634bfcSVikram Hegde 		if (qinv_setup(immu) == DDI_SUCCESS) {
5373a634bfcSVikram Hegde 			immu->immu_qinv_setup = B_TRUE;
538d2256d26SFrank Van Der Linden 		} else {
539d2256d26SFrank Van Der Linden 			nerr++;
540d2256d26SFrank Van Der Linden 			break;
5413a634bfcSVikram Hegde 		}
5423a634bfcSVikram Hegde 	}
543d2256d26SFrank Van Der Linden 
544d2256d26SFrank Van Der Linden 	return (nerr > 0 ? DDI_FAILURE : DDI_SUCCESS);
5453a634bfcSVikram Hegde }
5463a634bfcSVikram Hegde 
5473a634bfcSVikram Hegde void
immu_qinv_startup(immu_t * immu)5483a634bfcSVikram Hegde immu_qinv_startup(immu_t *immu)
5493a634bfcSVikram Hegde {
5503a634bfcSVikram Hegde 	qinv_t *qinv;
5513a634bfcSVikram Hegde 	uint64_t qinv_reg_value;
5523a634bfcSVikram Hegde 
5533a634bfcSVikram Hegde 	if (immu->immu_qinv_setup == B_FALSE) {
5543a634bfcSVikram Hegde 		return;
5553a634bfcSVikram Hegde 	}
5563a634bfcSVikram Hegde 
5573a634bfcSVikram Hegde 	qinv = (qinv_t *)immu->immu_qinv;
5583a634bfcSVikram Hegde 	qinv_reg_value = qinv->qinv_table.qinv_mem_paddr | qinv_iqa_qs;
5593a634bfcSVikram Hegde 	immu_regs_qinv_enable(immu, qinv_reg_value);
560c94adbf9SFrank Van Der Linden 	immu->immu_flushops = &immu_qinv_flushops;
5613a634bfcSVikram Hegde 	immu->immu_qinv_running = B_TRUE;
5623a634bfcSVikram Hegde }
5633a634bfcSVikram Hegde 
5643a634bfcSVikram Hegde /*
5653a634bfcSVikram Hegde  * queued invalidation interface
5663a634bfcSVikram Hegde  *   function based context cache invalidation
5673a634bfcSVikram Hegde  */
5683a634bfcSVikram Hegde void
immu_qinv_context_fsi(immu_t * immu,uint8_t function_mask,uint16_t source_id,uint_t domain_id,immu_inv_wait_t * iwp)5693a634bfcSVikram Hegde immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
570*50200e77SFrank Van Der Linden     uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp)
5713a634bfcSVikram Hegde {
5723a634bfcSVikram Hegde 	qinv_context_common(immu, function_mask, source_id,
5733a634bfcSVikram Hegde 	    domain_id, CTT_INV_G_DEVICE);
574*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
5753a634bfcSVikram Hegde }
5763a634bfcSVikram Hegde 
5773a634bfcSVikram Hegde /*
5783a634bfcSVikram Hegde  * queued invalidation interface
5793a634bfcSVikram Hegde  *   domain based context cache invalidation
5803a634bfcSVikram Hegde  */
5813a634bfcSVikram Hegde void
immu_qinv_context_dsi(immu_t * immu,uint_t domain_id,immu_inv_wait_t * iwp)582*50200e77SFrank Van Der Linden immu_qinv_context_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
5833a634bfcSVikram Hegde {
5843a634bfcSVikram Hegde 	qinv_context_common(immu, 0, 0, domain_id, CTT_INV_G_DOMAIN);
585*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
5863a634bfcSVikram Hegde }
5873a634bfcSVikram Hegde 
5883a634bfcSVikram Hegde /*
5893a634bfcSVikram Hegde  * queued invalidation interface
5903a634bfcSVikram Hegde  *   invalidation global context cache
5913a634bfcSVikram Hegde  */
5923a634bfcSVikram Hegde void
immu_qinv_context_gbl(immu_t * immu,immu_inv_wait_t * iwp)593*50200e77SFrank Van Der Linden immu_qinv_context_gbl(immu_t *immu, immu_inv_wait_t *iwp)
5943a634bfcSVikram Hegde {
5953a634bfcSVikram Hegde 	qinv_context_common(immu, 0, 0, 0, CTT_INV_G_GLOBAL);
596*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
5973a634bfcSVikram Hegde }
5983a634bfcSVikram Hegde 
5993a634bfcSVikram Hegde /*
6003a634bfcSVikram Hegde  * queued invalidation interface
6013a634bfcSVikram Hegde  *   paged based iotlb invalidation
6023a634bfcSVikram Hegde  */
6033a634bfcSVikram Hegde void
immu_qinv_iotlb_psi(immu_t * immu,uint_t domain_id,uint64_t dvma,uint_t count,uint_t hint,immu_inv_wait_t * iwp)604c94adbf9SFrank Van Der Linden immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
605*50200e77SFrank Van Der Linden 	uint64_t dvma, uint_t count, uint_t hint, immu_inv_wait_t *iwp)
6063a634bfcSVikram Hegde {
6073a634bfcSVikram Hegde 	uint_t am = 0;
6083a634bfcSVikram Hegde 	uint_t max_am;
6093a634bfcSVikram Hegde 
6103a634bfcSVikram Hegde 	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);
6113a634bfcSVikram Hegde 
6123a634bfcSVikram Hegde 	/* choose page specified invalidation */
6133a634bfcSVikram Hegde 	if (IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
6143a634bfcSVikram Hegde 		while (am <= max_am) {
6153a634bfcSVikram Hegde 			if ((ADDR_AM_OFFSET(IMMU_BTOP(dvma), am) + count)
6163a634bfcSVikram Hegde 			    <= ADDR_AM_MAX(am)) {
6173a634bfcSVikram Hegde 				qinv_iotlb_common(immu, domain_id,
6183a634bfcSVikram Hegde 				    dvma, am, hint, TLB_INV_G_PAGE);
6193a634bfcSVikram Hegde 				break;
6203a634bfcSVikram Hegde 			}
6213a634bfcSVikram Hegde 			am++;
6223a634bfcSVikram Hegde 		}
6233a634bfcSVikram Hegde 		if (am > max_am) {
6243a634bfcSVikram Hegde 			qinv_iotlb_common(immu, domain_id,
6253a634bfcSVikram Hegde 			    dvma, 0, hint, TLB_INV_G_DOMAIN);
6263a634bfcSVikram Hegde 		}
6273a634bfcSVikram Hegde 
6283a634bfcSVikram Hegde 	/* choose domain invalidation */
6293a634bfcSVikram Hegde 	} else {
6303a634bfcSVikram Hegde 		qinv_iotlb_common(immu, domain_id, dvma,
6313a634bfcSVikram Hegde 		    0, hint, TLB_INV_G_DOMAIN);
6323a634bfcSVikram Hegde 	}
633*50200e77SFrank Van Der Linden 
634*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
6353a634bfcSVikram Hegde }
6363a634bfcSVikram Hegde 
6373a634bfcSVikram Hegde /*
6383a634bfcSVikram Hegde  * queued invalidation interface
6393a634bfcSVikram Hegde  *   domain based iotlb invalidation
6403a634bfcSVikram Hegde  */
6413a634bfcSVikram Hegde void
immu_qinv_iotlb_dsi(immu_t * immu,uint_t domain_id,immu_inv_wait_t * iwp)642*50200e77SFrank Van Der Linden immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
6433a634bfcSVikram Hegde {
6443a634bfcSVikram Hegde 	qinv_iotlb_common(immu, domain_id, 0, 0, 0, TLB_INV_G_DOMAIN);
645*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
6463a634bfcSVikram Hegde }
6473a634bfcSVikram Hegde 
6483a634bfcSVikram Hegde /*
6493a634bfcSVikram Hegde  * queued invalidation interface
6503a634bfcSVikram Hegde  *    global iotlb invalidation
6513a634bfcSVikram Hegde  */
6523a634bfcSVikram Hegde void
immu_qinv_iotlb_gbl(immu_t * immu,immu_inv_wait_t * iwp)653*50200e77SFrank Van Der Linden immu_qinv_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp)
6543a634bfcSVikram Hegde {
6553a634bfcSVikram Hegde 	qinv_iotlb_common(immu, 0, 0, 0, 0, TLB_INV_G_GLOBAL);
656*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
6573a634bfcSVikram Hegde }
6583a634bfcSVikram Hegde 
6593a634bfcSVikram Hegde /* queued invalidation interface -- global invalidate interrupt entry cache */
6603a634bfcSVikram Hegde void
immu_qinv_intr_global(immu_t * immu,immu_inv_wait_t * iwp)661*50200e77SFrank Van Der Linden immu_qinv_intr_global(immu_t *immu, immu_inv_wait_t *iwp)
6623a634bfcSVikram Hegde {
6633a634bfcSVikram Hegde 	qinv_iec_common(immu, 0, 0, IEC_INV_GLOBAL);
664*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
6653a634bfcSVikram Hegde }
6663a634bfcSVikram Hegde 
6673a634bfcSVikram Hegde /* queued invalidation interface -- invalidate single interrupt entry cache */
6683a634bfcSVikram Hegde void
immu_qinv_intr_one_cache(immu_t * immu,uint_t iidx,immu_inv_wait_t * iwp)669*50200e77SFrank Van Der Linden immu_qinv_intr_one_cache(immu_t *immu, uint_t iidx, immu_inv_wait_t *iwp)
6703a634bfcSVikram Hegde {
6713a634bfcSVikram Hegde 	qinv_iec_common(immu, iidx, 0, IEC_INV_INDEX);
672*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
6733a634bfcSVikram Hegde }
6743a634bfcSVikram Hegde 
6753a634bfcSVikram Hegde /* queued invalidation interface -- invalidate interrupt entry caches */
6763a634bfcSVikram Hegde void
immu_qinv_intr_caches(immu_t * immu,uint_t iidx,uint_t cnt,immu_inv_wait_t * iwp)677*50200e77SFrank Van Der Linden immu_qinv_intr_caches(immu_t *immu, uint_t iidx, uint_t cnt,
678*50200e77SFrank Van Der Linden     immu_inv_wait_t *iwp)
6793a634bfcSVikram Hegde {
6803a634bfcSVikram Hegde 	uint_t	i, mask = 0;
6813a634bfcSVikram Hegde 
6823a634bfcSVikram Hegde 	ASSERT(cnt != 0);
6833a634bfcSVikram Hegde 
6843a634bfcSVikram Hegde 	/* requested interrupt count is not a power of 2 */
6853a634bfcSVikram Hegde 	if (!ISP2(cnt)) {
6863a634bfcSVikram Hegde 		for (i = 0; i < cnt; i++) {
6873a634bfcSVikram Hegde 			qinv_iec_common(immu, iidx + cnt, 0, IEC_INV_INDEX);
6883a634bfcSVikram Hegde 		}
689*50200e77SFrank Van Der Linden 		qinv_wait_sync(immu, iwp);
6903a634bfcSVikram Hegde 		return;
6913a634bfcSVikram Hegde 	}
6923a634bfcSVikram Hegde 
6933a634bfcSVikram Hegde 	while ((2 << mask) < cnt) {
6943a634bfcSVikram Hegde 		mask++;
6953a634bfcSVikram Hegde 	}
6963a634bfcSVikram Hegde 
6973a634bfcSVikram Hegde 	if (mask > IMMU_ECAP_GET_MHMV(immu->immu_regs_excap)) {
6983a634bfcSVikram Hegde 		for (i = 0; i < cnt; i++) {
6993a634bfcSVikram Hegde 			qinv_iec_common(immu, iidx + cnt, 0, IEC_INV_INDEX);
7003a634bfcSVikram Hegde 		}
701*50200e77SFrank Van Der Linden 		qinv_wait_sync(immu, iwp);
7023a634bfcSVikram Hegde 		return;
7033a634bfcSVikram Hegde 	}
7043a634bfcSVikram Hegde 
7053a634bfcSVikram Hegde 	qinv_iec_common(immu, iidx, mask, IEC_INV_INDEX);
7063a634bfcSVikram Hegde 
707*50200e77SFrank Van Der Linden 	qinv_wait_sync(immu, iwp);
7083a634bfcSVikram Hegde }
7093a634bfcSVikram Hegde 
7103a634bfcSVikram Hegde void
immu_qinv_report_fault(immu_t * immu)7113a634bfcSVikram Hegde immu_qinv_report_fault(immu_t *immu)
7123a634bfcSVikram Hegde {
7133a634bfcSVikram Hegde 	uint16_t head;
7143a634bfcSVikram Hegde 	qinv_dsc_t *dsc;
7153a634bfcSVikram Hegde 	qinv_t *qinv;
7163a634bfcSVikram Hegde 
7173a634bfcSVikram Hegde 	/* access qinv data */
7183a634bfcSVikram Hegde 	mutex_enter(&(immu->immu_qinv_lock));
7193a634bfcSVikram Hegde 
7203a634bfcSVikram Hegde 	qinv = (qinv_t *)(immu->immu_qinv);
7213a634bfcSVikram Hegde 
7223a634bfcSVikram Hegde 	head = QINV_IQA_HEAD(
7233a634bfcSVikram Hegde 	    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
7243a634bfcSVikram Hegde 
7253a634bfcSVikram Hegde 	dsc = (qinv_dsc_t *)(qinv->qinv_table.qinv_mem_vaddr
7263a634bfcSVikram Hegde 	    + (head * QINV_ENTRY_SIZE));
7273a634bfcSVikram Hegde 
7283a634bfcSVikram Hegde 	/* report the error */
7293a634bfcSVikram Hegde 	ddi_err(DER_WARN, immu->immu_dip,
7303a634bfcSVikram Hegde 	    "generated a fault when fetching a descriptor from the"
7313a634bfcSVikram Hegde 	    "\tinvalidation queue, or detects that the fetched"
7323a634bfcSVikram Hegde 	    "\tdescriptor is invalid. The head register is "
7333a634bfcSVikram Hegde 	    "0x%" PRIx64
7343a634bfcSVikram Hegde 	    "\tthe type is %s",
7353a634bfcSVikram Hegde 	    head,
7363a634bfcSVikram Hegde 	    qinv_dsc_type[MIN(INV_DSC_TYPE(dsc), QINV_MAX_DSC_TYPE)]);
7373a634bfcSVikram Hegde 
7383a634bfcSVikram Hegde 	mutex_exit(&(immu->immu_qinv_lock));
7393a634bfcSVikram Hegde }
740