/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */

/*
 * immu_regs.c - File that operates on an IMMU unit's registers
 */
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/spl.h>
#include <sys/sysmacros.h>
#include <sys/immu.h>
#include <sys/cpu.h>

#define get_reg32(immu, offset) ddi_get32((immu)->immu_regs_handle, \
    (uint32_t *)(immu->immu_regs_addr + (offset)))
#define get_reg64(immu, offset) ddi_get64((immu)->immu_regs_handle, \
    (uint64_t *)(immu->immu_regs_addr + (offset)))
#define put_reg32(immu, offset, val) ddi_put32((immu)->immu_regs_handle, \
    (uint32_t *)(immu->immu_regs_addr + (offset)), val)
#define put_reg64(immu, offset, val) ddi_put64((immu)->immu_regs_handle, \
    (uint64_t *)(immu->immu_regs_addr + (offset)), val)

static void immu_regs_inv_wait(immu_inv_wait_t *iwp);

struct immu_flushops immu_regs_flushops = {
    immu_regs_context_fsi,
    immu_regs_context_dsi,
    immu_regs_context_gbl,
    immu_regs_iotlb_psi,
    immu_regs_iotlb_dsi,
    immu_regs_iotlb_gbl,
    immu_regs_inv_wait
};

/*
 * Wait at most 60 seconds (the value below is in microseconds) for the
 * hardware to signal completion.
 */
#define IMMU_MAX_WAIT_TIME 60000000
#define wait_completion(immu, offset, getf, completion, status) \
{ \
    clock_t stick = ddi_get_lbolt(); \
    clock_t ntick; \
    _NOTE(CONSTCOND) \
    while (1) { \
        status = getf(immu, offset); \
        ntick = ddi_get_lbolt(); \
        if (completion) { \
            break; \
        } \
        if (ntick - stick >= drv_usectohz(IMMU_MAX_WAIT_TIME)) { \
            ddi_err(DER_PANIC, NULL, \
                "immu wait completion time out"); \
            /*NOTREACHED*/ \
        } else { \
            ht_pause(); \
        } \
    } \
}
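
/*
 * wait_completion() arguments:
 *   immu       - the IMMU unit whose register is polled
 *   offset     - register offset to read
 *   getf       - accessor to use (get_reg32 or get_reg64)
 *   completion - expression over "status" that becomes true once the
 *                hardware has finished the operation
 *   status     - variable that receives the last register value read
 * The macro spins (with ht_pause()) until "completion" holds and panics
 * if the hardware has not responded within IMMU_MAX_WAIT_TIME.
 */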

static ddi_device_acc_attr_t immu_regs_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,
};

/*
 * iotlb_flush()
 *   flush the iotlb cache
 */
static void
iotlb_flush(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, immu_iotlb_inv_t type)
{
    uint64_t command = 0, iva = 0;
    uint_t iva_offset, iotlb_offset;
    uint64_t status = 0;

    /* no lock needed since cap and excap fields are RDONLY */
    iva_offset = IMMU_ECAP_GET_IRO(immu->immu_regs_excap);
    iotlb_offset = iva_offset + 8;
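    /*
     * The IOTLB invalidation registers live at the offset given by the
     * extended capability register's IRO field: the invalidate-address
     * register comes first, and the IOTLB invalidate command register
     * sits 8 bytes after it (hence iva_offset + 8 above).
     */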

    /*
     * prepare drain read/write command
     */
    if (IMMU_CAP_GET_DWD(immu->immu_regs_cap)) {
        command |= TLB_INV_DRAIN_WRITE;
    }

    if (IMMU_CAP_GET_DRD(immu->immu_regs_cap)) {
        command |= TLB_INV_DRAIN_READ;
    }

    /*
     * Set the invalidation granularity requested by the caller:
     * page-selective, domain-selective or global.
     */
    switch (type) {
    case IOTLB_PSI:
        command |= TLB_INV_PAGE | TLB_INV_IVT |
            TLB_INV_DID(domain_id);
        iva = addr | am | TLB_IVA_HINT(hint);
        break;
    case IOTLB_DSI:
        command |= TLB_INV_DOMAIN | TLB_INV_IVT |
            TLB_INV_DID(domain_id);
        break;
    case IOTLB_GLOBAL:
        command |= TLB_INV_GLOBAL | TLB_INV_IVT;
        break;
    default:
        ddi_err(DER_MODE, NULL, "%s: incorrect iotlb flush type",
            immu->immu_name);
        return;
    }

    if (iva)
        put_reg64(immu, iva_offset, iva);
    put_reg64(immu, iotlb_offset, command);
    wait_completion(immu, iotlb_offset, get_reg64,
        (!(status & TLB_INV_IVT)), status);
}

/*
 * immu_regs_iotlb_psi()
 *   iotlb page specific invalidation
 */
/*ARGSUSED*/
void
immu_regs_iotlb_psi(immu_t *immu, uint_t did, uint64_t dvma, uint_t snpages,
    uint_t hint, immu_inv_wait_t *iwp)
{
    int dvma_am;
    int npg_am;
    int max_am;
    int am;
    uint64_t align;
    int npages_left;
    int npages;
    int i;

    if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
        immu_regs_iotlb_dsi(immu, did, iwp);
        return;
    }

    max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);

    mutex_enter(&(immu->immu_regs_lock));

    npages_left = snpages;
    for (i = 0; i < immu_flush_gran && npages_left > 0; i++) {
        /* First calculate alignment of DVMA */

        if (dvma == 0) {
            dvma_am = max_am;
        } else {
            for (align = (1 << 12), dvma_am = 1;
                (dvma & align) == 0; align <<= 1, dvma_am++)
                ;
            dvma_am--;
        }
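        /*
         * dvma_am is now the largest n such that dvma is aligned on a
         * 2^(12 + n) byte boundary. For example, dvma == 0x20000 (128K,
         * i.e. 2^17-aligned) yields dvma_am == 5.
         */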

        /* Calculate the npg_am */
        npages = npages_left;
        for (npg_am = 0, npages >>= 1; npages; npages >>= 1, npg_am++)
            ;
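        /*
         * npg_am is floor(log2(npages_left)), i.e. the largest n with
         * 2^n <= npages_left (for example, npages_left == 8 gives
         * npg_am == 3).
         */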

        am = MIN(max_am, MIN(dvma_am, npg_am));

        iotlb_flush(immu, did, dvma, am, hint, IOTLB_PSI);

        npages = (1 << am);
        npages_left -= npages;
        dvma += (npages * IMMU_PAGESIZE);
    }

    if (npages_left) {
        iotlb_flush(immu, did, 0, 0, 0, IOTLB_DSI);
    }
    mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_iotlb_dsi()
 *   domain specific invalidation
 */
/*ARGSUSED*/
void
immu_regs_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
    mutex_enter(&(immu->immu_regs_lock));
    iotlb_flush(immu, domain_id, 0, 0, 0, IOTLB_DSI);
    mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_iotlb_gbl()
 *   global iotlb invalidation
 */
/*ARGSUSED*/
void
immu_regs_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
    mutex_enter(&(immu->immu_regs_lock));
    iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
    mutex_exit(&(immu->immu_regs_lock));
}


static int
gaw2agaw(int gaw)
{
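    /*
     * Round the guest address width (gaw) up to the next adjusted guest
     * address width (agaw) supported by the page-table format, i.e. the
     * next width of the form 12 + 9*k, capped at 64. For example,
     * gaw == 36 gives r == (36 - 12) % 9 == 6 and agaw == 36 + 9 - 6 == 39.
     */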
    int r, agaw;

    r = (gaw - 12) % 9;

    if (r == 0)
        agaw = gaw;
    else
        agaw = gaw + 9 - r;

    if (agaw > 64)
        agaw = 64;

    return (agaw);
}

/*
 * set_agaw()
 *   calculate the AGAW for an IOMMU unit
 */
static int
set_agaw(immu_t *immu)
{
    int mgaw, magaw, agaw;
    uint_t bitpos;
    int max_sagaw_mask, sagaw_mask, mask;
    int nlevels;

    /*
     * mgaw is the maximum guest address width.
     * Addresses above this value will be
     * blocked by the IOMMU unit.
     * sagaw is a bitmask that lists all the
     * AGAWs supported by this IOMMU unit.
     */
    mgaw = IMMU_CAP_MGAW(immu->immu_regs_cap);
    sagaw_mask = IMMU_CAP_SAGAW(immu->immu_regs_cap);

    magaw = gaw2agaw(mgaw);

    /*
     * The maximum SAGAW bitmask is specified by the
     * Intel VT-d spec.
     */
    max_sagaw_mask = ((1 << 5) - 1);
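    /* i.e. 0x1f: only bit positions 0 through 4 of the SAGAW field are defined */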

    if (sagaw_mask > max_sagaw_mask) {
        ddi_err(DER_WARN, NULL, "%s: SAGAW bitmask (%x) "
            "is larger than the maximum SAGAW bitmask "
            "(%x) specified by the Intel VT-d spec",
            immu->immu_name, sagaw_mask, max_sagaw_mask);
        return (DDI_FAILURE);
    }

    /*
     * Find a supported AGAW <= magaw
     *
     *  sagaw_mask   bitpos   AGAW (bits)   nlevels
     *  ==============================================
     *  0 0 0 0 1      0         30            2
     *  0 0 0 1 0      1         39            3
     *  0 0 1 0 0      2         48            4
     *  0 1 0 0 0      3         57            5
     *  1 0 0 0 0      4         64(66)        6
     */
    mask = 1;
    nlevels = 0;
    agaw = 0;
    for (mask = 1, bitpos = 0; bitpos < 5;
        bitpos++, mask <<= 1) {
        if (mask & sagaw_mask) {
            nlevels = bitpos + 2;
            agaw = 30 + (bitpos * 9);
        }
    }
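    /*
     * The loop above leaves agaw/nlevels set for the highest-order bit in
     * sagaw_mask, i.e. the largest AGAW the hardware supports; it is
     * validated against magaw and the [30, 64] range below.
     */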

    /* calculated agaw can be > 64 */
    agaw = (agaw > 64) ? 64 : agaw;

    if (agaw < 30 || agaw > magaw) {
        ddi_err(DER_WARN, NULL, "%s: Calculated AGAW (%d) "
            "is outside valid limits [30,%d] specified by the VT-d spec "
            "and magaw", immu->immu_name, agaw, magaw);
        return (DDI_FAILURE);
    }

    if (nlevels < 2 || nlevels > 6) {
        ddi_err(DER_WARN, NULL, "%s: Calculated pagetable "
            "level (%d) is outside valid limits [2,6]",
            immu->immu_name, nlevels);
        return (DDI_FAILURE);
    }

    ddi_err(DER_LOG, NULL, "Calculated pagetable "
        "level (%d), agaw = %d", nlevels, agaw);

    immu->immu_dvma_nlevels = nlevels;
    immu->immu_dvma_agaw = agaw;

    return (DDI_SUCCESS);
}

static int
setup_regs(immu_t *immu)
{
    int error;

    /*
     * This lock may be acquired by the IOMMU interrupt handler
     */
    mutex_init(&(immu->immu_regs_lock), NULL, MUTEX_DRIVER,
        (void *)ipltospl(IMMU_INTR_IPL));

    /*
     * map the register address space
     */
    error = ddi_regs_map_setup(immu->immu_dip, 0,
        (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
        (offset_t)IMMU_REGSZ, &immu_regs_attr,
        &(immu->immu_regs_handle));

    if (error == DDI_FAILURE) {
        ddi_err(DER_WARN, NULL, "%s: Intel IOMMU register map failed",
            immu->immu_name);
        mutex_destroy(&(immu->immu_regs_lock));
        return (DDI_FAILURE);
    }

    /*
     * get the register value
     */
    immu->immu_regs_cap = get_reg64(immu, IMMU_REG_CAP);
    immu->immu_regs_excap = get_reg64(immu, IMMU_REG_EXCAP);

    /*
     * if the hardware access is non-coherent, we need clflush
     */
    if (IMMU_ECAP_GET_C(immu->immu_regs_excap)) {
        immu->immu_dvma_coherent = B_TRUE;
    } else {
        immu->immu_dvma_coherent = B_FALSE;
        if (!is_x86_feature(x86_featureset, X86FSET_CLFSH)) {
            ddi_err(DER_WARN, NULL,
                "immu unit %s can't be enabled due to "
                "missing clflush functionality", immu->immu_name);
            ddi_regs_map_free(&(immu->immu_regs_handle));
            mutex_destroy(&(immu->immu_regs_lock));
            return (DDI_FAILURE);
        }
    }

    /* Setup SNP and TM reserved fields */
    immu->immu_SNP_reserved = immu_regs_is_SNP_reserved(immu);
    immu->immu_TM_reserved = immu_regs_is_TM_reserved(immu);

    if (IMMU_ECAP_GET_CH(immu->immu_regs_excap) && immu_use_tm)
        immu->immu_ptemask = PDTE_MASK_TM;
    else
        immu->immu_ptemask = 0;

    /*
     * Check for Mobile 4 series chipset
     */
    if (immu_quirk_mobile4 == B_TRUE &&
        !IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
        ddi_err(DER_LOG, NULL,
            "IMMU: Mobile 4 chipset quirk detected. "
            "Force-setting RWBF");
        IMMU_CAP_SET_RWBF(immu->immu_regs_cap);
    }

    /*
     * retrieve the maximum number of domains
     */
    immu->immu_max_domains = IMMU_CAP_ND(immu->immu_regs_cap);

    /*
     * calculate the agaw
     */
    if (set_agaw(immu) != DDI_SUCCESS) {
        ddi_regs_map_free(&(immu->immu_regs_handle));
        mutex_destroy(&(immu->immu_regs_lock));
        return (DDI_FAILURE);
    }
    immu->immu_regs_cmdval = 0;

    immu->immu_flushops = &immu_regs_flushops;

    return (DDI_SUCCESS);
}

/* ############### Functions exported ################## */

/*
 * immu_regs_setup()
 *   Set up mappings to an IMMU unit's registers
 *   so that they can be read/written
 */
void
immu_regs_setup(list_t *listp)
{
    int i;
    immu_t *immu;

    for (i = 0; i < IMMU_MAXSEG; i++) {
        immu = list_head(listp);
        for (; immu; immu = list_next(listp, immu)) {
            /* do your best, continue on error */
            if (setup_regs(immu) != DDI_SUCCESS) {
                immu->immu_regs_setup = B_FALSE;
            } else {
                immu->immu_regs_setup = B_TRUE;
            }
        }
    }
}

/*
 * immu_regs_resume()
 *   remap an IMMU unit's registers and restore its state on resume
 */
int
immu_regs_resume(immu_t *immu)
{
    int error;

    /*
     * remap the register address space
     */
    error = ddi_regs_map_setup(immu->immu_dip, 0,
        (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
        (offset_t)IMMU_REGSZ, &immu_regs_attr,
        &(immu->immu_regs_handle));
    if (error != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }

    immu_regs_set_root_table(immu);

    immu_regs_intr_enable(immu, immu->immu_regs_intr_msi_addr,
        immu->immu_regs_intr_msi_data, immu->immu_regs_intr_uaddr);

    (void) immu_intr_handler((caddr_t)immu, NULL);

    immu_regs_intrmap_enable(immu, immu->immu_intrmap_irta_reg);

    immu_regs_qinv_enable(immu, immu->immu_qinv_reg_value);

    return (error);
}

/*
 * immu_regs_suspend()
 */
void
immu_regs_suspend(immu_t *immu)
{

    immu->immu_intrmap_running = B_FALSE;

    /* Finally, unmap the regs */
    ddi_regs_map_free(&(immu->immu_regs_handle));
}

/*
 * immu_regs_startup()
 *   set an IMMU unit's registers to start up the unit
 */
void
immu_regs_startup(immu_t *immu)
{
    uint32_t status;

    if (immu->immu_regs_setup == B_FALSE) {
        return;
    }

    mutex_enter(&(immu->immu_regs_lock));
    put_reg32(immu, IMMU_REG_GLOBAL_CMD,
        immu->immu_regs_cmdval | IMMU_GCMD_TE);
    wait_completion(immu, IMMU_REG_GLOBAL_STS,
        get_reg32, (status & IMMU_GSTS_TES), status);
    immu->immu_regs_cmdval |= IMMU_GCMD_TE;
    immu->immu_regs_running = B_TRUE;
    mutex_exit(&(immu->immu_regs_lock));

    ddi_err(DER_NOTE, NULL, "%s running", immu->immu_name);
}

/*
 * immu_regs_shutdown()
 *   shutdown a unit
 */
void
immu_regs_shutdown(immu_t *immu)
{
    uint32_t status;

    if (immu->immu_regs_running == B_FALSE) {
        return;
    }

    mutex_enter(&(immu->immu_regs_lock));
    immu->immu_regs_cmdval &= ~IMMU_GCMD_TE;
    put_reg32(immu, IMMU_REG_GLOBAL_CMD,
        immu->immu_regs_cmdval);
    wait_completion(immu, IMMU_REG_GLOBAL_STS,
        get_reg32, !(status & IMMU_GSTS_TES), status);
    immu->immu_regs_running = B_FALSE;
    mutex_exit(&(immu->immu_regs_lock));

    ddi_err(DER_NOTE, NULL, "IOMMU %s stopped", immu->immu_name);
}

/*
 * immu_regs_intr_enable()
 *   program an IMMU unit's registers to set up its
 *   fault-event interrupt (MSI address/data)
 */
void
immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
    uint32_t uaddr)
{
    mutex_enter(&(immu->immu_regs_lock));
    immu->immu_regs_intr_msi_addr = msi_addr;
    immu->immu_regs_intr_uaddr = uaddr;
    immu->immu_regs_intr_msi_data = msi_data;
    put_reg32(immu, IMMU_REG_FEVNT_ADDR, msi_addr);
    put_reg32(immu, IMMU_REG_FEVNT_UADDR, uaddr);
    put_reg32(immu, IMMU_REG_FEVNT_DATA, msi_data);
    put_reg32(immu, IMMU_REG_FEVNT_CON, 0);
    mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_passthru_supported()
 *   Returns B_TRUE if passthru is supported
 */
boolean_t
immu_regs_passthru_supported(immu_t *immu)
{
    if (IMMU_ECAP_GET_PT(immu->immu_regs_excap)) {
        return (B_TRUE);
    }

    ddi_err(DER_WARN, NULL, "Passthru not supported");
    return (B_FALSE);
}

/*
 * immu_regs_is_TM_reserved()
 *   Returns B_TRUE if TM field is reserved
 */
boolean_t
immu_regs_is_TM_reserved(immu_t *immu)
{
    if (IMMU_ECAP_GET_DI(immu->immu_regs_excap) ||
        IMMU_ECAP_GET_CH(immu->immu_regs_excap)) {
        return (B_FALSE);
    }
    return (B_TRUE);
}

/*
 * immu_regs_is_SNP_reserved()
 *   Returns B_TRUE if SNP field is reserved
 */
boolean_t
immu_regs_is_SNP_reserved(immu_t *immu)
{

    return (IMMU_ECAP_GET_SC(immu->immu_regs_excap) ? B_FALSE : B_TRUE);
}

/*
 * immu_regs_wbf_flush()
 *   If required and supported, write to IMMU
 *   unit's regs to flush DMA write buffer(s)
 */
void
immu_regs_wbf_flush(immu_t *immu)
{
    uint32_t status;

    if (!IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
        return;
    }

    mutex_enter(&(immu->immu_regs_lock));
    put_reg32(immu, IMMU_REG_GLOBAL_CMD,
        immu->immu_regs_cmdval | IMMU_GCMD_WBF);
    wait_completion(immu, IMMU_REG_GLOBAL_STS,
        get_reg32, (!(status & IMMU_GSTS_WBFS)), status);
    mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_cpu_flush()
 *   flush the cpu cache line after CPU memory writes, so
 *   IOMMU can see the writes
 */
void
immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size)
{
    uintptr_t startline, endline;

    if (immu->immu_dvma_coherent == B_TRUE)
        return;

    startline = (uintptr_t)addr & ~(uintptr_t)(x86_clflush_size - 1);
    endline = ((uintptr_t)addr + size - 1) &
        ~(uintptr_t)(x86_clflush_size - 1);
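    /*
     * startline and endline are the cache-line-aligned addresses of the
     * first and last lines touched by [addr, addr + size); endline is
     * inclusive, hence the <= test below. Flush each line, then fence.
     */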
    while (startline <= endline) {
        clflush_insn((caddr_t)startline);
        startline += x86_clflush_size;
    }

    mfence_insn();
}

/*
 * context_flush()
 *   flush the context cache
 */
static void
context_flush(immu_t *immu, uint8_t function_mask,
    uint16_t sid, uint_t did, immu_context_inv_t type)
{
    uint64_t command = 0;
    uint64_t status;

    /*
     * define the command
     */
    switch (type) {
    case CONTEXT_FSI:
        command |= CCMD_INV_ICC | CCMD_INV_DEVICE
            | CCMD_INV_DID(did)
            | CCMD_INV_SID(sid) | CCMD_INV_FM(function_mask);
        break;
    case CONTEXT_DSI:
        command |= CCMD_INV_ICC | CCMD_INV_DOMAIN
            | CCMD_INV_DID(did);
        break;
    case CONTEXT_GLOBAL:
        command |= CCMD_INV_ICC | CCMD_INV_GLOBAL;
        break;
    default:
        ddi_err(DER_PANIC, NULL,
            "%s: incorrect context cache flush type",
            immu->immu_name);
        /*NOTREACHED*/
    }

    mutex_enter(&(immu->immu_regs_lock));
    put_reg64(immu, IMMU_REG_CONTEXT_CMD, command);
    wait_completion(immu, IMMU_REG_CONTEXT_CMD, get_reg64,
        (!(status & CCMD_INV_ICC)), status);
    mutex_exit(&(immu->immu_regs_lock));
}

/*ARGSUSED*/
void
immu_regs_context_fsi(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp)
{
    context_flush(immu, function_mask, source_id, domain_id, CONTEXT_FSI);
}

/*ARGSUSED*/
void
immu_regs_context_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
    context_flush(immu, 0, 0, domain_id, CONTEXT_DSI);
}

/*ARGSUSED*/
void
immu_regs_context_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
    context_flush(immu, 0, 0, 0, CONTEXT_GLOBAL);
}

/*
 * Nothing to do, all register operations are synchronous.
 */
/*ARGSUSED*/
static void
immu_regs_inv_wait(immu_inv_wait_t *iwp)
{
}

void
immu_regs_set_root_table(immu_t *immu)
{
    uint32_t status;

    mutex_enter(&(immu->immu_regs_lock));
    put_reg64(immu, IMMU_REG_ROOTENTRY,
        immu->immu_ctx_root->hwpg_paddr);
    put_reg32(immu, IMMU_REG_GLOBAL_CMD,
        immu->immu_regs_cmdval | IMMU_GCMD_SRTP);
    wait_completion(immu, IMMU_REG_GLOBAL_STS,
        get_reg32, (status & IMMU_GSTS_RTPS), status);
    mutex_exit(&(immu->immu_regs_lock));
}


/* enable queued invalidation interface */
void
immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value)
{
    uint32_t status;

    if (immu_qinv_enable == B_FALSE)
        return;

    mutex_enter(&immu->immu_regs_lock);
    immu->immu_qinv_reg_value = qinv_reg_value;
    /* Initialize the Invalidation Queue Tail register to zero */
    put_reg64(immu, IMMU_REG_INVAL_QT, 0);

    /* set invalidation queue base address register */
    put_reg64(immu, IMMU_REG_INVAL_QAR, qinv_reg_value);

    /* enable queued invalidation interface */
    put_reg32(immu, IMMU_REG_GLOBAL_CMD,
        immu->immu_regs_cmdval | IMMU_GCMD_QIE);
    wait_completion(immu, IMMU_REG_GLOBAL_STS,
        get_reg32, (status & IMMU_GSTS_QIES), status);
    mutex_exit(&immu->immu_regs_lock);

    immu->immu_regs_cmdval |= IMMU_GCMD_QIE;
    immu->immu_qinv_running = B_TRUE;
}

/* enable interrupt remapping hardware unit */
void
immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg)
{
    uint32_t status;

    if (immu_intrmap_enable == B_FALSE)
        return;

    /* set interrupt remap table pointer */
    mutex_enter(&(immu->immu_regs_lock));
    immu->immu_intrmap_irta_reg = irta_reg;
    put_reg64(immu, IMMU_REG_IRTAR, irta_reg);
    put_reg32(immu, IMMU_REG_GLOBAL_CMD,
        immu->immu_regs_cmdval | IMMU_GCMD_SIRTP);
    wait_completion(immu, IMMU_REG_GLOBAL_STS,
        get_reg32, (status & IMMU_GSTS_IRTPS), status);
    mutex_exit(&(immu->immu_regs_lock));

    /* global flush intr entry cache */
    immu_qinv_intr_global(immu, &immu->immu_intrmap_inv_wait);

    /* enable interrupt remapping */
    mutex_enter(&(immu->immu_regs_lock));
    put_reg32(immu, IMMU_REG_GLOBAL_CMD,
        immu->immu_regs_cmdval | IMMU_GCMD_IRE);
    wait_completion(immu, IMMU_REG_GLOBAL_STS,
        get_reg32, (status & IMMU_GSTS_IRES),
        status);
    immu->immu_regs_cmdval |= IMMU_GCMD_IRE;

    /* set compatible mode */
    put_reg32(immu, IMMU_REG_GLOBAL_CMD,
        immu->immu_regs_cmdval | IMMU_GCMD_CFI);
    wait_completion(immu, IMMU_REG_GLOBAL_STS,
        get_reg32, (status & IMMU_GSTS_CFIS),
        status);
    immu->immu_regs_cmdval |= IMMU_GCMD_CFI;
    mutex_exit(&(immu->immu_regs_lock));

    immu->immu_intrmap_running = B_TRUE;
}

uint64_t
immu_regs_get64(immu_t *immu, uint_t reg)
{
    return (get_reg64(immu, reg));
}

uint32_t
immu_regs_get32(immu_t *immu, uint_t reg)
{
    return (get_reg32(immu, reg));
}

void
immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val)
{
    put_reg64(immu, reg, val);
}

void
immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val)
{
    put_reg32(immu, reg, val);
}