/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 * (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
#include "trace.h"

static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}

/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}
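/*
 * A worked example of the masked-write semantics above (illustrative
 * register values only): with wmask 0x0f and w1cmask 0x10, a guest
 * write of 0x1f to a register holding 0x13 yields
 * ((0x13 & ~0x0f) | (0x1f & 0x0f)) & ~(0x10 & 0x1f) == 0x0f:
 * read-write bits take the new value, the write-1-to-clear bit is
 * cleared, and all other bits are preserved.
 */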
/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}

static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}

/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}

/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    assert(level != 0);
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
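/*
 * For reference, assuming VTD_PAGE_SHIFT_4K == 12 and
 * VTD_SL_LEVEL_BITS == 9: level 1 entries map 4K pages (shift 12),
 * level 2 maps 2M (shift 21) and level 3 maps 1G (shift 30).
 */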
static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
            (((entry->gfn & info->mask) == gfn) ||
             (entry->gfn == gfn_tlb));
}

/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1.
 */
static void vtd_reset_context_cache(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    VTDBus *vtd_bus;
    GHashTableIter bus_it;
    uint32_t devfn_it;

    trace_vtd_context_cache_reset();

    g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);

    while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    assert(s->iotlb);
    g_hash_table_remove_all(s->iotlb);
}

static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                  uint32_t level)
{
    return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
           ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}

static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    VTDIOTLBEntry *entry;
    uint64_t key;
    int level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
                                source_id, level);
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            goto out;
        }
    }

out:
    return entry;
}

static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             bool read_flags, bool write_flags,
                             uint32_t level)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->read_flags = read_flags;
    entry->write_flags = write_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    *key = vtd_get_iotlb_key(gfn, source_id, level);
    g_hash_table_replace(s->iotlb, key, entry);
}
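/*
 * Note on the IOTLB layout: vtd_get_iotlb_key() packs gfn, source id
 * and page-table level into a single uint64_t key, so one hash table
 * serves every device and page size at once; vtd_lookup_iotlb() simply
 * probes each supported level until a key hits.
 */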
/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    trace_vtd_irq_generate(msi.address, msi.data);

    apic_get_class()->send_msi(&msi);
}

/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        trace_vtd_err("There are previous interrupt conditions "
                      "to be serviced by software, fault event "
                      "is not generated.");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        trace_vtd_err("Interrupt Mask set, irq is not generated.");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}

/* Update the PPF field of Fault Status Register.
 * Should be called whenever the F field of any fault recording register
 * changes.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    trace_vtd_fsts_ppf(!!ppf_mask);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}

/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi = 0, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);

    trace_vtd_frr_new(index, hi, lo);
}

/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}
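/*
 * Layout reminder for the FRCD helpers above: each fault recording
 * register is 128 bits, hence the (index << 4) addressing; the low 64
 * bits hold the fault address, the high 64 bits hold SID, fault reason
 * and the F/T flags, hence the "+ 8" when touching the high half.
 */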
/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }

    trace_vtd_dmar_fault(source_id, fault, addr, is_write);

    if (fsts_reg & VTD_FSTS_PFO) {
        trace_vtd_err("New fault is not recorded due to "
                      "Primary Fault Overflow.");
        return;
    }

    if (vtd_try_collapse_fault(s, source_id)) {
        trace_vtd_err("New fault is not recorded due to "
                      "compression of faults.");
        return;
    }

    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        trace_vtd_err("Next Fault Recording Reg is used, "
                      "new fault is not recorded, set PFO field.");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        trace_vtd_err("There are pending faults already, "
                      "fault event is not generated.");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually causes the PPF to be Set.
         * So generate a fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}

/* Handle Invalidation Queue Errors of queued invalidation interface error
 * conditions.
 */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
        return;
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static inline bool vtd_root_entry_present(VTDRootEntry *root)
{
    return root->val & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        trace_vtd_re_invalid(re->rsvd, re->val);
        re->val = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->val = le64_to_cpu(re->val);
    return 0;
}

static inline bool vtd_ce_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}
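/*
 * Table formats, for orientation: the root table is indexed by bus
 * number and each 128-bit root entry (val + rsvd) points to a context
 * table; context tables are indexed by devfn and each 128-bit context
 * entry (lo + hi) carries the domain id, translation type, address
 * width and the second-level page-table pointer.
 */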
static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
                                           VTDContextEntry *ce)
{
    dma_addr_t addr;

    /* we have checked that root entry is present */
    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
        trace_vtd_re_invalid(root->rsvd, root->val);
        return -VTD_FR_CONTEXT_TABLE_INV;
    }
    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    return 0;
}

static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK;
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of a slpte located in @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}

/* Given an iova and the level of paging structure, return the offset
 * of current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
           ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}

/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}

/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}

static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_TT;
}

/* Return true if check passed, otherwise false */
static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
                                     VTDContextEntry *ce)
{
    switch (vtd_ce_get_type(ce)) {
    case VTD_CONTEXT_TT_MULTI_LEVEL:
        /* Always supported */
        break;
    case VTD_CONTEXT_TT_DEV_IOTLB:
        if (!x86_iommu->dt_supported) {
            return false;
        }
        break;
    case VTD_CONTEXT_TT_PASS_THROUGH:
        if (!x86_iommu->pt_supported) {
            return false;
        }
        break;
    default:
        /* Unknown type */
        return false;
    }
    return true;
}

static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
{
    uint32_t ce_agaw = vtd_ce_get_agaw(ce);
    return 1ULL << MIN(ce_agaw, VTD_MGAW);
}
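/*
 * Worked example of the AW decoding above: an AW field of 1 gives a
 * 3-level table (vtd_ce_get_level() == 3) with a 39-bit AGAW; AW == 2
 * gives 4 levels and 48 bits. vtd_iova_limit() then caps the usable
 * width at VTD_MGAW.
 */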
/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(ce) - 1));
}

static const uint64_t vtd_paging_entry_rsvd_field[] = {
    [0] = ~0ULL,
    /* For not large page */
    [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    /* For large page */
    [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
};

static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
        /* Maybe large page */
        return slpte & vtd_paging_entry_rsvd_field[level + 4];
    } else {
        return slpte & vtd_paging_entry_rsvd_field[level];
    }
}
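/*
 * vtd_paging_entry_rsvd_field[] above is indexed by level for regular
 * entries and by level + 4 for large-page entries: e.g. a 2M page
 * (level 2 with PS set) is checked against entry [6], where bits 20:11
 * must be zero in addition to the bits beyond the host address width.
 */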
/* Find the VTD address space associated with a given bus number */
static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
{
    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
    if (!vtd_bus) {
        /*
         * Iterate over the registered buses to find the one which
         * currently holds this bus number, and update the bus_num
         * lookup table:
         */
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
            if (pci_bus_num(vtd_bus->bus) == bus_num) {
                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
                return vtd_bus;
            }
        }
    }
    return vtd_bus;
}

/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, can be used for deciding the size of large page.
 */
static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;

    if (!vtd_iova_range_check(iova, ce)) {
        trace_vtd_err_dmar_iova_overflow(iova);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_err_dmar_slpte_read_error(iova, level);
            if (level == vtd_ce_get_level(ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            trace_vtd_err_dmar_slpte_perm_error(iova, level, slpte, is_write);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_err_dmar_slpte_resv_error(iova, level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte);
        level--;
    }
}

typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
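/*
 * A minimal hook sketch (illustrative only, not part of the device
 * model): a vtd_page_walk_hook that counts the mappings it is shown.
 *
 *     static int vtd_count_mappings_hook(IOMMUTLBEntry *entry,
 *                                        void *private)
 *     {
 *         (*(uint64_t *)private)++;
 *         return 0;
 *     }
 *
 * A hook returning a negative value aborts the walk and the error is
 * propagated by vtd_page_walk_level() below.
 */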
/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: hook func to be called when detected page
 * @private: private data to be passed into hook func
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @notify_unmap: whether we should notify invalid entries
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, vtd_page_walk_hook hook_fn,
                               void *private, uint32_t level,
                               bool read, bool write, bool notify_unmap)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEntry entry;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * table entries.
         */
        entry_valid = read_cur | write_cur;

        if (vtd_is_last_slpte(slpte, level)) {
            entry.target_as = &address_space_memory;
            entry.iova = iova & subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            entry.translated_addr = vtd_get_slpte_addr(slpte);
            entry.addr_mask = ~subpage_mask;
            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            if (!entry_valid && !notify_unmap) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
                                    entry.addr_mask, entry.perm);
            if (hook_fn) {
                ret = hook_fn(&entry, private);
                if (ret < 0) {
                    return ret;
                }
            }
        } else {
            if (!entry_valid) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
                                      MIN(iova_next, end), hook_fn, private,
                                      level - 1, read_cur, write_cur,
                                      notify_unmap);
            if (ret < 0) {
                return ret;
            }
        }

next:
        iova = iova_next;
    }

    return 0;
}

/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: the hook to be called for each detected area
 * @private: private data for the hook function
 */
static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
                         vtd_page_walk_hook hook_fn, void *private,
                         bool notify_unmap)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);

    if (!vtd_iova_range_check(start, ce)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(end, ce)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(ce);
    }

    return vtd_page_walk_level(addr, start, end, hook_fn, private,
                               level, true, true, notify_unmap);
}
/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(&re)) {
        /* Not error - it's okay we don't have root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    }

    if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
        trace_vtd_re_invalid(re.rsvd, re.val);
        return -VTD_FR_ROOT_ENTRY_RSVD;
    }

    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_ce_present(ce)) {
        /* Not error - it's okay we don't have context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    }

    if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
        (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    /* Check if the programming of context-entry is valid */
    if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    /* Do translation type check */
    if (!vtd_ce_type_check(x86_iommu, ce)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    return 0;
}

/*
 * Fetch translation type for specific device. Returns <0 if error
 * happens, otherwise return the shifted type to check against
 * VTD_CONTEXT_TT_*.
 */
static int vtd_dev_get_trans_type(VTDAddressSpace *as)
{
    IntelIOMMUState *s;
    VTDContextEntry ce;
    int ret;

    s = as->iommu_state;

    ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
                                   as->devfn, &ce);
    if (ret) {
        return ret;
    }

    return vtd_ce_get_type(&ce);
}

static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
{
    int ret;

    assert(as);

    ret = vtd_dev_get_trans_type(as);
    if (ret < 0) {
        /*
         * Possibly failed to parse the context entry for some reason
         * (e.g., during init, or any guest configuration errors on
         * context entries). We should assume PT not enabled for
         * safety.
         */
        return false;
    }

    return ret == VTD_CONTEXT_TT_PASS_THROUGH;
}

/* Return whether the device is using IOMMU translation. */
static bool vtd_switch_address_space(VTDAddressSpace *as)
{
    bool use_iommu;

    assert(as);

    use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   use_iommu);

    /* Turn off first then on the other */
    if (use_iommu) {
        memory_region_set_enabled(&as->sys_alias, false);
        memory_region_set_enabled(&as->iommu, true);
    } else {
        memory_region_set_enabled(&as->iommu, false);
        memory_region_set_enabled(&as->sys_alias, true);
    }

    return use_iommu;
}

static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    GHashTableIter iter;
    VTDBus *vtd_bus;
    int i;

    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
            if (!vtd_bus->dev_as[i]) {
                continue;
            }
            vtd_switch_address_space(vtd_bus->dev_as[i]);
        }
    }
}

static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}
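/*
 * Design note: vtd_switch_address_space() keeps exactly one of the two
 * regions enabled at any time. With translation disabled (or
 * passthrough active) DMA flows through the sys_alias region straight
 * to system memory; otherwise it goes through the IOMMU region and is
 * translated, which is what makes per-device passthrough switching
 * possible at runtime.
 */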
static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};

/* A fault condition is "qualified" if it is reported to software only when
 * the FPD field in the context-entry used to process the faulting request
 * is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}

static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}

static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
{
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    bool success = false;

    vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
    if (!vtd_bus) {
        goto out;
    }

    vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
    if (!vtd_as) {
        goto out;
    }

    if (vtd_switch_address_space(vtd_as) == false) {
        /* We switched off IOMMU region successfully. */
        success = true;
    }

out:
    trace_vtd_pt_enable_fast_path(source_id, success);
}
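/*
 * "Success" for the fast path above means vtd_switch_address_space()
 * returned false, i.e. the device's IOMMU region was switched off and
 * its DMA now takes the untranslated sys_alias path.
 */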
/* Map dev to context-entry then do a paging-structures walk to do an IOMMU
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combination of device and function numbers
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contains the addr to be translated and result
 *
 * Returns true if translation is successful, otherwise false.
 */
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
    uint64_t slpte, page_mask;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have a standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    /* Try to fetch slpte from the IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                 iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        reads = iotlb_entry->read_flags;
        writes = iotlb_entry->write_flags;
        page_mask = iotlb_entry->mask;
        goto out;
    }

    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                trace_vtd_fault_disabled();
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            goto error;
        }
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    /*
     * We don't need to translate for pass-through context entries.
     * Also, let's ignore IOTLB caching as well for PT devices.
     */
    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
        entry->iova = addr & VTD_PAGE_MASK;
        entry->translated_addr = entry->iova;
        entry->addr_mask = ~VTD_PAGE_MASK;
        entry->perm = IOMMU_RW;
        trace_vtd_translate_pt(source_id, entry->iova);

        /*
         * When this happens, it means firstly caching-mode is not
         * enabled, and this is the first passthrough translation for
         * the device. Let's enable the fast path for passthrough.
         *
         * When passthrough is disabled again for the device, we can
         * capture it via the context entry invalidation, then the
         * IOMMU region can be swapped back.
         */
        vtd_pt_enable_fast_path(s, source_id);

        return true;
    }

    ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
                               &reads, &writes);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            trace_vtd_fault_disabled();
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        goto error;
    }

    page_mask = vtd_slpt_level_page_mask(level);
    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     reads, writes, level);
out:
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = IOMMU_ACCESS_FLAG(reads, writes);
    return true;

error:
    entry->iova = 0;
    entry->translated_addr = 0;
    entry->addr_mask = 0;
    entry->perm = IOMMU_NONE;
    return false;
}

static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root_extended = s->root & VTD_RTADDR_RTT;
    s->root &= VTD_RTADDR_ADDR_MASK;

    trace_vtd_reg_dmar_root(s->root, s->root_extended);
}

static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
                               uint32_t index, uint32_t mask)
{
    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
}

static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
    uint64_t value = 0;
    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
    s->intr_root = value & VTD_IRTA_ADDR_MASK;
    s->intr_eime = value & VTD_IRTA_EIME;

    /* Notify global invalidation */
    vtd_iec_notify_all(s, true, 0, 0);

    trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
}

static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        memory_region_iommu_replay_all(&node->vtd_as->iommu);
    }
}

static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_cc_global();
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache(s);
    }
    vtd_switch_address_space_all(s);
    /*
     * From VT-d spec 6.5.2.1, a global context entry invalidation
     * should be followed by an IOTLB global invalidation, so we should
     * be safe even without this. However, let's replay the region as
     * well to be safer, and go back here when we need finer tuning
     * for the VT-d emulation code.
     */
    vtd_iommu_replay_all(s);
}
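/*
 * The generation counter trick above invalidates every cached context
 * entry in O(1): entries are only considered valid when their
 * generation matches s->context_cache_gen, so bumping the counter
 * stales them all at once, and a full walk-and-reset is only needed
 * when the counter would wrap at VTD_CONTEXT_CACHE_GEN_MAX.
 */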
/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    uint16_t mask;
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    uint8_t bus_n, devfn;
    uint16_t devfn_it;

    trace_vtd_inv_desc_cc_devices(source_id, func_mask);

    switch (func_mask & 3) {
    case 0:
        mask = 0;   /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4;   /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6;   /* Mask bits 2:1 in the SID field */
        break;
    case 3:
        mask = 7;   /* Mask bits 2:0 in the SID field */
        break;
    }
    mask = ~mask;

    bus_n = VTD_SID_TO_BUS(source_id);
    vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
    if (vtd_bus) {
        devfn = VTD_SID_TO_DEVFN(source_id);
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
                trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                             VTD_PCI_FUNC(devfn_it));
                vtd_as->context_cache_entry.context_cache_gen = 0;
                /*
                 * Switch the address space when needed, in case the
                 * device's passthrough bit has been toggled.
                 */
                vtd_switch_address_space(vtd_as);
                /*
                 * When a device moves out of (or into) a domain, a
                 * replay() suits here to notify all the registered
                 * IOMMU_NOTIFIER_MAP notifiers about this change.
                 * This does no harm even if we have no such notifier
                 * registered - the IOMMU notification framework will
                 * skip MAP notifications if that happened.
                 */
                memory_region_iommu_replay_all(&vtd_as->iommu);
            }
        }
    }
}
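/*
 * Function-mask example: FM == 3 yields mask == ~7, so the comparison
 * above ignores the function number bits of devfn entirely and the
 * invalidation hits all eight functions of the addressed device.
 */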
/* Context-cache invalidation
 * Returns the Context Actual Invalidation Granularity.
 * @val: the content of the CCMD_REG
 */
static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
{
    uint64_t caig;
    uint64_t type = val & VTD_CCMD_CIRG_MASK;

    switch (type) {
    case VTD_CCMD_DOMAIN_INVL:
        /* Fall through */
    case VTD_CCMD_GLOBAL_INVL:
        caig = VTD_CCMD_GLOBAL_INVL_A;
        vtd_context_global_invalidate(s);
        break;

    case VTD_CCMD_DEVICE_INVL:
        caig = VTD_CCMD_DEVICE_INVL_A;
        vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
        break;

    default:
        trace_vtd_err("Context cache invalidate type error.");
        caig = 0;
    }
    return caig;
}

static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_iotlb_global();
    vtd_reset_iotlb(s);
    vtd_iommu_replay_all(s);
}

static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    VTDAddressSpace *vtd_as;

    trace_vtd_inv_desc_iotlb_domain(domain_id);

    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                &domain_id);

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                      vtd_as->devfn, &ce) &&
            domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            memory_region_iommu_replay_all(&vtd_as->iommu);
        }
    }
}

static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
                                           void *private)
{
    memory_region_notify_iommu((MemoryRegion *)private, *entry);
    return 0;
}

static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                             uint16_t domain_id, hwaddr addr,
                                             uint8_t am)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    int ret;

    QLIST_FOREACH(node, &(s->notifiers_list), next) {
        VTDAddressSpace *vtd_as = node->vtd_as;
        ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                       vtd_as->devfn, &ce);
        if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
                          vtd_page_invalidate_notify_hook,
                          (void *)&vtd_as->iommu, true);
        }
    }
}

static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                      hwaddr addr, uint8_t am)
{
    VTDIOTLBPageInvInfo info;

    trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);

    assert(am <= VTD_MAMV);
    info.domain_id = domain_id;
    info.addr = addr;
    info.mask = ~((1 << am) - 1);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
    vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
}
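/*
 * The address mask (am) used above encodes the invalidation size as a
 * power of two of 4K pages: am == 0 is a single page, am == 9 covers
 * 2M, which is why info.mask is ~((1 << am) - 1) and the notified
 * range spans (1 << am) * VTD_PAGE_SIZE bytes.
 */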
/* Flush IOTLB
 * Returns the IOTLB Actual Invalidation Granularity.
 * @val: the content of the IOTLB_REG
 */
static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
{
    uint64_t iaig;
    uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
    uint16_t domain_id;
    hwaddr addr;
    uint8_t am;

    switch (type) {
    case VTD_TLB_GLOBAL_FLUSH:
        iaig = VTD_TLB_GLOBAL_FLUSH_A;
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_TLB_DSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        iaig = VTD_TLB_DSI_FLUSH_A;
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_TLB_PSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
        am = VTD_IVA_AM(addr);
        addr = VTD_IVA_ADDR(addr);
        if (am > VTD_MAMV) {
            trace_vtd_err("IOTLB PSI flush: address mask overflow.");
            iaig = 0;
            break;
        }
        iaig = VTD_TLB_PSI_FLUSH_A;
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_err("IOTLB flush: invalid granularity.");
        iaig = 0;
    }
    return iaig;
}

static inline bool vtd_queued_inv_enable_check(IntelIOMMUState *s)
{
    return s->iq_tail == 0;
}

static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
{
    return s->qi_enabled && (s->iq_tail == s->iq_head) &&
           (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
}

static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
{
    uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);

    trace_vtd_inv_qi_enable(en);

    if (en) {
        if (vtd_queued_inv_enable_check(s)) {
            s->iq = iqa_val & VTD_IQA_IQA_MASK;
            /* 2^(x+8) entries */
            s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
            s->qi_enabled = true;
            trace_vtd_inv_qi_setup(s->iq, s->iq_size);
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
        } else {
            trace_vtd_err_qi_enable(s->iq_tail);
        }
    } else {
        if (vtd_queued_inv_disable_check(s)) {
            /* disable Queued Invalidation */
            vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
            s->iq_head = 0;
            s->qi_enabled = false;
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
        } else {
            trace_vtd_err_qi_disable(s->iq_head, s->iq_tail,
                                     s->iq_last_desc_type);
        }
    }
}

/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
    vtd_root_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
}

/* Set Interrupt Remap Table Pointer */
static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
{
    vtd_interrupt_remap_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
}
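/*
 * Queue sizing example for vtd_handle_gcmd_qie() above: QS == 0 in
 * IQA_REG gives 2^8 == 256 descriptors; at 16 bytes per descriptor
 * that is exactly one 4K page, the smallest queue a driver can
 * request.
 */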
/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
    if (s->dmar_enabled == en) {
        return;
    }

    trace_vtd_dmar_enable(en);

    if (en) {
        s->dmar_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
    } else {
        s->dmar_enabled = false;

        /* Clear the index of Fault Recording Register */
        s->next_frcd_reg = 0;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
    }

    vtd_switch_address_space_all(s);
}

/* Handle Interrupt Remap Enable/Disable */
static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
{
    trace_vtd_ir_enable(en);

    if (en) {
        s->intr_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
    } else {
        s->intr_enabled = false;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
    }
}

/* Handle write to Global Command Register */
static void vtd_handle_gcmd_write(IntelIOMMUState *s)
{
    uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
    uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
    uint32_t changed = status ^ val;

    trace_vtd_reg_write_gcmd(status, val);
    if (changed & VTD_GCMD_TE) {
        /* Translation enable/disable */
        vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
    }
    if (val & VTD_GCMD_SRTP) {
        /* Set/update the root-table pointer */
        vtd_handle_gcmd_srtp(s);
    }
    if (changed & VTD_GCMD_QIE) {
        /* Queued Invalidation Enable */
        vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
    }
    if (val & VTD_GCMD_SIRTP) {
        /* Set/update the interrupt remapping root-table pointer */
        vtd_handle_gcmd_sirtp(s);
    }
    if (changed & VTD_GCMD_IRE) {
        /* Interrupt remap enable/disable */
        vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
    }
}
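/*
 * Note the two flavours of GCMD bits handled above: TE, QIE and IRE
 * are level bits, acted on only when they change relative to GSTS,
 * while SRTP and SIRTP are one-shot commands that take effect on every
 * write of 1.
 */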
/* Handle write to Context Command Register */
static void vtd_handle_ccmd_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);

    /* Context-cache invalidation request */
    if (val & VTD_CCMD_ICC) {
        if (s->qi_enabled) {
            trace_vtd_err("Queued Invalidation enabled, "
                          "should not use register-based invalidation");
            return;
        }
        ret = vtd_context_cache_invalidate(s, val);
        /* Invalidation completed, clear ICC and report the granularity */
        vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
                                      ret);
    }
}

/* Handle write to IOTLB Invalidation Register */
static void vtd_handle_iotlb_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);

    /* IOTLB invalidation request */
    if (val & VTD_TLB_IVT) {
        if (s->qi_enabled) {
            trace_vtd_err("Queued Invalidation enabled, "
                          "should not use register-based invalidation.");
            return;
        }
        ret = vtd_iotlb_flush(s, val);
        /* Invalidation completed, clear IVT and report the granularity */
        vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
                                      VTD_TLB_FLUSH_GRANU_MASK_A, ret);
    }
}

/* Fetch an Invalidation Descriptor from the Invalidation Queue */
static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
                             VTDInvDesc *inv_desc)
{
    dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
    if (dma_memory_read(&address_space_memory, addr, inv_desc,
                        sizeof(*inv_desc))) {
        trace_vtd_err("Read INV DESC failed.");
        inv_desc->lo = 0;
        inv_desc->hi = 0;
        return false;
    }
    inv_desc->lo = le64_to_cpu(inv_desc->lo);
    inv_desc->hi = le64_to_cpu(inv_desc->hi);
    return true;
}

static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
        (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
        /* Status Write */
        uint32_t status_data = (uint32_t)(inv_desc->lo >>
                                          VTD_INV_DESC_WAIT_DATA_SHIFT);

        assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));

        /* FIXME: need to be masked with HAW? */
        dma_addr_t status_addr = inv_desc->hi;
        trace_vtd_inv_desc_wait_sw(status_addr, status_data);
        status_data = cpu_to_le32(status_data);
        if (dma_memory_write(&address_space_memory, status_addr, &status_data,
                             sizeof(status_data))) {
            trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
            return false;
        }
    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
        /* Interrupt flag */
        vtd_generate_completion_event(s);
    } else {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
{
    uint16_t sid, fmask;

    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
    case VTD_INV_DESC_CC_DOMAIN:
        trace_vtd_inv_desc_cc_domain(
            (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
        /* Fall through */
    case VTD_INV_DESC_CC_GLOBAL:
        vtd_context_global_invalidate(s);
        break;

    case VTD_INV_DESC_CC_DEVICE:
        sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
        fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
        vtd_context_device_invalidate(s, sid, fmask);
        break;

    default:
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
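/*
 * Wait descriptor semantics, as implemented by vtd_process_wait_desc()
 * above: Status Write (SW) means "store status_data to status_addr on
 * completion", Interrupt Flag (IF) means "raise an invalidation
 * completion event"; the SW+IF combination is not implemented (guarded
 * by an assert), and a descriptor with neither bit, or with reserved
 * bits set, is rejected as invalid.
 */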
static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        if (am > VTD_MAMV) {
            trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
                                     VTDInvDesc *inv_desc)
{
    trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
                           inv_desc->iec.index,
                           inv_desc->iec.index_mask);

    vtd_iec_notify_all(s, !inv_desc->iec.granularity,
                       inv_desc->iec.index,
                       inv_desc->iec.index_mask);
    return true;
}

static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                          VTDInvDesc *inv_desc)
{
    VTDAddressSpace *vtd_dev_as;
    IOMMUTLBEntry entry;
    struct VTDBus *vtd_bus;
    hwaddr addr;
    uint64_t sz;
    uint16_t sid;
    uint8_t devfn;
    bool size;
    uint8_t bus_num;

    addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
    devfn = sid & 0xff;
    bus_num = sid >> 8;
    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);

    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
    if (!vtd_bus) {
        goto done;
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];
    if (!vtd_dev_as) {
        goto done;
    }

    /* According to ATS spec table 2.4:
     * S = 0, bits 15:12 = xxxx     range size: 4K
     * S = 1, bits 15:12 = xxx0     range size: 8K
     * S = 1, bits 15:12 = xx01     range size: 16K
     * S = 1, bits 15:12 = x011     range size: 32K
     * S = 1, bits 15:12 = 0111     range size: 64K
     * ...
     */
    if (size) {
        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
        addr &= ~(sz - 1);
    } else {
        sz = VTD_PAGE_SIZE;
    }

    entry.target_as = &vtd_dev_as->as;
    entry.addr_mask = sz - 1;
    entry.iova = addr;
    entry.perm = IOMMU_NONE;
    entry.translated_addr = 0;
    memory_region_notify_iommu(&vtd_dev_as->iommu, entry);

done:
    return true;
}
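/*
 * ATS size decode example for the code above: with S == 1 and address
 * bits 15:12 == 0111b, cto64(addr >> VTD_PAGE_SHIFT) == 3, so
 * sz == (VTD_PAGE_SIZE * 2) << 3 == 64K and addr is aligned down to a
 * 64K boundary, matching the ATS spec table quoted in the comment.
 */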
static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    trace_vtd_inv_qi_head(s->iq_head);
    if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }
    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_WAIT:
        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IEC:
        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_DEVICE:
        trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    default:
        trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
        return false;
    }
    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}

/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    trace_vtd_inv_qi_fetch();

    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        trace_vtd_err_qi_tail(s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
                         VTD_IQH_QH_MASK);
    }
}

/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    s->iq_tail = VTD_IQT_QT(val);
    trace_vtd_inv_qi_tail(s->iq_tail);

    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}

static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        trace_vtd_fsts_clear_ip();
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}
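/*
 * Progress reporting: vtd_fetch_inv_desc() refreshes IQH_REG after
 * every descriptor it retires, so a driver polling the head pointer
 * can observe forward progress while the queue is drained.
 */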
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);

    trace_vtd_reg_write_fectl(fectl_reg);

    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        trace_vtd_reg_ics_clear_ip();
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old and new values to conclude that software
     * cleared the IM field, or is it enough to check whether IM is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    trace_vtd_reg_write_iectl(iectl_reg);

    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    trace_vtd_reg_read(addr, size);

    if (addr + size > DMAR_REG_SIZE) {
        trace_vtd_err("Read MMIO over range.");
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            val = s->root & ((1ULL << 32) - 1);
        } else {
            val = s->root;
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = s->root >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }

    return val;
}

static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    trace_vtd_reg_write(addr, size, val);

    if (addr + size > DMAR_REG_SIZE) {
        trace_vtd_err("Write MMIO over range.");
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* Bits 19:63 of IQT_REG are RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
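        /*
         * DMAR_FRCD_REG_0_2 is the third 32-bit word of the 128-bit
         * fault recording register; a 64-bit write starting here also
         * covers DMAR_FRCD_REG_0_3 and thus bit 127 (Fault).
         */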
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}

static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
                                         IOMMUAccessFlags flag)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry iotlb = {
        /* We'll fill in the rest later. */
        .target_as = &address_space_memory,
    };
    bool success;

    if (likely(s->dmar_enabled)) {
        success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
                                         addr, flag & IOMMU_WO, &iotlb);
    } else {
        /* DMAR disabled, passthrough, use 4K pages */
        iotlb.iova = addr & VTD_PAGE_MASK_4K;
        iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
        iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
        iotlb.perm = IOMMU_RW;
        success = true;
    }

    if (likely(success)) {
        trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
                                 VTD_PCI_SLOT(vtd_as->devfn),
                                 VTD_PCI_FUNC(vtd_as->devfn),
                                 iotlb.iova, iotlb.translated_addr,
                                 iotlb.addr_mask);
    } else {
        trace_vtd_err_dmar_translate(pci_bus_num(vtd_as->bus),
                                     VTD_PCI_SLOT(vtd_as->devfn),
                                     VTD_PCI_FUNC(vtd_as->devfn),
                                     iotlb.iova);
    }

    return iotlb;
}

static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IntelIOMMUNotifierNode *node = NULL;
    IntelIOMMUNotifierNode *next_node = NULL;

    if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
        error_report("We need to set caching-mode=on for intel-iommu to enable "
                     "device assignment with IOMMU protection.");
        exit(1);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        node = g_malloc0(sizeof(*node));
        node->vtd_as = vtd_as;
        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
        return;
    }

    /* Update the notifier node with the new flags */
    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
        if (node->vtd_as == vtd_as) {
            if (new == IOMMU_NOTIFIER_NONE) {
                QLIST_REMOVE(node, next);
                g_free(node);
            }
            return;
        }
    }
}

static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_BOOL(root_extended, IntelIOMMUState),
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};

static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* Read the IRTE entry with a specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                        VTD_IR_TableEntry *entry, uint16_t sid)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] =
        {0xffff, 0xfffb, 0xfff9, 0xfff8};
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr, entry,
                        sizeof(*entry))) {
        trace_vtd_err("Memory read failed for IRTE.");
        return -VTD_FR_IR_ROOT_INVAL;
    }

    trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));

    if (!entry->irte.present) {
        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
                           le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_ENTRY_P;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
                           le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_IRTE_RSVD;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = le32_to_cpu(entry->irte.source_id);
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                trace_vtd_err_irte_sid(index, sid, source_id);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                trace_vtd_err_irte_sid_bus(index, bus, bus_min, bus_max);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        default:
            trace_vtd_err_irte_svt(index, entry->irte.sid_vtype);
            /* Take this as a verification failure.
             */
            return -VTD_FR_IR_SID_ERR;
        }
    }

    return 0;
}

/* Fetch IRQ information for a specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                             VTDIrq *irq, uint16_t sid)
{
    VTD_IR_TableEntry irte = {};
    int ret = 0;

    ret = vtd_irte_get(iommu, index, &irte, sid);
    if (ret) {
        return ret;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = le32_to_cpu(irte.irte.dest_id);
    if (!iommu->intr_eime) {
#define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
#define  VTD_IR_APIC_DEST_SHIFT        (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
            VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
                       irq->delivery_mode, irq->dest, irq->dest_mode);

    return 0;
}

/* Generate one MSI message from VTDIrq info */
static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
{
    VTD_MSIMessage msg = {};

    /* Generate address bits */
    msg.dest_mode = irq->dest_mode;
    msg.redir_hint = irq->redir_hint;
    msg.dest = irq->dest;
    msg.__addr_hi = irq->dest & 0xffffff00;
    msg.__addr_head = cpu_to_le32(0xfee);
    /* Keep this from the original MSI address bits */
    msg.__not_used = irq->msi_addr_last_bits;

    /* Generate data bits */
    msg.vector = irq->vector;
    msg.delivery_mode = irq->delivery_mode;
    msg.level = 1;
    msg.trigger_mode = irq->trigger_mode;

    msg_out->address = msg.msi_addr;
    msg_out->data = msg.msi_data;
}

/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid)
{
    int ret = 0;
    VTD_IR_MSIAddress addr;
    uint16_t index;
    VTDIrq irq = {};

    assert(origin && translated);

    trace_vtd_ir_remap_msi_req(origin->address, origin->data);

    if (!iommu || !iommu->intr_enabled) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        trace_vtd_err("MSI address high 32 bits non-zero when "
                      "Interrupt Remapping enabled.");
        return -VTD_FR_IR_REQ_RSVD;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        trace_vtd_err("MSI address low 32 bits invalid.");
        return -VTD_FR_IR_REQ_RSVD;
    }

    /* This is compatible mode.
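     * With the interrupt format bit clear, the request uses the
     * compatibility format and is passed through without remapping.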
     */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);

#define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
#define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    ret = vtd_remap_irq_get(iommu, index, &irq, sid);
    if (ret) {
        return ret;
    }

    if (addr.addr.sub_valid) {
        trace_vtd_ir_remap_type("MSI");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            trace_vtd_err_ir_msi_invalid(sid, origin->address, origin->data);
            return -VTD_FR_IR_REQ_RSVD;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        trace_vtd_ir_remap_type("IOAPIC");
        /* The IOAPIC entry vector should be aligned with the IRTE vector
         * (see VT-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE
         * (see VT-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
                                      irq.trigger_mode);
        }
    }

    /*
     * We'd better keep the last two bits, assuming that the guest OS
     * might modify them. Keeping them does not hurt after all.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate VTDIrq to an MSI message */
    vtd_generate_msi_message(&irq, translated);

out:
    trace_vtd_ir_remap_msi(origin->address, origin->data,
                           translated->address, translated->data);
    return 0;
}

static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid);
}

static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    int ret = 0;
    MSIMessage from = {}, to = {};
    uint16_t sid = X86_IOMMU_SID_INVALID;

    from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
    from.data = (uint32_t) value;

    if (!attrs.unspecified) {
        /* We have an explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
    if (ret) {
        /* TODO: report error */
        /* Drop this interrupt */
        return MEMTX_ERROR;
    }

    apic_get_class()->send_msi(&to);

    return MEMTX_OK;
}

static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
{
    uintptr_t key = (uintptr_t)bus;
    VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
    VTDAddressSpace *vtd_dev_as;
    char name[128];
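
    /*
     * Per-bus bookkeeping and per-devfn address spaces are allocated
     * lazily on first lookup and are never freed.
     */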
    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));
        *new_key = (uintptr_t)bus;
        /* No corresponding free() */
        vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) *
                            X86_IOMMU_PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];

    if (!vtd_dev_as) {
        snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
        vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;

        /*
         * Memory region relationships look like this (the address ranges
         * show only the lower 32 bits, for brevity):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------+
         * | vtd_root        | 00000000-ffffffff |        0 |
         * | intel_iommu     | 00000000-ffffffff |        1 |
         * | vtd_sys_alias   | 00000000-ffffffff |        1 |
         * | intel_iommu_ir  | fee00000-feefffff |       64 |
         * |-----------------+-------------------+----------|
         *
         * We enable/disable DMAR by switching enablement for the
         * vtd_sys_alias and intel_iommu regions. The IR region is
         * always enabled.
         */
        memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
                                 &s->iommu_ops, "intel_iommu_dmar",
                                 UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
                                 "vtd_sys_alias", get_system_memory(),
                                 0, memory_region_size(get_system_memory()));
        memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
                              &vtd_mem_ir_ops, s, "intel_iommu_ir",
                              VTD_INTERRUPT_ADDR_SIZE);
        memory_region_init(&vtd_dev_as->root, OBJECT(s),
                           "vtd_root", UINT64_MAX);
        memory_region_add_subregion_overlap(&vtd_dev_as->root,
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 64);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->sys_alias, 1);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->iommu, 1);
        vtd_switch_address_space(vtd_dev_as);
    }
    return vtd_dev_as;
}

/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    IOMMUTLBEntry entry;
    hwaddr size;
    hwaddr start = n->start;
    hwaddr end = n->end;

    /*
     * Note: all the code in this function assumes that IOVAs are no
     * wider than VTD_MGAW bits (which is mandated by the VT-d spec);
     * otherwise we would need to consider 64-bit overflow.
     */

    if (end > VTD_ADDRESS_SIZE) {
        /*
         * No need to unmap regions beyond the whole address space
         * that VT-d supports
         */
        end = VTD_ADDRESS_SIZE;
    }

    assert(start <= end);
    size = end - start;

    if (ctpop64(size) != 1) {
        /*
         * This size cannot form a correct mask. Enlarge it to the
         * smallest power of two that covers it.
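         *
         * For example, a 12 KiB (0x3000) range is enlarged to 16 KiB
         * (0x4000), so that (size - 1) forms a valid address mask.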
         */
        int bits = 64 - clz64(size);
        if (bits > VTD_MGAW) {
            /* Should not happen, but in case it does, limit it */
            bits = VTD_MGAW;
        }
        size = 1ULL << bits;
    }

    entry.target_as = &address_space_memory;
    /* Adjust iova for the size */
    entry.iova = n->start & ~(size - 1);
    /* This field is meaningless for unmap */
    entry.translated_addr = 0;
    entry.perm = IOMMU_NONE;
    entry.addr_mask = size - 1;

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             entry.iova, size);

    memory_region_notify_one(n, &entry);
}

static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}

static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
{
    memory_region_notify_one((IOMMUNotifier *)private, entry);
    return 0;
}

static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;

    /*
     * The replay can be triggered by either an invalidation or a newly
     * created entry. Either way, we release existing mappings first
     * (which means flushing caches for UNMAP-only notifiers).
     */
    vtd_address_space_unmap(vtd_as, n);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
                                  ce.hi, ce.lo);
        vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }

    return;
}

/* Do the initialization. It will also be called on reset, so pay
 * attention when adding new initialization code.
 */
static void vtd_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->iommu_ops.translate = vtd_iommu_translate;
    s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
    s->iommu_ops.replay = vtd_iommu_replay;
    s->root = 0;
    s->root_extended = false;
    s->dmar_enabled = false;
    s->iq_head = 0;
    s->iq_tail = 0;
    s->iq = 0;
    s->iq_size = 0;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->next_frcd_reg = 0;
    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
             VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    if (x86_iommu->intr_supported) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (x86_iommu->pt_supported) {
        s->ecap |= VTD_ECAP_PT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }

    vtd_reset_context_cache(s);
    vtd_reset_iotlb(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging is not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations that report the PLMR and PHMR
     * fields as Clear in CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}

/* Should not reset address spaces on reset, because devices will still use
 * the address spaces they got at first (they won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    vtd_init(s);

    /*
     * On device reset, throw away all mappings and external caches
     */
    vtd_address_space_unmap_all(s);
}

static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn);
    return &vtd_as->as;
}

static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    /* Currently Intel IOMMU IR only supports "kernel-irqchip={off|split}" */
    if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        error_setg(errp, "Intel Interrupt Remapping cannot work with "
                         "kernel-irqchip=on, please use 'split|off'.");
        return false;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
                      && x86_iommu->intr_supported ?
                      ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (!kvm_irqchip_in_kernel()) {
            error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
            return false;
        }
        if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    return true;
}

static void vtd_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    PCMachineState *pcms =
        PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));
    PCIBus *bus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);

    if (!pcms) {
        error_setg(errp, "Machine-type '%s' not supported by intel-iommu",
                   mc->name);
        return;
    }

    bus = pcms->bus;
    x86_iommu->type = TYPE_INTEL;

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->notifiers_list);
    memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash,
                                                vtd_uint64_equal,
                                                g_free, g_free);
    vtd_init(s);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
    pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under the root PCI bus. */
    pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
}

static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);

    dc->reset = vtd_reset;
    dc->vmsd = &vtd_vmstate;
    dc->props = vtd_properties;
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
}

static const TypeInfo vtd_info = {
    .name = TYPE_INTEL_IOMMU_DEVICE,
    .parent = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init = vtd_class_init,
};

static void vtd_register_types(void)
{
    type_register_static(&vtd_info);
}

type_init(vtd_register_types)
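
/*
 * Typical usage, as a sketch (exact option spellings can vary across
 * QEMU versions; "intremap" comes from the x86-iommu parent class):
 *
 *   qemu-system-x86_64 -machine q35,accel=kvm,kernel-irqchip=split \
 *       -device intel-iommu,intremap=on,caching-mode=on ...
 *
 * caching-mode=on is needed as soon as a MAP notifier is registered,
 * e.g. for device assignment; see vtd_iommu_notify_flag_changed().
 */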