/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
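 * (The fd itself is created on first use, by vfio_kvm_device_add_group()
 * below, via the KVM_CREATE_DEVICE ioctl.)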
55 */ 56 static int vfio_kvm_device_fd = -1; 57 #endif 58 59 /* 60 * Common VFIO interrupt disable 61 */ 62 void vfio_disable_irqindex(VFIODevice *vbasedev, int index) 63 { 64 struct vfio_irq_set irq_set = { 65 .argsz = sizeof(irq_set), 66 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER, 67 .index = index, 68 .start = 0, 69 .count = 0, 70 }; 71 72 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); 73 } 74 75 void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index) 76 { 77 struct vfio_irq_set irq_set = { 78 .argsz = sizeof(irq_set), 79 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK, 80 .index = index, 81 .start = 0, 82 .count = 1, 83 }; 84 85 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); 86 } 87 88 void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index) 89 { 90 struct vfio_irq_set irq_set = { 91 .argsz = sizeof(irq_set), 92 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK, 93 .index = index, 94 .start = 0, 95 .count = 1, 96 }; 97 98 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); 99 } 100 101 static inline const char *action_to_str(int action) 102 { 103 switch (action) { 104 case VFIO_IRQ_SET_ACTION_MASK: 105 return "MASK"; 106 case VFIO_IRQ_SET_ACTION_UNMASK: 107 return "UNMASK"; 108 case VFIO_IRQ_SET_ACTION_TRIGGER: 109 return "TRIGGER"; 110 default: 111 return "UNKNOWN ACTION"; 112 } 113 } 114 115 static const char *index_to_str(VFIODevice *vbasedev, int index) 116 { 117 if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { 118 return NULL; 119 } 120 121 switch (index) { 122 case VFIO_PCI_INTX_IRQ_INDEX: 123 return "INTX"; 124 case VFIO_PCI_MSI_IRQ_INDEX: 125 return "MSI"; 126 case VFIO_PCI_MSIX_IRQ_INDEX: 127 return "MSIX"; 128 case VFIO_PCI_ERR_IRQ_INDEX: 129 return "ERR"; 130 case VFIO_PCI_REQ_IRQ_INDEX: 131 return "REQ"; 132 default: 133 return NULL; 134 } 135 } 136 137 int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex, 138 int action, int fd, Error **errp) 139 { 140 struct vfio_irq_set *irq_set; 141 int argsz, ret = 0; 142 const char *name; 143 int32_t *pfd; 144 145 argsz = sizeof(*irq_set) + sizeof(*pfd); 146 147 irq_set = g_malloc0(argsz); 148 irq_set->argsz = argsz; 149 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action; 150 irq_set->index = index; 151 irq_set->start = subindex; 152 irq_set->count = 1; 153 pfd = (int32_t *)&irq_set->data; 154 *pfd = fd; 155 156 if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) { 157 ret = -errno; 158 } 159 g_free(irq_set); 160 161 if (!ret) { 162 return 0; 163 } 164 165 error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure"); 166 167 name = index_to_str(vbasedev, index); 168 if (name) { 169 error_prepend(errp, "%s-%d: ", name, subindex); 170 } else { 171 error_prepend(errp, "index %d-%d: ", index, subindex); 172 } 173 error_prepend(errp, 174 "Failed to %s %s eventfd signaling for interrupt ", 175 fd < 0 ? 
"tear down" : "set up", action_to_str(action)); 176 return ret; 177 } 178 179 /* 180 * IO Port/MMIO - Beware of the endians, VFIO is always little endian 181 */ 182 void vfio_region_write(void *opaque, hwaddr addr, 183 uint64_t data, unsigned size) 184 { 185 VFIORegion *region = opaque; 186 VFIODevice *vbasedev = region->vbasedev; 187 union { 188 uint8_t byte; 189 uint16_t word; 190 uint32_t dword; 191 uint64_t qword; 192 } buf; 193 194 switch (size) { 195 case 1: 196 buf.byte = data; 197 break; 198 case 2: 199 buf.word = cpu_to_le16(data); 200 break; 201 case 4: 202 buf.dword = cpu_to_le32(data); 203 break; 204 case 8: 205 buf.qword = cpu_to_le64(data); 206 break; 207 default: 208 hw_error("vfio: unsupported write size, %d bytes", size); 209 break; 210 } 211 212 if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { 213 error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64 214 ",%d) failed: %m", 215 __func__, vbasedev->name, region->nr, 216 addr, data, size); 217 } 218 219 trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size); 220 221 /* 222 * A read or write to a BAR always signals an INTx EOI. This will 223 * do nothing if not pending (including not in INTx mode). We assume 224 * that a BAR access is in response to an interrupt and that BAR 225 * accesses will service the interrupt. Unfortunately, we don't know 226 * which access will service the interrupt, so we're potentially 227 * getting quite a few host interrupts per guest interrupt. 228 */ 229 vbasedev->ops->vfio_eoi(vbasedev); 230 } 231 232 uint64_t vfio_region_read(void *opaque, 233 hwaddr addr, unsigned size) 234 { 235 VFIORegion *region = opaque; 236 VFIODevice *vbasedev = region->vbasedev; 237 union { 238 uint8_t byte; 239 uint16_t word; 240 uint32_t dword; 241 uint64_t qword; 242 } buf; 243 uint64_t data = 0; 244 245 if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { 246 error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m", 247 __func__, vbasedev->name, region->nr, 248 addr, size); 249 return (uint64_t)-1; 250 } 251 switch (size) { 252 case 1: 253 data = buf.byte; 254 break; 255 case 2: 256 data = le16_to_cpu(buf.word); 257 break; 258 case 4: 259 data = le32_to_cpu(buf.dword); 260 break; 261 case 8: 262 data = le64_to_cpu(buf.qword); 263 break; 264 default: 265 hw_error("vfio: unsupported read size, %d bytes", size); 266 break; 267 } 268 269 trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data); 270 271 /* Same as write above */ 272 vbasedev->ops->vfio_eoi(vbasedev); 273 274 return data; 275 } 276 277 const MemoryRegionOps vfio_region_ops = { 278 .read = vfio_region_read, 279 .write = vfio_region_write, 280 .endianness = DEVICE_LITTLE_ENDIAN, 281 .valid = { 282 .min_access_size = 1, 283 .max_access_size = 8, 284 }, 285 .impl = { 286 .min_access_size = 1, 287 .max_access_size = 8, 288 }, 289 }; 290 291 /* 292 * Device state interfaces 293 */ 294 295 bool vfio_mig_active(void) 296 { 297 VFIOGroup *group; 298 VFIODevice *vbasedev; 299 300 if (QLIST_EMPTY(&vfio_group_list)) { 301 return false; 302 } 303 304 QLIST_FOREACH(group, &vfio_group_list, next) { 305 QLIST_FOREACH(vbasedev, &group->device_list, next) { 306 if (vbasedev->migration_blocker) { 307 return false; 308 } 309 } 310 } 311 return true; 312 } 313 314 static bool vfio_devices_all_stopped_and_saving(VFIOContainer *container) 315 { 316 VFIOGroup *group; 317 VFIODevice *vbasedev; 318 MigrationState *ms = migrate_get_current(); 319 320 if (!migration_is_setup_or_active(ms->state)) { 
321 return false; 322 } 323 324 QLIST_FOREACH(group, &container->group_list, container_next) { 325 QLIST_FOREACH(vbasedev, &group->device_list, next) { 326 VFIOMigration *migration = vbasedev->migration; 327 328 if (!migration) { 329 return false; 330 } 331 332 if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) && 333 !(migration->device_state & VFIO_DEVICE_STATE_RUNNING)) { 334 continue; 335 } else { 336 return false; 337 } 338 } 339 } 340 return true; 341 } 342 343 static bool vfio_devices_all_running_and_saving(VFIOContainer *container) 344 { 345 VFIOGroup *group; 346 VFIODevice *vbasedev; 347 MigrationState *ms = migrate_get_current(); 348 349 if (!migration_is_setup_or_active(ms->state)) { 350 return false; 351 } 352 353 QLIST_FOREACH(group, &container->group_list, container_next) { 354 QLIST_FOREACH(vbasedev, &group->device_list, next) { 355 VFIOMigration *migration = vbasedev->migration; 356 357 if (!migration) { 358 return false; 359 } 360 361 if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) && 362 (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) { 363 continue; 364 } else { 365 return false; 366 } 367 } 368 } 369 return true; 370 } 371 372 static int vfio_dma_unmap_bitmap(VFIOContainer *container, 373 hwaddr iova, ram_addr_t size, 374 IOMMUTLBEntry *iotlb) 375 { 376 struct vfio_iommu_type1_dma_unmap *unmap; 377 struct vfio_bitmap *bitmap; 378 uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS; 379 int ret; 380 381 unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); 382 383 unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); 384 unmap->iova = iova; 385 unmap->size = size; 386 unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; 387 bitmap = (struct vfio_bitmap *)&unmap->data; 388 389 /* 390 * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of 391 * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap_pgsize to 392 * TARGET_PAGE_SIZE. 393 */ 394 395 bitmap->pgsize = TARGET_PAGE_SIZE; 396 bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / 397 BITS_PER_BYTE; 398 399 if (bitmap->size > container->max_dirty_bitmap_size) { 400 error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, 401 (uint64_t)bitmap->size); 402 ret = -E2BIG; 403 goto unmap_exit; 404 } 405 406 bitmap->data = g_try_malloc0(bitmap->size); 407 if (!bitmap->data) { 408 ret = -ENOMEM; 409 goto unmap_exit; 410 } 411 412 ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); 413 if (!ret) { 414 cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data, 415 iotlb->translated_addr, pages); 416 } else { 417 error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); 418 } 419 420 g_free(bitmap->data); 421 unmap_exit: 422 g_free(unmap); 423 return ret; 424 } 425 426 /* 427 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 428 */ 429 static int vfio_dma_unmap(VFIOContainer *container, 430 hwaddr iova, ram_addr_t size, 431 IOMMUTLBEntry *iotlb) 432 { 433 struct vfio_iommu_type1_dma_unmap unmap = { 434 .argsz = sizeof(unmap), 435 .flags = 0, 436 .iova = iova, 437 .size = size, 438 }; 439 440 if (iotlb && container->dirty_pages_supported && 441 vfio_devices_all_running_and_saving(container)) { 442 return vfio_dma_unmap_bitmap(container, iova, size, iotlb); 443 } 444 445 while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { 446 /* 447 * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c 448 * v4.15) where an overflow in its wrap-around check prevents us from 449 * unmapping the last page of the address space. 
         * Test for the error condition and re-try the unmap excluding the
         * last page.  The expectation is that we've never mapped the last
         * page anyway and this unmap request comes via vIOMMU support which
         * also makes it unlikely that this page is used.  This bug was
         * introduced well after type1 v2 support was introduced, so we
         * shouldn't need to test for v1.  A fix is queued for kernel v5.0
         * so this workaround can be removed once affected kernels are
         * sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping; if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapping host DMA windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and are beyond the address width
            * of some IOMMU hardware.  TODO: VFIO should tell us the IOMMU
            * width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.
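 * Translates iotlb->translated_addr the rest of the way to a host virtual
 * address and/or ram_addr_t (and whether it is writable); returns false if
 * the target is not ordinary RAM.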
*/ 553 static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, 554 ram_addr_t *ram_addr, bool *read_only) 555 { 556 MemoryRegion *mr; 557 hwaddr xlat; 558 hwaddr len = iotlb->addr_mask + 1; 559 bool writable = iotlb->perm & IOMMU_WO; 560 561 /* 562 * The IOMMU TLB entry we have just covers translation through 563 * this IOMMU to its immediate target. We need to translate 564 * it the rest of the way through to memory. 565 */ 566 mr = address_space_translate(&address_space_memory, 567 iotlb->translated_addr, 568 &xlat, &len, writable, 569 MEMTXATTRS_UNSPECIFIED); 570 if (!memory_region_is_ram(mr)) { 571 error_report("iommu map to non memory area %"HWADDR_PRIx"", 572 xlat); 573 return false; 574 } 575 576 /* 577 * Translation truncates length to the IOMMU page size, 578 * check that it did not truncate too much. 579 */ 580 if (len & iotlb->addr_mask) { 581 error_report("iommu has granularity incompatible with target AS"); 582 return false; 583 } 584 585 if (vaddr) { 586 *vaddr = memory_region_get_ram_ptr(mr) + xlat; 587 } 588 589 if (ram_addr) { 590 *ram_addr = memory_region_get_ram_addr(mr) + xlat; 591 } 592 593 if (read_only) { 594 *read_only = !writable || mr->readonly; 595 } 596 597 return true; 598 } 599 600 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 601 { 602 VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); 603 VFIOContainer *container = giommu->container; 604 hwaddr iova = iotlb->iova + giommu->iommu_offset; 605 void *vaddr; 606 int ret; 607 608 trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP", 609 iova, iova + iotlb->addr_mask); 610 611 if (iotlb->target_as != &address_space_memory) { 612 error_report("Wrong target AS \"%s\", only system memory is allowed", 613 iotlb->target_as->name ? iotlb->target_as->name : "none"); 614 return; 615 } 616 617 rcu_read_lock(); 618 619 if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { 620 bool read_only; 621 622 if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) { 623 goto out; 624 } 625 /* 626 * vaddr is only valid until rcu_read_unlock(). But after 627 * vfio_dma_map has set up the mapping the pages will be 628 * pinned by the kernel. This makes sure that the RAM backend 629 * of vaddr will always be there, even if the memory object is 630 * destroyed and its backing memory munmap-ed. 
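         *
         * (On the unmap path in the else branch below, the IOTLB entry is
         * passed to vfio_dma_unmap() so that, while migration is active,
         * the dirty bitmap for the range can be collected before the
         * mapping is torn down.)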
631 */ 632 ret = vfio_dma_map(container, iova, 633 iotlb->addr_mask + 1, vaddr, 634 read_only); 635 if (ret) { 636 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " 637 "0x%"HWADDR_PRIx", %p) = %d (%m)", 638 container, iova, 639 iotlb->addr_mask + 1, vaddr, ret); 640 } 641 } else { 642 ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb); 643 if (ret) { 644 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " 645 "0x%"HWADDR_PRIx") = %d (%m)", 646 container, iova, 647 iotlb->addr_mask + 1, ret); 648 } 649 } 650 out: 651 rcu_read_unlock(); 652 } 653 654 static void vfio_listener_region_add(MemoryListener *listener, 655 MemoryRegionSection *section) 656 { 657 VFIOContainer *container = container_of(listener, VFIOContainer, listener); 658 hwaddr iova, end; 659 Int128 llend, llsize; 660 void *vaddr; 661 int ret; 662 VFIOHostDMAWindow *hostwin; 663 bool hostwin_found; 664 Error *err = NULL; 665 666 if (vfio_listener_skipped_section(section)) { 667 trace_vfio_listener_region_add_skip( 668 section->offset_within_address_space, 669 section->offset_within_address_space + 670 int128_get64(int128_sub(section->size, int128_one()))); 671 return; 672 } 673 674 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != 675 (section->offset_within_region & ~TARGET_PAGE_MASK))) { 676 error_report("%s received unaligned region", __func__); 677 return; 678 } 679 680 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); 681 llend = int128_make64(section->offset_within_address_space); 682 llend = int128_add(llend, section->size); 683 llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); 684 685 if (int128_ge(int128_make64(iova), llend)) { 686 return; 687 } 688 end = int128_get64(int128_sub(llend, int128_one())); 689 690 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 691 hwaddr pgsize = 0; 692 693 /* For now intersections are not allowed, we may relax this later */ 694 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { 695 if (ranges_overlap(hostwin->min_iova, 696 hostwin->max_iova - hostwin->min_iova + 1, 697 section->offset_within_address_space, 698 int128_get64(section->size))) { 699 error_setg(&err, 700 "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing" 701 "host DMA window [0x%"PRIx64",0x%"PRIx64"]", 702 section->offset_within_address_space, 703 section->offset_within_address_space + 704 int128_get64(section->size) - 1, 705 hostwin->min_iova, hostwin->max_iova); 706 goto fail; 707 } 708 } 709 710 ret = vfio_spapr_create_window(container, section, &pgsize); 711 if (ret) { 712 error_setg_errno(&err, -ret, "Failed to create SPAPR window"); 713 goto fail; 714 } 715 716 vfio_host_win_add(container, section->offset_within_address_space, 717 section->offset_within_address_space + 718 int128_get64(section->size) - 1, pgsize); 719 #ifdef CONFIG_KVM 720 if (kvm_enabled()) { 721 VFIOGroup *group; 722 IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); 723 struct kvm_vfio_spapr_tce param; 724 struct kvm_device_attr attr = { 725 .group = KVM_DEV_VFIO_GROUP, 726 .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE, 727 .addr = (uint64_t)(unsigned long)¶m, 728 }; 729 730 if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD, 731 ¶m.tablefd)) { 732 QLIST_FOREACH(group, &container->group_list, container_next) { 733 param.groupfd = group->fd; 734 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { 735 error_report("vfio: failed to setup fd %d " 736 "for a group with fd %d: %s", 737 param.tablefd, param.groupfd, 738 
strerror(errno)); 739 return; 740 } 741 trace_vfio_spapr_group_attach(param.groupfd, param.tablefd); 742 } 743 } 744 } 745 #endif 746 } 747 748 hostwin_found = false; 749 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { 750 if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { 751 hostwin_found = true; 752 break; 753 } 754 } 755 756 if (!hostwin_found) { 757 error_setg(&err, "Container %p can't map guest IOVA region" 758 " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end); 759 goto fail; 760 } 761 762 memory_region_ref(section->mr); 763 764 if (memory_region_is_iommu(section->mr)) { 765 VFIOGuestIOMMU *giommu; 766 IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); 767 int iommu_idx; 768 769 trace_vfio_listener_region_add_iommu(iova, end); 770 /* 771 * FIXME: For VFIO iommu types which have KVM acceleration to 772 * avoid bouncing all map/unmaps through qemu this way, this 773 * would be the right place to wire that up (tell the KVM 774 * device emulation the VFIO iommu handles to use). 775 */ 776 giommu = g_malloc0(sizeof(*giommu)); 777 giommu->iommu = iommu_mr; 778 giommu->iommu_offset = section->offset_within_address_space - 779 section->offset_within_region; 780 giommu->container = container; 781 llend = int128_add(int128_make64(section->offset_within_region), 782 section->size); 783 llend = int128_sub(llend, int128_one()); 784 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, 785 MEMTXATTRS_UNSPECIFIED); 786 iommu_notifier_init(&giommu->n, vfio_iommu_map_notify, 787 IOMMU_NOTIFIER_ALL, 788 section->offset_within_region, 789 int128_get64(llend), 790 iommu_idx); 791 792 ret = memory_region_register_iommu_notifier(section->mr, &giommu->n, 793 &err); 794 if (ret) { 795 g_free(giommu); 796 goto fail; 797 } 798 QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); 799 memory_region_iommu_replay(giommu->iommu, &giommu->n); 800 801 return; 802 } 803 804 /* Here we assume that memory_region_is_ram(section->mr)==true */ 805 806 vaddr = memory_region_get_ram_ptr(section->mr) + 807 section->offset_within_region + 808 (iova - section->offset_within_address_space); 809 810 trace_vfio_listener_region_add_ram(iova, end, vaddr); 811 812 llsize = int128_sub(llend, int128_make64(iova)); 813 814 if (memory_region_is_ram_device(section->mr)) { 815 hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; 816 817 if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { 818 trace_vfio_listener_region_add_no_dma_map( 819 memory_region_name(section->mr), 820 section->offset_within_address_space, 821 int128_getlo(section->size), 822 pgmask + 1); 823 return; 824 } 825 } 826 827 ret = vfio_dma_map(container, iova, int128_get64(llsize), 828 vaddr, section->readonly); 829 if (ret) { 830 error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", " 831 "0x%"HWADDR_PRIx", %p) = %d (%m)", 832 container, iova, int128_get64(llsize), vaddr, ret); 833 if (memory_region_is_ram_device(section->mr)) { 834 /* Allow unexpected mappings not to be fatal for RAM devices */ 835 error_report_err(err); 836 return; 837 } 838 goto fail; 839 } 840 841 return; 842 843 fail: 844 if (memory_region_is_ram_device(section->mr)) { 845 error_report("failed to vfio_dma_map. pci p2p may not work"); 846 return; 847 } 848 /* 849 * On the initfn path, store the first error in the container so we 850 * can gracefully fail. Runtime, there's not much we can do other 851 * than throw a hardware error. 
852 */ 853 if (!container->initialized) { 854 if (!container->error) { 855 error_propagate_prepend(&container->error, err, 856 "Region %s: ", 857 memory_region_name(section->mr)); 858 } else { 859 error_free(err); 860 } 861 } else { 862 error_report_err(err); 863 hw_error("vfio: DMA mapping failed, unable to continue"); 864 } 865 } 866 867 static void vfio_listener_region_del(MemoryListener *listener, 868 MemoryRegionSection *section) 869 { 870 VFIOContainer *container = container_of(listener, VFIOContainer, listener); 871 hwaddr iova, end; 872 Int128 llend, llsize; 873 int ret; 874 bool try_unmap = true; 875 876 if (vfio_listener_skipped_section(section)) { 877 trace_vfio_listener_region_del_skip( 878 section->offset_within_address_space, 879 section->offset_within_address_space + 880 int128_get64(int128_sub(section->size, int128_one()))); 881 return; 882 } 883 884 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != 885 (section->offset_within_region & ~TARGET_PAGE_MASK))) { 886 error_report("%s received unaligned region", __func__); 887 return; 888 } 889 890 if (memory_region_is_iommu(section->mr)) { 891 VFIOGuestIOMMU *giommu; 892 893 QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { 894 if (MEMORY_REGION(giommu->iommu) == section->mr && 895 giommu->n.start == section->offset_within_region) { 896 memory_region_unregister_iommu_notifier(section->mr, 897 &giommu->n); 898 QLIST_REMOVE(giommu, giommu_next); 899 g_free(giommu); 900 break; 901 } 902 } 903 904 /* 905 * FIXME: We assume the one big unmap below is adequate to 906 * remove any individual page mappings in the IOMMU which 907 * might have been copied into VFIO. This works for a page table 908 * based IOMMU where a big unmap flattens a large range of IO-PTEs. 909 * That may not be true for all IOMMU types. 
910 */ 911 } 912 913 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); 914 llend = int128_make64(section->offset_within_address_space); 915 llend = int128_add(llend, section->size); 916 llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); 917 918 if (int128_ge(int128_make64(iova), llend)) { 919 return; 920 } 921 end = int128_get64(int128_sub(llend, int128_one())); 922 923 llsize = int128_sub(llend, int128_make64(iova)); 924 925 trace_vfio_listener_region_del(iova, end); 926 927 if (memory_region_is_ram_device(section->mr)) { 928 hwaddr pgmask; 929 VFIOHostDMAWindow *hostwin; 930 bool hostwin_found = false; 931 932 QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { 933 if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { 934 hostwin_found = true; 935 break; 936 } 937 } 938 assert(hostwin_found); /* or region_add() would have failed */ 939 940 pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; 941 try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask)); 942 } 943 944 if (try_unmap) { 945 ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); 946 if (ret) { 947 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " 948 "0x%"HWADDR_PRIx") = %d (%m)", 949 container, iova, int128_get64(llsize), ret); 950 } 951 } 952 953 memory_region_unref(section->mr); 954 955 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 956 vfio_spapr_remove_window(container, 957 section->offset_within_address_space); 958 if (vfio_host_win_del(container, 959 section->offset_within_address_space, 960 section->offset_within_address_space + 961 int128_get64(section->size) - 1) < 0) { 962 hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx, 963 __func__, section->offset_within_address_space); 964 } 965 } 966 } 967 968 static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, 969 uint64_t size, ram_addr_t ram_addr) 970 { 971 struct vfio_iommu_type1_dirty_bitmap *dbitmap; 972 struct vfio_iommu_type1_dirty_bitmap_get *range; 973 uint64_t pages; 974 int ret; 975 976 dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); 977 978 dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); 979 dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; 980 range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; 981 range->iova = iova; 982 range->size = size; 983 984 /* 985 * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of 986 * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap's pgsize to 987 * TARGET_PAGE_SIZE. 
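     *
     * Illustrative sizing, assuming 4 KiB target pages: a 1 GiB range
     * covers 262144 pages, so the bitmap allocated below is
     * ROUND_UP(262144, 64) / 8 = 32768 bytes, i.e. one bit per target
     * page rounded up to a whole number of u64 words.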
988 */ 989 range->bitmap.pgsize = TARGET_PAGE_SIZE; 990 991 pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS; 992 range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / 993 BITS_PER_BYTE; 994 range->bitmap.data = g_try_malloc0(range->bitmap.size); 995 if (!range->bitmap.data) { 996 ret = -ENOMEM; 997 goto err_out; 998 } 999 1000 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); 1001 if (ret) { 1002 error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64 1003 " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova, 1004 (uint64_t)range->size, errno); 1005 goto err_out; 1006 } 1007 1008 cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data, 1009 ram_addr, pages); 1010 1011 trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size, 1012 range->bitmap.size, ram_addr); 1013 err_out: 1014 g_free(range->bitmap.data); 1015 g_free(dbitmap); 1016 1017 return ret; 1018 } 1019 1020 typedef struct { 1021 IOMMUNotifier n; 1022 VFIOGuestIOMMU *giommu; 1023 } vfio_giommu_dirty_notifier; 1024 1025 static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 1026 { 1027 vfio_giommu_dirty_notifier *gdn = container_of(n, 1028 vfio_giommu_dirty_notifier, n); 1029 VFIOGuestIOMMU *giommu = gdn->giommu; 1030 VFIOContainer *container = giommu->container; 1031 hwaddr iova = iotlb->iova + giommu->iommu_offset; 1032 ram_addr_t translated_addr; 1033 1034 trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask); 1035 1036 if (iotlb->target_as != &address_space_memory) { 1037 error_report("Wrong target AS \"%s\", only system memory is allowed", 1038 iotlb->target_as->name ? iotlb->target_as->name : "none"); 1039 return; 1040 } 1041 1042 rcu_read_lock(); 1043 if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) { 1044 int ret; 1045 1046 ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1, 1047 translated_addr); 1048 if (ret) { 1049 error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " 1050 "0x%"HWADDR_PRIx") = %d (%m)", 1051 container, iova, 1052 iotlb->addr_mask + 1, ret); 1053 } 1054 } 1055 rcu_read_unlock(); 1056 } 1057 1058 static int vfio_sync_dirty_bitmap(VFIOContainer *container, 1059 MemoryRegionSection *section) 1060 { 1061 ram_addr_t ram_addr; 1062 1063 if (memory_region_is_iommu(section->mr)) { 1064 VFIOGuestIOMMU *giommu; 1065 1066 QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { 1067 if (MEMORY_REGION(giommu->iommu) == section->mr && 1068 giommu->n.start == section->offset_within_region) { 1069 Int128 llend; 1070 vfio_giommu_dirty_notifier gdn = { .giommu = giommu }; 1071 int idx = memory_region_iommu_attrs_to_index(giommu->iommu, 1072 MEMTXATTRS_UNSPECIFIED); 1073 1074 llend = int128_add(int128_make64(section->offset_within_region), 1075 section->size); 1076 llend = int128_sub(llend, int128_one()); 1077 1078 iommu_notifier_init(&gdn.n, 1079 vfio_iommu_map_dirty_notify, 1080 IOMMU_NOTIFIER_MAP, 1081 section->offset_within_region, 1082 int128_get64(llend), 1083 idx); 1084 memory_region_iommu_replay(giommu->iommu, &gdn.n); 1085 break; 1086 } 1087 } 1088 return 0; 1089 } 1090 1091 ram_addr = memory_region_get_ram_addr(section->mr) + 1092 section->offset_within_region; 1093 1094 return vfio_get_dirty_bitmap(container, 1095 TARGET_PAGE_ALIGN(section->offset_within_address_space), 1096 int128_get64(section->size), ram_addr); 1097 } 1098 1099 static void vfio_listerner_log_sync(MemoryListener *listener, 1100 MemoryRegionSection *section) 1101 { 1102 VFIOContainer *container 
= container_of(listener, VFIOContainer, listener); 1103 1104 if (vfio_listener_skipped_section(section) || 1105 !container->dirty_pages_supported) { 1106 return; 1107 } 1108 1109 if (vfio_devices_all_stopped_and_saving(container)) { 1110 vfio_sync_dirty_bitmap(container, section); 1111 } 1112 } 1113 1114 static const MemoryListener vfio_memory_listener = { 1115 .region_add = vfio_listener_region_add, 1116 .region_del = vfio_listener_region_del, 1117 .log_sync = vfio_listerner_log_sync, 1118 }; 1119 1120 static void vfio_listener_release(VFIOContainer *container) 1121 { 1122 memory_listener_unregister(&container->listener); 1123 if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 1124 memory_listener_unregister(&container->prereg_listener); 1125 } 1126 } 1127 1128 struct vfio_info_cap_header * 1129 vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id) 1130 { 1131 struct vfio_info_cap_header *hdr; 1132 void *ptr = info; 1133 1134 if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) { 1135 return NULL; 1136 } 1137 1138 for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { 1139 if (hdr->id == id) { 1140 return hdr; 1141 } 1142 } 1143 1144 return NULL; 1145 } 1146 1147 static int vfio_setup_region_sparse_mmaps(VFIORegion *region, 1148 struct vfio_region_info *info) 1149 { 1150 struct vfio_info_cap_header *hdr; 1151 struct vfio_region_info_cap_sparse_mmap *sparse; 1152 int i, j; 1153 1154 hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP); 1155 if (!hdr) { 1156 return -ENODEV; 1157 } 1158 1159 sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header); 1160 1161 trace_vfio_region_sparse_mmap_header(region->vbasedev->name, 1162 region->nr, sparse->nr_areas); 1163 1164 region->mmaps = g_new0(VFIOMmap, sparse->nr_areas); 1165 1166 for (i = 0, j = 0; i < sparse->nr_areas; i++) { 1167 trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset, 1168 sparse->areas[i].offset + 1169 sparse->areas[i].size); 1170 1171 if (sparse->areas[i].size) { 1172 region->mmaps[j].offset = sparse->areas[i].offset; 1173 region->mmaps[j].size = sparse->areas[i].size; 1174 j++; 1175 } 1176 } 1177 1178 region->nr_mmaps = j; 1179 region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap)); 1180 1181 return 0; 1182 } 1183 1184 int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, 1185 int index, const char *name) 1186 { 1187 struct vfio_region_info *info; 1188 int ret; 1189 1190 ret = vfio_get_region_info(vbasedev, index, &info); 1191 if (ret) { 1192 return ret; 1193 } 1194 1195 region->vbasedev = vbasedev; 1196 region->flags = info->flags; 1197 region->size = info->size; 1198 region->fd_offset = info->offset; 1199 region->nr = index; 1200 1201 if (region->size) { 1202 region->mem = g_new0(MemoryRegion, 1); 1203 memory_region_init_io(region->mem, obj, &vfio_region_ops, 1204 region, name, region->size); 1205 1206 if (!vbasedev->no_mmap && 1207 region->flags & VFIO_REGION_INFO_FLAG_MMAP) { 1208 1209 ret = vfio_setup_region_sparse_mmaps(region, info); 1210 1211 if (ret) { 1212 region->nr_mmaps = 1; 1213 region->mmaps = g_new0(VFIOMmap, region->nr_mmaps); 1214 region->mmaps[0].offset = 0; 1215 region->mmaps[0].size = region->size; 1216 } 1217 } 1218 } 1219 1220 g_free(info); 1221 1222 trace_vfio_region_setup(vbasedev->name, index, name, 1223 region->flags, region->fd_offset, region->size); 1224 return 0; 1225 } 1226 1227 static void vfio_subregion_unmap(VFIORegion *region, int index) 1228 { 1229 
trace_vfio_region_unmap(memory_region_name(®ion->mmaps[index].mem), 1230 region->mmaps[index].offset, 1231 region->mmaps[index].offset + 1232 region->mmaps[index].size - 1); 1233 memory_region_del_subregion(region->mem, ®ion->mmaps[index].mem); 1234 munmap(region->mmaps[index].mmap, region->mmaps[index].size); 1235 object_unparent(OBJECT(®ion->mmaps[index].mem)); 1236 region->mmaps[index].mmap = NULL; 1237 } 1238 1239 int vfio_region_mmap(VFIORegion *region) 1240 { 1241 int i, prot = 0; 1242 char *name; 1243 1244 if (!region->mem) { 1245 return 0; 1246 } 1247 1248 prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0; 1249 prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0; 1250 1251 for (i = 0; i < region->nr_mmaps; i++) { 1252 region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot, 1253 MAP_SHARED, region->vbasedev->fd, 1254 region->fd_offset + 1255 region->mmaps[i].offset); 1256 if (region->mmaps[i].mmap == MAP_FAILED) { 1257 int ret = -errno; 1258 1259 trace_vfio_region_mmap_fault(memory_region_name(region->mem), i, 1260 region->fd_offset + 1261 region->mmaps[i].offset, 1262 region->fd_offset + 1263 region->mmaps[i].offset + 1264 region->mmaps[i].size - 1, ret); 1265 1266 region->mmaps[i].mmap = NULL; 1267 1268 for (i--; i >= 0; i--) { 1269 vfio_subregion_unmap(region, i); 1270 } 1271 1272 return ret; 1273 } 1274 1275 name = g_strdup_printf("%s mmaps[%d]", 1276 memory_region_name(region->mem), i); 1277 memory_region_init_ram_device_ptr(®ion->mmaps[i].mem, 1278 memory_region_owner(region->mem), 1279 name, region->mmaps[i].size, 1280 region->mmaps[i].mmap); 1281 g_free(name); 1282 memory_region_add_subregion(region->mem, region->mmaps[i].offset, 1283 ®ion->mmaps[i].mem); 1284 1285 trace_vfio_region_mmap(memory_region_name(®ion->mmaps[i].mem), 1286 region->mmaps[i].offset, 1287 region->mmaps[i].offset + 1288 region->mmaps[i].size - 1); 1289 } 1290 1291 return 0; 1292 } 1293 1294 void vfio_region_unmap(VFIORegion *region) 1295 { 1296 int i; 1297 1298 if (!region->mem) { 1299 return; 1300 } 1301 1302 for (i = 0; i < region->nr_mmaps; i++) { 1303 if (region->mmaps[i].mmap) { 1304 vfio_subregion_unmap(region, i); 1305 } 1306 } 1307 } 1308 1309 void vfio_region_exit(VFIORegion *region) 1310 { 1311 int i; 1312 1313 if (!region->mem) { 1314 return; 1315 } 1316 1317 for (i = 0; i < region->nr_mmaps; i++) { 1318 if (region->mmaps[i].mmap) { 1319 memory_region_del_subregion(region->mem, ®ion->mmaps[i].mem); 1320 } 1321 } 1322 1323 trace_vfio_region_exit(region->vbasedev->name, region->nr); 1324 } 1325 1326 void vfio_region_finalize(VFIORegion *region) 1327 { 1328 int i; 1329 1330 if (!region->mem) { 1331 return; 1332 } 1333 1334 for (i = 0; i < region->nr_mmaps; i++) { 1335 if (region->mmaps[i].mmap) { 1336 munmap(region->mmaps[i].mmap, region->mmaps[i].size); 1337 object_unparent(OBJECT(®ion->mmaps[i].mem)); 1338 } 1339 } 1340 1341 object_unparent(OBJECT(region->mem)); 1342 1343 g_free(region->mem); 1344 g_free(region->mmaps); 1345 1346 trace_vfio_region_finalize(region->vbasedev->name, region->nr); 1347 1348 region->mem = NULL; 1349 region->mmaps = NULL; 1350 region->nr_mmaps = 0; 1351 region->size = 0; 1352 region->flags = 0; 1353 region->nr = 0; 1354 } 1355 1356 void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled) 1357 { 1358 int i; 1359 1360 if (!region->mem) { 1361 return; 1362 } 1363 1364 for (i = 0; i < region->nr_mmaps; i++) { 1365 if (region->mmaps[i].mmap) { 1366 memory_region_set_enabled(®ion->mmaps[i].mem, enabled); 1367 } 1368 } 
1369 1370 trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem), 1371 enabled); 1372 } 1373 1374 void vfio_reset_handler(void *opaque) 1375 { 1376 VFIOGroup *group; 1377 VFIODevice *vbasedev; 1378 1379 QLIST_FOREACH(group, &vfio_group_list, next) { 1380 QLIST_FOREACH(vbasedev, &group->device_list, next) { 1381 if (vbasedev->dev->realized) { 1382 vbasedev->ops->vfio_compute_needs_reset(vbasedev); 1383 } 1384 } 1385 } 1386 1387 QLIST_FOREACH(group, &vfio_group_list, next) { 1388 QLIST_FOREACH(vbasedev, &group->device_list, next) { 1389 if (vbasedev->dev->realized && vbasedev->needs_reset) { 1390 vbasedev->ops->vfio_hot_reset_multi(vbasedev); 1391 } 1392 } 1393 } 1394 } 1395 1396 static void vfio_kvm_device_add_group(VFIOGroup *group) 1397 { 1398 #ifdef CONFIG_KVM 1399 struct kvm_device_attr attr = { 1400 .group = KVM_DEV_VFIO_GROUP, 1401 .attr = KVM_DEV_VFIO_GROUP_ADD, 1402 .addr = (uint64_t)(unsigned long)&group->fd, 1403 }; 1404 1405 if (!kvm_enabled()) { 1406 return; 1407 } 1408 1409 if (vfio_kvm_device_fd < 0) { 1410 struct kvm_create_device cd = { 1411 .type = KVM_DEV_TYPE_VFIO, 1412 }; 1413 1414 if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) { 1415 error_report("Failed to create KVM VFIO device: %m"); 1416 return; 1417 } 1418 1419 vfio_kvm_device_fd = cd.fd; 1420 } 1421 1422 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { 1423 error_report("Failed to add group %d to KVM VFIO device: %m", 1424 group->groupid); 1425 } 1426 #endif 1427 } 1428 1429 static void vfio_kvm_device_del_group(VFIOGroup *group) 1430 { 1431 #ifdef CONFIG_KVM 1432 struct kvm_device_attr attr = { 1433 .group = KVM_DEV_VFIO_GROUP, 1434 .attr = KVM_DEV_VFIO_GROUP_DEL, 1435 .addr = (uint64_t)(unsigned long)&group->fd, 1436 }; 1437 1438 if (vfio_kvm_device_fd < 0) { 1439 return; 1440 } 1441 1442 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { 1443 error_report("Failed to remove group %d from KVM VFIO device: %m", 1444 group->groupid); 1445 } 1446 #endif 1447 } 1448 1449 static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) 1450 { 1451 VFIOAddressSpace *space; 1452 1453 QLIST_FOREACH(space, &vfio_address_spaces, list) { 1454 if (space->as == as) { 1455 return space; 1456 } 1457 } 1458 1459 /* No suitable VFIOAddressSpace, create a new one */ 1460 space = g_malloc0(sizeof(*space)); 1461 space->as = as; 1462 QLIST_INIT(&space->containers); 1463 1464 QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); 1465 1466 return space; 1467 } 1468 1469 static void vfio_put_address_space(VFIOAddressSpace *space) 1470 { 1471 if (QLIST_EMPTY(&space->containers)) { 1472 QLIST_REMOVE(space, list); 1473 g_free(space); 1474 } 1475 } 1476 1477 /* 1478 * vfio_get_iommu_type - selects the richest iommu_type (v2 first) 1479 */ 1480 static int vfio_get_iommu_type(VFIOContainer *container, 1481 Error **errp) 1482 { 1483 int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, 1484 VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; 1485 int i; 1486 1487 for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { 1488 if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { 1489 return iommu_types[i]; 1490 } 1491 } 1492 error_setg(errp, "No available IOMMU models"); 1493 return -EINVAL; 1494 } 1495 1496 static int vfio_init_container(VFIOContainer *container, int group_fd, 1497 Error **errp) 1498 { 1499 int iommu_type, ret; 1500 1501 iommu_type = vfio_get_iommu_type(container, errp); 1502 if (iommu_type < 0) { 1503 return iommu_type; 1504 } 1505 1506 ret = ioctl(group_fd, 
VFIO_GROUP_SET_CONTAINER, &container->fd); 1507 if (ret) { 1508 error_setg_errno(errp, errno, "Failed to set group container"); 1509 return -errno; 1510 } 1511 1512 while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) { 1513 if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 1514 /* 1515 * On sPAPR, despite the IOMMU subdriver always advertises v1 and 1516 * v2, the running platform may not support v2 and there is no 1517 * way to guess it until an IOMMU group gets added to the container. 1518 * So in case it fails with v2, try v1 as a fallback. 1519 */ 1520 iommu_type = VFIO_SPAPR_TCE_IOMMU; 1521 continue; 1522 } 1523 error_setg_errno(errp, errno, "Failed to set iommu for container"); 1524 return -errno; 1525 } 1526 1527 container->iommu_type = iommu_type; 1528 return 0; 1529 } 1530 1531 static int vfio_get_iommu_info(VFIOContainer *container, 1532 struct vfio_iommu_type1_info **info) 1533 { 1534 1535 size_t argsz = sizeof(struct vfio_iommu_type1_info); 1536 1537 *info = g_new0(struct vfio_iommu_type1_info, 1); 1538 again: 1539 (*info)->argsz = argsz; 1540 1541 if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { 1542 g_free(*info); 1543 *info = NULL; 1544 return -errno; 1545 } 1546 1547 if (((*info)->argsz > argsz)) { 1548 argsz = (*info)->argsz; 1549 *info = g_realloc(*info, argsz); 1550 goto again; 1551 } 1552 1553 return 0; 1554 } 1555 1556 static struct vfio_info_cap_header * 1557 vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) 1558 { 1559 struct vfio_info_cap_header *hdr; 1560 void *ptr = info; 1561 1562 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { 1563 return NULL; 1564 } 1565 1566 for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { 1567 if (hdr->id == id) { 1568 return hdr; 1569 } 1570 } 1571 1572 return NULL; 1573 } 1574 1575 static void vfio_get_iommu_info_migration(VFIOContainer *container, 1576 struct vfio_iommu_type1_info *info) 1577 { 1578 struct vfio_info_cap_header *hdr; 1579 struct vfio_iommu_type1_info_cap_migration *cap_mig; 1580 1581 hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); 1582 if (!hdr) { 1583 return; 1584 } 1585 1586 cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, 1587 header); 1588 1589 /* 1590 * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of 1591 * TARGET_PAGE_SIZE to mark those dirty. 1592 */ 1593 if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) { 1594 container->dirty_pages_supported = true; 1595 container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; 1596 container->dirty_pgsizes = cap_mig->pgsize_bitmap; 1597 } 1598 } 1599 1600 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, 1601 Error **errp) 1602 { 1603 VFIOContainer *container; 1604 int ret, fd; 1605 VFIOAddressSpace *space; 1606 1607 space = vfio_get_address_space(as); 1608 1609 /* 1610 * VFIO is currently incompatible with discarding of RAM insofar as the 1611 * madvise to purge (zap) the page from QEMU's address space does not 1612 * interact with the memory API and therefore leaves stale virtual to 1613 * physical mappings in the IOMMU if the page was previously pinned. We 1614 * therefore set discarding broken for each group added to a container, 1615 * whether the container is used individually or shared. 
     * This provides us with options to allow devices within a group to
     * opt-in and allow discarding, so long as it is done consistently for
     * a group (for instance if the device is an mdev device where it is
     * known that the host vendor driver will never pin pages outside of
     * the working set of the guest driver, which would thus not be
     * discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt
     * to attach the group to existing containers within the AddressSpace.
     * If any pages are already zapped from the virtual address space, such
     * as from previous discards, new pinning will cause valid mappings to
     * be re-established.  Likewise, when the overall MemoryListener for a
     * new container is registered, a replay of mappings within the
     * AddressSpace will occur, re-establishing any previously zapped pages
     * as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it does not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        return ret;
    }

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    container->dirty_pages_supported = false;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info *info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
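         *
         * (What the kernel does report includes iova_pgsizes, a bitmap of
         * the IOMMU page sizes the host supports; e.g., illustratively,
         * 4 KiB + 2 MiB + 1 GiB support would read as 0x40201000.  ctz64()
         * of this mask is used elsewhere in this file to derive the
         * smallest supported page size.)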
         */
        ret = vfio_get_iommu_info(container, &info);

        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info->iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
        container->pgsizes = info->iova_pgsizes;

        if (!ret) {
            vfio_get_iommu_info_migration(container, info);
        }
        g_free(info);
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when the container fd is closed, so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = -1;
                error_propagate_prepend(errp, container->error,
                    "RAM memory listener initialization failed: ");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * A newly created container comes with a default DMA window.
             * To keep region_add/del simple, remove this window now and
             * let the iommu listener callbacks create/remove windows as
             * needed.
1757 */ 1758 ret = vfio_spapr_remove_window(container, info.dma32_window_start); 1759 if (ret) { 1760 error_setg_errno(errp, -ret, 1761 "failed to remove existing window"); 1762 goto free_container_exit; 1763 } 1764 } else { 1765 /* The default table uses 4K pages */ 1766 container->pgsizes = 0x1000; 1767 vfio_host_win_add(container, info.dma32_window_start, 1768 info.dma32_window_start + 1769 info.dma32_window_size - 1, 1770 0x1000); 1771 } 1772 } 1773 } 1774 1775 vfio_kvm_device_add_group(group); 1776 1777 QLIST_INIT(&container->group_list); 1778 QLIST_INSERT_HEAD(&space->containers, container, next); 1779 1780 group->container = container; 1781 QLIST_INSERT_HEAD(&container->group_list, group, container_next); 1782 1783 container->listener = vfio_memory_listener; 1784 1785 memory_listener_register(&container->listener, container->space->as); 1786 1787 if (container->error) { 1788 ret = -1; 1789 error_propagate_prepend(errp, container->error, 1790 "memory listener initialization failed: "); 1791 goto listener_release_exit; 1792 } 1793 1794 container->initialized = true; 1795 1796 return 0; 1797 listener_release_exit: 1798 QLIST_REMOVE(group, container_next); 1799 QLIST_REMOVE(container, next); 1800 vfio_kvm_device_del_group(group); 1801 vfio_listener_release(container); 1802 1803 free_container_exit: 1804 g_free(container); 1805 1806 close_fd_exit: 1807 close(fd); 1808 1809 put_space_exit: 1810 ram_block_discard_disable(false); 1811 vfio_put_address_space(space); 1812 1813 return ret; 1814 } 1815 1816 static void vfio_disconnect_container(VFIOGroup *group) 1817 { 1818 VFIOContainer *container = group->container; 1819 1820 QLIST_REMOVE(group, container_next); 1821 group->container = NULL; 1822 1823 /* 1824 * Explicitly release the listener first before unset container, 1825 * since unset may destroy the backend container if it's the last 1826 * group. 1827 */ 1828 if (QLIST_EMPTY(&container->group_list)) { 1829 vfio_listener_release(container); 1830 } 1831 1832 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { 1833 error_report("vfio: error disconnecting group %d from container", 1834 group->groupid); 1835 } 1836 1837 if (QLIST_EMPTY(&container->group_list)) { 1838 VFIOAddressSpace *space = container->space; 1839 VFIOGuestIOMMU *giommu, *tmp; 1840 1841 QLIST_REMOVE(container, next); 1842 1843 QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { 1844 memory_region_unregister_iommu_notifier( 1845 MEMORY_REGION(giommu->iommu), &giommu->n); 1846 QLIST_REMOVE(giommu, giommu_next); 1847 g_free(giommu); 1848 } 1849 1850 trace_vfio_disconnect_container(container->fd); 1851 close(container->fd); 1852 g_free(container); 1853 1854 vfio_put_address_space(space); 1855 } 1856 } 1857 1858 VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) 1859 { 1860 VFIOGroup *group; 1861 char path[32]; 1862 struct vfio_group_status status = { .argsz = sizeof(status) }; 1863 1864 QLIST_FOREACH(group, &vfio_group_list, next) { 1865 if (group->groupid == groupid) { 1866 /* Found it. Now is it already in the right context? 
*/ 1867 if (group->container->space->as == as) { 1868 return group; 1869 } else { 1870 error_setg(errp, "group %d used in multiple address spaces", 1871 group->groupid); 1872 return NULL; 1873 } 1874 } 1875 } 1876 1877 group = g_malloc0(sizeof(*group)); 1878 1879 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); 1880 group->fd = qemu_open_old(path, O_RDWR); 1881 if (group->fd < 0) { 1882 error_setg_errno(errp, errno, "failed to open %s", path); 1883 goto free_group_exit; 1884 } 1885 1886 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { 1887 error_setg_errno(errp, errno, "failed to get group %d status", groupid); 1888 goto close_fd_exit; 1889 } 1890 1891 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { 1892 error_setg(errp, "group %d is not viable", groupid); 1893 error_append_hint(errp, 1894 "Please ensure all devices within the iommu_group " 1895 "are bound to their vfio bus driver.\n"); 1896 goto close_fd_exit; 1897 } 1898 1899 group->groupid = groupid; 1900 QLIST_INIT(&group->device_list); 1901 1902 if (vfio_connect_container(group, as, errp)) { 1903 error_prepend(errp, "failed to setup container for group %d: ", 1904 groupid); 1905 goto close_fd_exit; 1906 } 1907 1908 if (QLIST_EMPTY(&vfio_group_list)) { 1909 qemu_register_reset(vfio_reset_handler, NULL); 1910 } 1911 1912 QLIST_INSERT_HEAD(&vfio_group_list, group, next); 1913 1914 return group; 1915 1916 close_fd_exit: 1917 close(group->fd); 1918 1919 free_group_exit: 1920 g_free(group); 1921 1922 return NULL; 1923 } 1924 1925 void vfio_put_group(VFIOGroup *group) 1926 { 1927 if (!group || !QLIST_EMPTY(&group->device_list)) { 1928 return; 1929 } 1930 1931 if (!group->ram_block_discard_allowed) { 1932 ram_block_discard_disable(false); 1933 } 1934 vfio_kvm_device_del_group(group); 1935 vfio_disconnect_container(group); 1936 QLIST_REMOVE(group, next); 1937 trace_vfio_put_group(group->fd); 1938 close(group->fd); 1939 g_free(group); 1940 1941 if (QLIST_EMPTY(&vfio_group_list)) { 1942 qemu_unregister_reset(vfio_reset_handler, NULL); 1943 } 1944 } 1945 1946 int vfio_get_device(VFIOGroup *group, const char *name, 1947 VFIODevice *vbasedev, Error **errp) 1948 { 1949 struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; 1950 int ret, fd; 1951 1952 fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); 1953 if (fd < 0) { 1954 error_setg_errno(errp, errno, "error getting device from group %d", 1955 group->groupid); 1956 error_append_hint(errp, 1957 "Verify all devices in group %d are bound to vfio-<bus> " 1958 "or pci-stub and not already in use\n", group->groupid); 1959 return fd; 1960 } 1961 1962 ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info); 1963 if (ret) { 1964 error_setg_errno(errp, errno, "error getting device info"); 1965 close(fd); 1966 return ret; 1967 } 1968 1969 /* 1970 * Set discarding of RAM as not broken for this group if the driver knows 1971 * the device operates compatibly with discarding. Setting must be 1972 * consistent per group, but since compatibility is really only possible 1973 * with mdev currently, we expect singleton groups. 
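     *
     * (Note: when the device does allow discarding, the
     * ram_block_discard_disable(true) reference taken in
     * vfio_connect_container() is dropped again just below; for all other
     * groups it is dropped later in vfio_put_group().)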
1974 */ 1975 if (vbasedev->ram_block_discard_allowed != 1976 group->ram_block_discard_allowed) { 1977 if (!QLIST_EMPTY(&group->device_list)) { 1978 error_setg(errp, "Inconsistent setting of support for discarding " 1979 "RAM (e.g., balloon) within group"); 1980 close(fd); 1981 return -1; 1982 } 1983 1984 if (!group->ram_block_discard_allowed) { 1985 group->ram_block_discard_allowed = true; 1986 ram_block_discard_disable(false); 1987 } 1988 } 1989 1990 vbasedev->fd = fd; 1991 vbasedev->group = group; 1992 QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); 1993 1994 vbasedev->num_irqs = dev_info.num_irqs; 1995 vbasedev->num_regions = dev_info.num_regions; 1996 vbasedev->flags = dev_info.flags; 1997 1998 trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions, 1999 dev_info.num_irqs); 2000 2001 vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); 2002 return 0; 2003 } 2004 2005 void vfio_put_base_device(VFIODevice *vbasedev) 2006 { 2007 if (!vbasedev->group) { 2008 return; 2009 } 2010 QLIST_REMOVE(vbasedev, next); 2011 vbasedev->group = NULL; 2012 trace_vfio_put_base_device(vbasedev->fd); 2013 close(vbasedev->fd); 2014 } 2015 2016 int vfio_get_region_info(VFIODevice *vbasedev, int index, 2017 struct vfio_region_info **info) 2018 { 2019 size_t argsz = sizeof(struct vfio_region_info); 2020 2021 *info = g_malloc0(argsz); 2022 2023 (*info)->index = index; 2024 retry: 2025 (*info)->argsz = argsz; 2026 2027 if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) { 2028 g_free(*info); 2029 *info = NULL; 2030 return -errno; 2031 } 2032 2033 if ((*info)->argsz > argsz) { 2034 argsz = (*info)->argsz; 2035 *info = g_realloc(*info, argsz); 2036 2037 goto retry; 2038 } 2039 2040 return 0; 2041 } 2042 2043 int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type, 2044 uint32_t subtype, struct vfio_region_info **info) 2045 { 2046 int i; 2047 2048 for (i = 0; i < vbasedev->num_regions; i++) { 2049 struct vfio_info_cap_header *hdr; 2050 struct vfio_region_info_cap_type *cap_type; 2051 2052 if (vfio_get_region_info(vbasedev, i, info)) { 2053 continue; 2054 } 2055 2056 hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE); 2057 if (!hdr) { 2058 g_free(*info); 2059 continue; 2060 } 2061 2062 cap_type = container_of(hdr, struct vfio_region_info_cap_type, header); 2063 2064 trace_vfio_get_dev_region(vbasedev->name, i, 2065 cap_type->type, cap_type->subtype); 2066 2067 if (cap_type->type == type && cap_type->subtype == subtype) { 2068 return 0; 2069 } 2070 2071 g_free(*info); 2072 } 2073 2074 *info = NULL; 2075 return -ENODEV; 2076 } 2077 2078 bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) 2079 { 2080 struct vfio_region_info *info = NULL; 2081 bool ret = false; 2082 2083 if (!vfio_get_region_info(vbasedev, region, &info)) { 2084 if (vfio_get_region_info_cap(info, cap_type)) { 2085 ret = true; 2086 } 2087 g_free(info); 2088 } 2089 2090 return ret; 2091 } 2092 2093 /* 2094 * Interfaces for IBM EEH (Enhanced Error Handling) 2095 */ 2096 static bool vfio_eeh_container_ok(VFIOContainer *container) 2097 { 2098 /* 2099 * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO 2100 * implementation is broken if there are multiple groups in a 2101 * container. The hardware works in units of Partitionable 2102 * Endpoints (== IOMMU groups) and the EEH operations naively 2103 * iterate across all groups in the container, without any logic 2104 * to make sure the groups have their state synchronized. 
     * For certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /* We don't yet have logic to synchronize EEH state across
         * multiple containers */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}
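
/*
 * Illustrative usage sketch (not part of the original file): a minimal,
 * hypothetical consumer of the helpers above might look roughly like the
 * following.  The group number, device name, region index and the "dev"
 * object are made up for illustration, and error handling is elided.
 *
 *     Error *err = NULL;
 *     VFIOGroup *group = vfio_get_group(26, &address_space_memory, &err);
 *
 *     if (group && !vfio_get_device(group, "0000:06:0d.0", vbasedev, &err)) {
 *         VFIORegion region;
 *         EventNotifier notifier;
 *
 *         vfio_region_setup(OBJECT(dev), vbasedev, &region, 0, "bar0");
 *         vfio_region_mmap(&region);
 *
 *         // Route one of the device's interrupts to an eventfd.
 *         event_notifier_init(&notifier, 0);
 *         vfio_set_irq_signaling(vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
 *                                VFIO_IRQ_SET_ACTION_TRIGGER,
 *                                event_notifier_get_fd(&notifier), &err);
 *     }
 *
 *     ...
 *     vfio_put_base_device(vbasedev);
 *     vfio_put_group(group);
 */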