1 /* 2 * generic functions used by VFIO devices 3 * 4 * Copyright Red Hat, Inc. 2012 5 * 6 * Authors: 7 * Alex Williamson <alex.williamson@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Based on qemu-kvm device-assignment: 13 * Adapted for KVM by Qumranet. 14 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) 15 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) 16 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) 17 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) 18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) 19 */ 20 21 #include "qemu/osdep.h" 22 #include <sys/ioctl.h> 23 #include <linux/vfio.h> 24 25 #include "hw/vfio/vfio-common.h" 26 #include "exec/address-spaces.h" 27 #include "exec/memory.h" 28 #include "exec/ram_addr.h" 29 #include "hw/hw.h" 30 #include "qemu/error-report.h" 31 #include "qemu/range.h" 32 #include "sysemu/reset.h" 33 #include "trace.h" 34 #include "qapi/error.h" 35 #include "pci.h" 36 37 VFIOGroupList vfio_group_list = 38 QLIST_HEAD_INITIALIZER(vfio_group_list); 39 40 static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state) 41 { 42 switch (container->iommu_type) { 43 case VFIO_TYPE1v2_IOMMU: 44 case VFIO_TYPE1_IOMMU: 45 /* 46 * We support coordinated discarding of RAM via the RamDiscardManager. 47 */ 48 return ram_block_uncoordinated_discard_disable(state); 49 default: 50 /* 51 * VFIO_SPAPR_TCE_IOMMU most probably works just fine with 52 * RamDiscardManager, however, it is completely untested. 53 * 54 * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does 55 * completely the opposite of managing mapping/pinning dynamically as 56 * required by RamDiscardManager. We would have to special-case sections 57 * with a RamDiscardManager. 58 */ 59 return ram_block_discard_disable(state); 60 } 61 } 62 63 static int vfio_dma_unmap_bitmap(const VFIOContainer *container, 64 hwaddr iova, ram_addr_t size, 65 IOMMUTLBEntry *iotlb) 66 { 67 const VFIOContainerBase *bcontainer = &container->bcontainer; 68 struct vfio_iommu_type1_dma_unmap *unmap; 69 struct vfio_bitmap *bitmap; 70 VFIOBitmap vbmap; 71 int ret; 72 73 ret = vfio_bitmap_alloc(&vbmap, size); 74 if (ret) { 75 return ret; 76 } 77 78 unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); 79 80 unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); 81 unmap->iova = iova; 82 unmap->size = size; 83 unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; 84 bitmap = (struct vfio_bitmap *)&unmap->data; 85 86 /* 87 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 88 * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize 89 * to qemu_real_host_page_size. 90 */ 91 bitmap->pgsize = qemu_real_host_page_size(); 92 bitmap->size = vbmap.size; 93 bitmap->data = (__u64 *)vbmap.bitmap; 94 95 if (vbmap.size > bcontainer->max_dirty_bitmap_size) { 96 error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size); 97 ret = -E2BIG; 98 goto unmap_exit; 99 } 100 101 ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); 102 if (!ret) { 103 cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, 104 iotlb->translated_addr, vbmap.pages); 105 } else { 106 error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); 107 } 108 109 unmap_exit: 110 g_free(unmap); 111 g_free(vbmap.bitmap); 112 113 return ret; 114 } 115 116 /* 117 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 118 */ 119 static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, 120 hwaddr iova, ram_addr_t size, 121 IOMMUTLBEntry *iotlb) 122 { 123 const VFIOContainer *container = container_of(bcontainer, VFIOContainer, 124 bcontainer); 125 struct vfio_iommu_type1_dma_unmap unmap = { 126 .argsz = sizeof(unmap), 127 .flags = 0, 128 .iova = iova, 129 .size = size, 130 }; 131 bool need_dirty_sync = false; 132 int ret; 133 134 if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) { 135 if (!vfio_devices_all_device_dirty_tracking(bcontainer) && 136 bcontainer->dirty_pages_supported) { 137 return vfio_dma_unmap_bitmap(container, iova, size, iotlb); 138 } 139 140 need_dirty_sync = true; 141 } 142 143 while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { 144 /* 145 * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c 146 * v4.15) where an overflow in its wrap-around check prevents us from 147 * unmapping the last page of the address space. Test for the error 148 * condition and re-try the unmap excluding the last page. The 149 * expectation is that we've never mapped the last page anyway and this 150 * unmap request comes via vIOMMU support which also makes it unlikely 151 * that this page is used. This bug was introduced well after type1 v2 152 * support was introduced, so we shouldn't need to test for v1. A fix 153 * is queued for kernel v5.0 so this workaround can be removed once 154 * affected kernels are sufficiently deprecated. 155 */ 156 if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) && 157 container->iommu_type == VFIO_TYPE1v2_IOMMU) { 158 trace_vfio_legacy_dma_unmap_overflow_workaround(); 159 unmap.size -= 1ULL << ctz64(bcontainer->pgsizes); 160 continue; 161 } 162 error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno)); 163 return -errno; 164 } 165 166 if (need_dirty_sync) { 167 ret = vfio_get_dirty_bitmap(bcontainer, iova, size, 168 iotlb->translated_addr); 169 if (ret) { 170 return ret; 171 } 172 } 173 174 return 0; 175 } 176 177 static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, 178 ram_addr_t size, void *vaddr, bool readonly) 179 { 180 const VFIOContainer *container = container_of(bcontainer, VFIOContainer, 181 bcontainer); 182 struct vfio_iommu_type1_dma_map map = { 183 .argsz = sizeof(map), 184 .flags = VFIO_DMA_MAP_FLAG_READ, 185 .vaddr = (__u64)(uintptr_t)vaddr, 186 .iova = iova, 187 .size = size, 188 }; 189 190 if (!readonly) { 191 map.flags |= VFIO_DMA_MAP_FLAG_WRITE; 192 } 193 194 /* 195 * Try the mapping, if it fails with EBUSY, unmap the region and try 196 * again. This shouldn't be necessary, but we sometimes see it in 197 * the VGA ROM space. 198 */ 199 if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || 200 (errno == EBUSY && 201 vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 && 202 ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { 203 return 0; 204 } 205 206 error_report("VFIO_MAP_DMA failed: %s", strerror(errno)); 207 return -errno; 208 } 209 210 static int 211 vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, 212 bool start) 213 { 214 const VFIOContainer *container = container_of(bcontainer, VFIOContainer, 215 bcontainer); 216 int ret; 217 struct vfio_iommu_type1_dirty_bitmap dirty = { 218 .argsz = sizeof(dirty), 219 }; 220 221 if (start) { 222 dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; 223 } else { 224 dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP; 225 } 226 227 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); 228 if (ret) { 229 ret = -errno; 230 error_report("Failed to set dirty tracking flag 0x%x errno: %d", 231 dirty.flags, errno); 232 } 233 234 return ret; 235 } 236 237 static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer, 238 VFIOBitmap *vbmap, 239 hwaddr iova, hwaddr size) 240 { 241 const VFIOContainer *container = container_of(bcontainer, VFIOContainer, 242 bcontainer); 243 struct vfio_iommu_type1_dirty_bitmap *dbitmap; 244 struct vfio_iommu_type1_dirty_bitmap_get *range; 245 int ret; 246 247 dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); 248 249 dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); 250 dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; 251 range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; 252 range->iova = iova; 253 range->size = size; 254 255 /* 256 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 257 * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize 258 * to qemu_real_host_page_size. 259 */ 260 range->bitmap.pgsize = qemu_real_host_page_size(); 261 range->bitmap.size = vbmap->size; 262 range->bitmap.data = (__u64 *)vbmap->bitmap; 263 264 ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); 265 if (ret) { 266 ret = -errno; 267 error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64 268 " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova, 269 (uint64_t)range->size, errno); 270 } 271 272 g_free(dbitmap); 273 274 return ret; 275 } 276 277 static struct vfio_info_cap_header * 278 vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) 279 { 280 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { 281 return NULL; 282 } 283 284 return vfio_get_cap((void *)info, info->cap_offset, id); 285 } 286 287 bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, 288 unsigned int *avail) 289 { 290 struct vfio_info_cap_header *hdr; 291 struct vfio_iommu_type1_info_dma_avail *cap; 292 293 /* If the capability cannot be found, assume no DMA limiting */ 294 hdr = vfio_get_iommu_type1_info_cap(info, 295 VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL); 296 if (!hdr) { 297 return false; 298 } 299 300 if (avail != NULL) { 301 cap = (void *) hdr; 302 *avail = cap->avail; 303 } 304 305 return true; 306 } 307 308 static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, 309 VFIOContainerBase *bcontainer) 310 { 311 struct vfio_info_cap_header *hdr; 312 struct vfio_iommu_type1_info_cap_iova_range *cap; 313 314 hdr = vfio_get_iommu_type1_info_cap(info, 315 VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE); 316 if (!hdr) { 317 return false; 318 } 319 320 cap = (void *)hdr; 321 322 for (int i = 0; i < cap->nr_iovas; i++) { 323 Range *range = g_new(Range, 1); 324 325 range_set_bounds(range, cap->iova_ranges[i].start, 326 cap->iova_ranges[i].end); 327 bcontainer->iova_ranges = 328 range_list_insert(bcontainer->iova_ranges, range); 329 } 330 331 return true; 332 } 333 334 static void vfio_kvm_device_add_group(VFIOGroup *group) 335 { 336 Error *err = NULL; 337 338 if (vfio_kvm_device_add_fd(group->fd, &err)) { 339 error_reportf_err(err, "group ID %d: ", group->groupid); 340 } 341 } 342 343 static void vfio_kvm_device_del_group(VFIOGroup *group) 344 { 345 Error *err = NULL; 346 347 if (vfio_kvm_device_del_fd(group->fd, &err)) { 348 error_reportf_err(err, "group ID %d: ", group->groupid); 349 } 350 } 351 352 /* 353 * vfio_get_iommu_type - selects the richest iommu_type (v2 first) 354 */ 355 static int vfio_get_iommu_type(VFIOContainer *container, 356 Error **errp) 357 { 358 int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, 359 VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; 360 int i; 361 362 for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { 363 if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { 364 return iommu_types[i]; 365 } 366 } 367 error_setg(errp, "No available IOMMU models"); 368 return -EINVAL; 369 } 370 371 /* 372 * vfio_get_iommu_ops - get a VFIOIOMMUClass associated with a type 373 */ 374 static const VFIOIOMMUClass *vfio_get_iommu_class(int iommu_type, Error **errp) 375 { 376 ObjectClass *klass = NULL; 377 378 switch (iommu_type) { 379 case VFIO_TYPE1v2_IOMMU: 380 case VFIO_TYPE1_IOMMU: 381 klass = object_class_by_name(TYPE_VFIO_IOMMU_LEGACY); 382 break; 383 case VFIO_SPAPR_TCE_v2_IOMMU: 384 case VFIO_SPAPR_TCE_IOMMU: 385 klass = object_class_by_name(TYPE_VFIO_IOMMU_SPAPR); 386 break; 387 default: 388 g_assert_not_reached(); 389 }; 390 391 return VFIO_IOMMU_CLASS(klass); 392 } 393 394 static int vfio_set_iommu(VFIOContainer *container, int group_fd, 395 VFIOAddressSpace *space, Error **errp) 396 { 397 int iommu_type, ret; 398 const VFIOIOMMUClass *vioc; 399 400 iommu_type = vfio_get_iommu_type(container, errp); 401 if (iommu_type < 0) { 402 return iommu_type; 403 } 404 405 ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd); 406 if (ret) { 407 error_setg_errno(errp, errno, "Failed to set group container"); 408 return -errno; 409 } 410 411 while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) { 412 if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { 413 /* 414 * On sPAPR, despite the IOMMU subdriver always advertises v1 and 415 * v2, the running platform may not support v2 and there is no 416 * way to guess it until an IOMMU group gets added to the container. 417 * So in case it fails with v2, try v1 as a fallback. 418 */ 419 iommu_type = VFIO_SPAPR_TCE_IOMMU; 420 continue; 421 } 422 error_setg_errno(errp, errno, "Failed to set iommu for container"); 423 return -errno; 424 } 425 426 container->iommu_type = iommu_type; 427 428 vioc = vfio_get_iommu_class(iommu_type, errp); 429 if (!vioc) { 430 error_setg(errp, "No available IOMMU models"); 431 return -EINVAL; 432 } 433 434 vfio_container_init(&container->bcontainer, space, vioc); 435 return 0; 436 } 437 438 static int vfio_get_iommu_info(VFIOContainer *container, 439 struct vfio_iommu_type1_info **info) 440 { 441 442 size_t argsz = sizeof(struct vfio_iommu_type1_info); 443 444 *info = g_new0(struct vfio_iommu_type1_info, 1); 445 again: 446 (*info)->argsz = argsz; 447 448 if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { 449 g_free(*info); 450 *info = NULL; 451 return -errno; 452 } 453 454 if (((*info)->argsz > argsz)) { 455 argsz = (*info)->argsz; 456 *info = g_realloc(*info, argsz); 457 goto again; 458 } 459 460 return 0; 461 } 462 463 static struct vfio_info_cap_header * 464 vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) 465 { 466 struct vfio_info_cap_header *hdr; 467 void *ptr = info; 468 469 if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { 470 return NULL; 471 } 472 473 for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { 474 if (hdr->id == id) { 475 return hdr; 476 } 477 } 478 479 return NULL; 480 } 481 482 static void vfio_get_iommu_info_migration(VFIOContainer *container, 483 struct vfio_iommu_type1_info *info) 484 { 485 struct vfio_info_cap_header *hdr; 486 struct vfio_iommu_type1_info_cap_migration *cap_mig; 487 VFIOContainerBase *bcontainer = &container->bcontainer; 488 489 hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); 490 if (!hdr) { 491 return; 492 } 493 494 cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, 495 header); 496 497 /* 498 * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of 499 * qemu_real_host_page_size to mark those dirty. 500 */ 501 if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { 502 bcontainer->dirty_pages_supported = true; 503 bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; 504 bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap; 505 } 506 } 507 508 static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp) 509 { 510 VFIOContainer *container = container_of(bcontainer, VFIOContainer, 511 bcontainer); 512 g_autofree struct vfio_iommu_type1_info *info = NULL; 513 int ret; 514 515 ret = vfio_get_iommu_info(container, &info); 516 if (ret) { 517 error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); 518 return ret; 519 } 520 521 if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { 522 bcontainer->pgsizes = info->iova_pgsizes; 523 } else { 524 bcontainer->pgsizes = qemu_real_host_page_size(); 525 } 526 527 if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) { 528 bcontainer->dma_max_mappings = 65535; 529 } 530 531 vfio_get_info_iova_range(info, bcontainer); 532 533 vfio_get_iommu_info_migration(container, info); 534 return 0; 535 } 536 537 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, 538 Error **errp) 539 { 540 VFIOContainer *container; 541 VFIOContainerBase *bcontainer; 542 int ret, fd; 543 VFIOAddressSpace *space; 544 545 space = vfio_get_address_space(as); 546 547 /* 548 * VFIO is currently incompatible with discarding of RAM insofar as the 549 * madvise to purge (zap) the page from QEMU's address space does not 550 * interact with the memory API and therefore leaves stale virtual to 551 * physical mappings in the IOMMU if the page was previously pinned. We 552 * therefore set discarding broken for each group added to a container, 553 * whether the container is used individually or shared. This provides 554 * us with options to allow devices within a group to opt-in and allow 555 * discarding, so long as it is done consistently for a group (for instance 556 * if the device is an mdev device where it is known that the host vendor 557 * driver will never pin pages outside of the working set of the guest 558 * driver, which would thus not be discarding candidates). 559 * 560 * The first opportunity to induce pinning occurs here where we attempt to 561 * attach the group to existing containers within the AddressSpace. If any 562 * pages are already zapped from the virtual address space, such as from 563 * previous discards, new pinning will cause valid mappings to be 564 * re-established. Likewise, when the overall MemoryListener for a new 565 * container is registered, a replay of mappings within the AddressSpace 566 * will occur, re-establishing any previously zapped pages as well. 567 * 568 * Especially virtio-balloon is currently only prevented from discarding 569 * new memory, it will not yet set ram_block_discard_set_required() and 570 * therefore, neither stops us here or deals with the sudden memory 571 * consumption of inflated memory. 572 * 573 * We do support discarding of memory coordinated via the RamDiscardManager 574 * with some IOMMU types. vfio_ram_block_discard_disable() handles the 575 * details once we know which type of IOMMU we are using. 576 */ 577 578 QLIST_FOREACH(bcontainer, &space->containers, next) { 579 container = container_of(bcontainer, VFIOContainer, bcontainer); 580 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { 581 ret = vfio_ram_block_discard_disable(container, true); 582 if (ret) { 583 error_setg_errno(errp, -ret, 584 "Cannot set discarding of RAM broken"); 585 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, 586 &container->fd)) { 587 error_report("vfio: error disconnecting group %d from" 588 " container", group->groupid); 589 } 590 return ret; 591 } 592 group->container = container; 593 QLIST_INSERT_HEAD(&container->group_list, group, container_next); 594 vfio_kvm_device_add_group(group); 595 return 0; 596 } 597 } 598 599 fd = qemu_open_old("/dev/vfio/vfio", O_RDWR); 600 if (fd < 0) { 601 error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio"); 602 ret = -errno; 603 goto put_space_exit; 604 } 605 606 ret = ioctl(fd, VFIO_GET_API_VERSION); 607 if (ret != VFIO_API_VERSION) { 608 error_setg(errp, "supported vfio version: %d, " 609 "reported version: %d", VFIO_API_VERSION, ret); 610 ret = -EINVAL; 611 goto close_fd_exit; 612 } 613 614 container = g_malloc0(sizeof(*container)); 615 container->fd = fd; 616 bcontainer = &container->bcontainer; 617 618 ret = vfio_set_iommu(container, group->fd, space, errp); 619 if (ret) { 620 goto free_container_exit; 621 } 622 623 ret = vfio_ram_block_discard_disable(container, true); 624 if (ret) { 625 error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); 626 goto free_container_exit; 627 } 628 629 assert(bcontainer->ops->setup); 630 631 ret = bcontainer->ops->setup(bcontainer, errp); 632 if (ret) { 633 goto enable_discards_exit; 634 } 635 636 vfio_kvm_device_add_group(group); 637 638 QLIST_INIT(&container->group_list); 639 QLIST_INSERT_HEAD(&space->containers, bcontainer, next); 640 641 group->container = container; 642 QLIST_INSERT_HEAD(&container->group_list, group, container_next); 643 644 bcontainer->listener = vfio_memory_listener; 645 memory_listener_register(&bcontainer->listener, bcontainer->space->as); 646 647 if (bcontainer->error) { 648 ret = -1; 649 error_propagate_prepend(errp, bcontainer->error, 650 "memory listener initialization failed: "); 651 goto listener_release_exit; 652 } 653 654 bcontainer->initialized = true; 655 656 return 0; 657 listener_release_exit: 658 QLIST_REMOVE(group, container_next); 659 QLIST_REMOVE(bcontainer, next); 660 vfio_kvm_device_del_group(group); 661 memory_listener_unregister(&bcontainer->listener); 662 if (bcontainer->ops->release) { 663 bcontainer->ops->release(bcontainer); 664 } 665 666 enable_discards_exit: 667 vfio_ram_block_discard_disable(container, false); 668 669 free_container_exit: 670 g_free(container); 671 672 close_fd_exit: 673 close(fd); 674 675 put_space_exit: 676 vfio_put_address_space(space); 677 678 return ret; 679 } 680 681 static void vfio_disconnect_container(VFIOGroup *group) 682 { 683 VFIOContainer *container = group->container; 684 VFIOContainerBase *bcontainer = &container->bcontainer; 685 686 QLIST_REMOVE(group, container_next); 687 group->container = NULL; 688 689 /* 690 * Explicitly release the listener first before unset container, 691 * since unset may destroy the backend container if it's the last 692 * group. 693 */ 694 if (QLIST_EMPTY(&container->group_list)) { 695 memory_listener_unregister(&bcontainer->listener); 696 if (bcontainer->ops->release) { 697 bcontainer->ops->release(bcontainer); 698 } 699 } 700 701 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { 702 error_report("vfio: error disconnecting group %d from container", 703 group->groupid); 704 } 705 706 if (QLIST_EMPTY(&container->group_list)) { 707 VFIOAddressSpace *space = bcontainer->space; 708 709 vfio_container_destroy(bcontainer); 710 711 trace_vfio_disconnect_container(container->fd); 712 close(container->fd); 713 g_free(container); 714 715 vfio_put_address_space(space); 716 } 717 } 718 719 static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) 720 { 721 VFIOGroup *group; 722 char path[32]; 723 struct vfio_group_status status = { .argsz = sizeof(status) }; 724 725 QLIST_FOREACH(group, &vfio_group_list, next) { 726 if (group->groupid == groupid) { 727 /* Found it. Now is it already in the right context? */ 728 if (group->container->bcontainer.space->as == as) { 729 return group; 730 } else { 731 error_setg(errp, "group %d used in multiple address spaces", 732 group->groupid); 733 return NULL; 734 } 735 } 736 } 737 738 group = g_malloc0(sizeof(*group)); 739 740 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); 741 group->fd = qemu_open_old(path, O_RDWR); 742 if (group->fd < 0) { 743 error_setg_errno(errp, errno, "failed to open %s", path); 744 goto free_group_exit; 745 } 746 747 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { 748 error_setg_errno(errp, errno, "failed to get group %d status", groupid); 749 goto close_fd_exit; 750 } 751 752 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { 753 error_setg(errp, "group %d is not viable", groupid); 754 error_append_hint(errp, 755 "Please ensure all devices within the iommu_group " 756 "are bound to their vfio bus driver.\n"); 757 goto close_fd_exit; 758 } 759 760 group->groupid = groupid; 761 QLIST_INIT(&group->device_list); 762 763 if (vfio_connect_container(group, as, errp)) { 764 error_prepend(errp, "failed to setup container for group %d: ", 765 groupid); 766 goto close_fd_exit; 767 } 768 769 QLIST_INSERT_HEAD(&vfio_group_list, group, next); 770 771 return group; 772 773 close_fd_exit: 774 close(group->fd); 775 776 free_group_exit: 777 g_free(group); 778 779 return NULL; 780 } 781 782 static void vfio_put_group(VFIOGroup *group) 783 { 784 if (!group || !QLIST_EMPTY(&group->device_list)) { 785 return; 786 } 787 788 if (!group->ram_block_discard_allowed) { 789 vfio_ram_block_discard_disable(group->container, false); 790 } 791 vfio_kvm_device_del_group(group); 792 vfio_disconnect_container(group); 793 QLIST_REMOVE(group, next); 794 trace_vfio_put_group(group->fd); 795 close(group->fd); 796 g_free(group); 797 } 798 799 static int vfio_get_device(VFIOGroup *group, const char *name, 800 VFIODevice *vbasedev, Error **errp) 801 { 802 g_autofree struct vfio_device_info *info = NULL; 803 int fd; 804 805 fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); 806 if (fd < 0) { 807 error_setg_errno(errp, errno, "error getting device from group %d", 808 group->groupid); 809 error_append_hint(errp, 810 "Verify all devices in group %d are bound to vfio-<bus> " 811 "or pci-stub and not already in use\n", group->groupid); 812 return fd; 813 } 814 815 info = vfio_get_device_info(fd); 816 if (!info) { 817 error_setg_errno(errp, errno, "error getting device info"); 818 close(fd); 819 return -1; 820 } 821 822 /* 823 * Set discarding of RAM as not broken for this group if the driver knows 824 * the device operates compatibly with discarding. Setting must be 825 * consistent per group, but since compatibility is really only possible 826 * with mdev currently, we expect singleton groups. 827 */ 828 if (vbasedev->ram_block_discard_allowed != 829 group->ram_block_discard_allowed) { 830 if (!QLIST_EMPTY(&group->device_list)) { 831 error_setg(errp, "Inconsistent setting of support for discarding " 832 "RAM (e.g., balloon) within group"); 833 close(fd); 834 return -1; 835 } 836 837 if (!group->ram_block_discard_allowed) { 838 group->ram_block_discard_allowed = true; 839 vfio_ram_block_discard_disable(group->container, false); 840 } 841 } 842 843 vbasedev->fd = fd; 844 vbasedev->group = group; 845 QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); 846 847 vbasedev->num_irqs = info->num_irqs; 848 vbasedev->num_regions = info->num_regions; 849 vbasedev->flags = info->flags; 850 851 trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs); 852 853 vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET); 854 855 return 0; 856 } 857 858 static void vfio_put_base_device(VFIODevice *vbasedev) 859 { 860 if (!vbasedev->group) { 861 return; 862 } 863 QLIST_REMOVE(vbasedev, next); 864 vbasedev->group = NULL; 865 trace_vfio_put_base_device(vbasedev->fd); 866 close(vbasedev->fd); 867 } 868 869 static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp) 870 { 871 char *tmp, group_path[PATH_MAX]; 872 g_autofree char *group_name = NULL; 873 int ret, groupid; 874 ssize_t len; 875 876 tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev); 877 len = readlink(tmp, group_path, sizeof(group_path)); 878 g_free(tmp); 879 880 if (len <= 0 || len >= sizeof(group_path)) { 881 ret = len < 0 ? -errno : -ENAMETOOLONG; 882 error_setg_errno(errp, -ret, "no iommu_group found"); 883 return ret; 884 } 885 886 group_path[len] = 0; 887 888 group_name = g_path_get_basename(group_path); 889 if (sscanf(group_name, "%d", &groupid) != 1) { 890 error_setg_errno(errp, errno, "failed to read %s", group_path); 891 return -errno; 892 } 893 return groupid; 894 } 895 896 /* 897 * vfio_attach_device: attach a device to a security context 898 * @name and @vbasedev->name are likely to be different depending 899 * on the type of the device, hence the need for passing @name 900 */ 901 static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev, 902 AddressSpace *as, Error **errp) 903 { 904 int groupid = vfio_device_groupid(vbasedev, errp); 905 VFIODevice *vbasedev_iter; 906 VFIOGroup *group; 907 VFIOContainerBase *bcontainer; 908 int ret; 909 910 if (groupid < 0) { 911 return groupid; 912 } 913 914 trace_vfio_attach_device(vbasedev->name, groupid); 915 916 group = vfio_get_group(groupid, as, errp); 917 if (!group) { 918 return -ENOENT; 919 } 920 921 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { 922 if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { 923 error_setg(errp, "device is already attached"); 924 vfio_put_group(group); 925 return -EBUSY; 926 } 927 } 928 ret = vfio_get_device(group, name, vbasedev, errp); 929 if (ret) { 930 vfio_put_group(group); 931 return ret; 932 } 933 934 bcontainer = &group->container->bcontainer; 935 vbasedev->bcontainer = bcontainer; 936 QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); 937 QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); 938 939 return ret; 940 } 941 942 static void vfio_legacy_detach_device(VFIODevice *vbasedev) 943 { 944 VFIOGroup *group = vbasedev->group; 945 946 QLIST_REMOVE(vbasedev, global_next); 947 QLIST_REMOVE(vbasedev, container_next); 948 vbasedev->bcontainer = NULL; 949 trace_vfio_detach_device(vbasedev->name, group->groupid); 950 vfio_put_base_device(vbasedev); 951 vfio_put_group(group); 952 } 953 954 static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single) 955 { 956 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); 957 VFIOGroup *group; 958 struct vfio_pci_hot_reset_info *info = NULL; 959 struct vfio_pci_dependent_device *devices; 960 struct vfio_pci_hot_reset *reset; 961 int32_t *fds; 962 int ret, i, count; 963 bool multi = false; 964 965 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); 966 967 if (!single) { 968 vfio_pci_pre_reset(vdev); 969 } 970 vdev->vbasedev.needs_reset = false; 971 972 ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); 973 974 if (ret) { 975 goto out_single; 976 } 977 devices = &info->devices[0]; 978 979 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); 980 981 /* Verify that we have all the groups required */ 982 for (i = 0; i < info->count; i++) { 983 PCIHostDeviceAddress host; 984 VFIOPCIDevice *tmp; 985 VFIODevice *vbasedev_iter; 986 987 host.domain = devices[i].segment; 988 host.bus = devices[i].bus; 989 host.slot = PCI_SLOT(devices[i].devfn); 990 host.function = PCI_FUNC(devices[i].devfn); 991 992 trace_vfio_pci_hot_reset_dep_devices(host.domain, 993 host.bus, host.slot, host.function, devices[i].group_id); 994 995 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { 996 continue; 997 } 998 999 QLIST_FOREACH(group, &vfio_group_list, next) { 1000 if (group->groupid == devices[i].group_id) { 1001 break; 1002 } 1003 } 1004 1005 if (!group) { 1006 if (!vdev->has_pm_reset) { 1007 error_report("vfio: Cannot reset device %s, " 1008 "depends on group %d which is not owned.", 1009 vdev->vbasedev.name, devices[i].group_id); 1010 } 1011 ret = -EPERM; 1012 goto out; 1013 } 1014 1015 /* Prep dependent devices for reset and clear our marker. */ 1016 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { 1017 if (!vbasedev_iter->dev->realized || 1018 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { 1019 continue; 1020 } 1021 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); 1022 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { 1023 if (single) { 1024 ret = -EINVAL; 1025 goto out_single; 1026 } 1027 vfio_pci_pre_reset(tmp); 1028 tmp->vbasedev.needs_reset = false; 1029 multi = true; 1030 break; 1031 } 1032 } 1033 } 1034 1035 if (!single && !multi) { 1036 ret = -EINVAL; 1037 goto out_single; 1038 } 1039 1040 /* Determine how many group fds need to be passed */ 1041 count = 0; 1042 QLIST_FOREACH(group, &vfio_group_list, next) { 1043 for (i = 0; i < info->count; i++) { 1044 if (group->groupid == devices[i].group_id) { 1045 count++; 1046 break; 1047 } 1048 } 1049 } 1050 1051 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); 1052 reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); 1053 fds = &reset->group_fds[0]; 1054 1055 /* Fill in group fds */ 1056 QLIST_FOREACH(group, &vfio_group_list, next) { 1057 for (i = 0; i < info->count; i++) { 1058 if (group->groupid == devices[i].group_id) { 1059 fds[reset->count++] = group->fd; 1060 break; 1061 } 1062 } 1063 } 1064 1065 /* Bus reset! */ 1066 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); 1067 g_free(reset); 1068 if (ret) { 1069 ret = -errno; 1070 } 1071 1072 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, 1073 ret ? strerror(errno) : "Success"); 1074 1075 out: 1076 /* Re-enable INTx on affected devices */ 1077 for (i = 0; i < info->count; i++) { 1078 PCIHostDeviceAddress host; 1079 VFIOPCIDevice *tmp; 1080 VFIODevice *vbasedev_iter; 1081 1082 host.domain = devices[i].segment; 1083 host.bus = devices[i].bus; 1084 host.slot = PCI_SLOT(devices[i].devfn); 1085 host.function = PCI_FUNC(devices[i].devfn); 1086 1087 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { 1088 continue; 1089 } 1090 1091 QLIST_FOREACH(group, &vfio_group_list, next) { 1092 if (group->groupid == devices[i].group_id) { 1093 break; 1094 } 1095 } 1096 1097 if (!group) { 1098 break; 1099 } 1100 1101 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { 1102 if (!vbasedev_iter->dev->realized || 1103 vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { 1104 continue; 1105 } 1106 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); 1107 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { 1108 vfio_pci_post_reset(tmp); 1109 break; 1110 } 1111 } 1112 } 1113 out_single: 1114 if (!single) { 1115 vfio_pci_post_reset(vdev); 1116 } 1117 g_free(info); 1118 1119 return ret; 1120 } 1121 1122 static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data) 1123 { 1124 VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); 1125 1126 vioc->setup = vfio_legacy_setup; 1127 vioc->dma_map = vfio_legacy_dma_map; 1128 vioc->dma_unmap = vfio_legacy_dma_unmap; 1129 vioc->attach_device = vfio_legacy_attach_device; 1130 vioc->detach_device = vfio_legacy_detach_device; 1131 vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking; 1132 vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap; 1133 vioc->pci_hot_reset = vfio_legacy_pci_hot_reset; 1134 }; 1135 1136 static const TypeInfo types[] = { 1137 { 1138 .name = TYPE_VFIO_IOMMU_LEGACY, 1139 .parent = TYPE_VFIO_IOMMU, 1140 .class_init = vfio_iommu_legacy_class_init, 1141 }, 1142 }; 1143 1144 DEFINE_TYPES(types) 1145