/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/pci.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/misc.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"
#include "sysemu/tpm.h"

VFIODeviceList vfio_device_list =
    QLIST_HEAD_INITIALIZER(vfio_device_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM. Once created it lives
 * for the life of the VM. Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm. Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
int vfio_kvm_device_fd = -1;
#endif

/*
 * Device state interfaces
 */

bool vfio_mig_active(void)
{
    VFIODevice *vbasedev;

    if (QLIST_EMPTY(&vfio_device_list)) {
        return false;
    }

    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->migration_blocker) {
            return false;
        }
    }
    return true;
}

static Error *multiple_devices_migration_blocker;

/*
 * Multiple devices migration is allowed only if all devices support P2P
 * migration. Single device migration is allowed regardless of P2P migration
 * support.
 */
static bool vfio_multiple_devices_migration_is_supported(void)
{
    VFIODevice *vbasedev;
    unsigned int device_num = 0;
    bool all_support_p2p = true;

    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->migration) {
            device_num++;

            if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) {
                all_support_p2p = false;
            }
        }
    }

    return all_support_p2p || device_num <= 1;
}

int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
{
    int ret;

    if (vfio_multiple_devices_migration_is_supported()) {
        return 0;
    }

    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
        error_setg(errp, "Multiple VFIO devices migration is supported only if "
                         "all of them support P2P migration");
        return -EINVAL;
    }

    if (multiple_devices_migration_blocker) {
        return 0;
    }

    error_setg(&multiple_devices_migration_blocker,
               "Multiple VFIO devices migration is supported only if all of "
               "them support P2P migration");
    ret = migrate_add_blocker_normal(&multiple_devices_migration_blocker, errp);

    return ret;
}

void vfio_unblock_multiple_devices_migration(void)
{
    if (!multiple_devices_migration_blocker ||
        !vfio_multiple_devices_migration_is_supported()) {
        return;
    }

    migrate_del_blocker(&multiple_devices_migration_blocker);
}

bool vfio_viommu_preset(VFIODevice *vbasedev)
{
    return vbasedev->bcontainer->space->as != &address_space_memory;
}

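/*
 * Report a fatal error against the ongoing migration, if any. Errors detected
 * outside of the migration thread (e.g. in IOMMU notifiers) are funneled
 * through here so that the migration fails cleanly.
 */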
static void vfio_set_migration_error(int ret)
{
    if (migration_is_setup_or_active()) {
        migration_file_set_error(ret, NULL);
    }
}

bool vfio_device_state_is_running(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    return migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
           migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P;
}

bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    return migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ||
           migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
}

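/*
 * Return true if a dirty bitmap sync through the container is currently
 * meaningful: migration must be active and every device in the container
 * must be migrating. If a device has pre-copy dirty page tracking disabled,
 * this only holds once that device has left the RUNNING/PRE_COPY states.
 */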
static bool vfio_devices_all_dirty_tracking(VFIOContainerBase *bcontainer)
{
    VFIODevice *vbasedev;

    if (!migration_is_active() && !migration_is_device()) {
        return false;
    }

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        VFIOMigration *migration = vbasedev->migration;

        if (!migration) {
            return false;
        }

        if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
            (vfio_device_state_is_running(vbasedev) ||
             vfio_device_state_is_precopy(vbasedev))) {
            return false;
        }
    }
    return true;
}

bool vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer)
{
    VFIODevice *vbasedev;

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        if (!vbasedev->dirty_pages_supported) {
            return false;
        }
    }

    return true;
}

/*
 * Check if all VFIO devices are running and migration is active, which is
 * essentially equivalent to the migration being in pre-copy phase.
 */
bool
vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer)
{
    VFIODevice *vbasedev;

    if (!migration_is_active()) {
        return false;
    }

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        VFIOMigration *migration = vbasedev->migration;

        if (!migration) {
            return false;
        }

        if (vfio_device_state_is_running(vbasedev) ||
            vfio_device_state_is_precopy(vbasedev)) {
            continue;
        } else {
            return false;
        }
    }
    return true;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           memory_region_is_protected(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space. These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held. */
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only,
                               Error **errp)
{
    bool ret, mr_has_discard_manager;

    ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
                               &mr_has_discard_manager, errp);
    if (ret && mr_has_discard_manager) {
        /*
         * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
         * pages will remain pinned inside vfio until unmapped, resulting in a
         * higher memory consumption than expected. If memory would get
         * populated again later, there would be an inconsistency between pages
         * pinned by vfio and pages seen by QEMU. This is the case until
         * unmapped from the IOMMU (e.g., during device reset).
         *
         * With malicious guests, we really only care about pinning more memory
         * than expected. RLIMIT_MEMLOCK set for the user/process can never be
         * exceeded and can be used to mitigate this problem.
         */
        warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
                         " RAM (e.g., virtio-mem) works, however, malicious"
                         " guests can trigger pinning of more memory than"
                         " intended via an IOMMU. It's possible to mitigate"
                         " by setting/adjusting RLIMIT_MEMLOCK.");
    }
    return ret;
}

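/*
 * vIOMMU IOTLB notifier: mirror guest IOMMU (un)mappings into the host by
 * issuing VFIO DMA map/unmap calls on the container for the translated
 * ranges.
 */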
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainerBase *bcontainer = giommu->bcontainer;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;
    Error *local_err = NULL;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        vfio_set_migration_error(-EINVAL);
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, &local_err)) {
            error_report_err(local_err);
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_container_dma_map(bcontainer, iova,
                                     iotlb->addr_mask + 1, vaddr,
                                     read_only);
        if (ret) {
            error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%s)",
                         bcontainer, iova,
                         iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
        }
    } else {
        ret = vfio_container_dma_unmap(bcontainer, iova,
                                       iotlb->addr_mask + 1, iotlb);
        if (ret) {
            error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%s)",
                         bcontainer, iova,
                         iotlb->addr_mask + 1, ret, strerror(-ret));
            vfio_set_migration_error(ret);
        }
    }
out:
    rcu_read_unlock();
}

static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    VFIOContainerBase *bcontainer = vrdl->bcontainer;
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    int ret;

    /* Unmap with a single call. */
    ret = vfio_container_dma_unmap(bcontainer, iova, size, NULL);
    if (ret) {
        error_report("%s: vfio_container_dma_unmap() failed: %s", __func__,
                     strerror(-ret));
    }
}

static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    VFIOContainerBase *bcontainer = vrdl->bcontainer;
    const hwaddr end = section->offset_within_region +
                       int128_get64(section->size);
    hwaddr start, next, iova;
    void *vaddr;
    int ret;

    /*
     * Map in (aligned within memory region) minimum granularity, so we can
     * unmap in minimum granularity later.
     */
    for (start = section->offset_within_region; start < end; start = next) {
        next = ROUND_UP(start + 1, vrdl->granularity);
        next = MIN(next, end);

        iova = start - section->offset_within_region +
               section->offset_within_address_space;
        vaddr = memory_region_get_ram_ptr(section->mr) + start;

        ret = vfio_container_dma_map(bcontainer, iova, next - start,
                                     vaddr, section->readonly);
        if (ret) {
            /* Rollback */
            vfio_ram_discard_notify_discard(rdl, section);
            return ret;
        }
    }
    return 0;
}

static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer,
                                               MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl;

    /* Ignore some corner cases not relevant in practice. */
    g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE));
    g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
                             TARGET_PAGE_SIZE));
    g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));

    vrdl = g_new0(VFIORamDiscardListener, 1);
    vrdl->bcontainer = bcontainer;
    vrdl->mr = section->mr;
    vrdl->offset_within_address_space = section->offset_within_address_space;
    vrdl->size = int128_get64(section->size);
    vrdl->granularity = ram_discard_manager_get_min_granularity(rdm,
                                                                section->mr);

    g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
    g_assert(bcontainer->pgsizes &&
             vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes));

    ram_discard_listener_init(&vrdl->listener,
                              vfio_ram_discard_notify_populate,
                              vfio_ram_discard_notify_discard, true);
    ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
    QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next);

    /*
     * Sanity-check if we have a theoretically problematic setup where we could
     * exceed the maximum number of possible DMA mappings over time. We assume
     * that each mapped section in the same address space as a RamDiscardManager
     * section consumes exactly one DMA mapping, with the exception of
     * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
     * in the same address space as RamDiscardManager sections.
     *
     * We assume that each section in the address space consumes one memslot.
     * We take the number of KVM memory slots as a best guess for the maximum
     * number of sections in the address space we could have over time,
     * also consuming DMA mappings.
     */
    if (bcontainer->dma_max_mappings) {
        unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;

#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            max_memslots = kvm_get_max_memslots();
        }
#endif

        QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
            hwaddr start, end;

            start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
                                    vrdl->granularity);
            end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
                           vrdl->granularity);
            vrdl_mappings += (end - start) / vrdl->granularity;
            vrdl_count++;
        }

        if (vrdl_mappings + max_memslots - vrdl_count >
            bcontainer->dma_max_mappings) {
            warn_report("%s: possibly running out of DMA mappings. E.g., try"
                        " increasing the 'block-size' of virtio-mem devices."
                        " Maximum possible DMA mappings: %d, Maximum possible"
                        " memslots: %d", __func__, bcontainer->dma_max_mappings,
                        max_memslots);
        }
    }
}

static void vfio_unregister_ram_discard_listener(VFIOContainerBase *bcontainer,
                                                 MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl = NULL;

    QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
        if (vrdl->mr == section->mr &&
            vrdl->offset_within_address_space ==
            section->offset_within_address_space) {
            break;
        }
    }

    if (!vrdl) {
        hw_error("vfio: Trying to unregister missing RAM discard listener");
    }

    ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
    QLIST_REMOVE(vrdl, next);
    g_free(vrdl);
}

static bool vfio_known_safe_misalignment(MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!TPM_IS_CRB(mr->owner)) {
        return false;
    }

    /* this is a known safe misaligned region, just trace for debug purpose */
    trace_vfio_known_safe_misalignment(memory_region_name(mr),
                                       section->offset_within_address_space,
                                       section->offset_within_region,
                                       qemu_real_host_page_size());
    return true;
}

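/*
 * Filter out sections the memory listener should not act on: skipped region
 * types and sections whose offset within the region and offset within the
 * address space are not equally aligned to the host page size (except for
 * known safe misalignments such as the TPM CRB region, which are only traced).
 */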
static bool vfio_listener_valid_section(MemoryRegionSection *section,
                                        const char *name)
{
    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_skip(name,
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return false;
    }

    if (unlikely((section->offset_within_address_space &
                  ~qemu_real_host_page_mask()) !=
                 (section->offset_within_region & ~qemu_real_host_page_mask()))) {
        if (!vfio_known_safe_misalignment(section)) {
            error_report("%s received unaligned region %s iova=0x%"PRIx64
                         " offset_within_region=0x%"PRIx64
                         " qemu_real_host_page_size=0x%"PRIxPTR,
                         __func__, memory_region_name(section->mr),
                         section->offset_within_address_space,
                         section->offset_within_region,
                         qemu_real_host_page_size());
        }
        return false;
    }

    return true;
}

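/*
 * Compute the host-page-aligned IOVA range covered by a section. Returns
 * false if the aligned range is empty; otherwise *out_iova and *out_end hold
 * the inclusive bounds and *out_llend (if non-NULL) the exclusive 128-bit end.
 */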
static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer,
                                        MemoryRegionSection *section,
                                        hwaddr *out_iova, hwaddr *out_end,
                                        Int128 *out_llend)
{
    Int128 llend;
    hwaddr iova;

    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));

    if (int128_ge(int128_make64(iova), llend)) {
        return false;
    }

    *out_iova = iova;
    *out_end = int128_get64(int128_sub(llend, int128_one()));
    if (out_llend) {
        *out_llend = llend;
    }
    return true;
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    Error *err = NULL;

    if (!vfio_listener_valid_section(section, "region_add")) {
        return;
    }

    if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
                                     &llend)) {
        if (memory_region_is_ram_device(section->mr)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                qemu_real_host_page_size());
        }
        return;
    }

    if (!vfio_container_add_section_window(bcontainer, section, &err)) {
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu_mr = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->bcontainer = bcontainer;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_IOTLB_EVENTS,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr,
                                                     bcontainer->pgsizes,
                                                     &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }

        if (bcontainer->iova_ranges) {
            ret = memory_region_iommu_set_iova_ranges(giommu->iommu_mr,
                                                      bcontainer->iova_ranges,
                                                      &err);
            if (ret) {
                g_free(giommu);
                goto fail;
            }
        }

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu_mr, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    /*
     * For RAM memory regions with a RamDiscardManager, we only want to map the
     * actually populated parts - and update the mapping whenever we're notified
     * about changes.
     */
    if (memory_region_has_ram_discard_manager(section->mr)) {
        vfio_register_ram_discard_listener(bcontainer, section);
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize),
                                 vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%s)",
                   bcontainer, iova, int128_get64(llsize), vaddr, ret,
                   strerror(-ret));
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_reportf_err(err, "PCI p2p may not work: ");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!bcontainer->initialized) {
        if (!bcontainer->error) {
            error_propagate_prepend(&bcontainer->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (!vfio_listener_valid_section(section, "region_del")) {
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
                                     &llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;

        pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    } else if (memory_region_has_ram_discard_manager(section->mr)) {
        vfio_unregister_ram_discard_listener(bcontainer, section);
        /* Unregistering will trigger an unmap. */
        try_unmap = false;
    }

    if (try_unmap) {
        if (int128_eq(llsize, int128_2_64())) {
            /* The unmap ioctl doesn't accept a full 64-bit span. */
            llsize = int128_rshift(llsize, 1);
            ret = vfio_container_dma_unmap(bcontainer, iova,
                                           int128_get64(llsize), NULL);
            if (ret) {
                error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                             "0x%"HWADDR_PRIx") = %d (%s)",
                             bcontainer, iova, int128_get64(llsize), ret,
                             strerror(-ret));
            }
            iova += int128_get64(llsize);
        }
        ret = vfio_container_dma_unmap(bcontainer, iova,
                                       int128_get64(llsize), NULL);
        if (ret) {
            error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%s)",
                         bcontainer, iova, int128_get64(llsize), ret,
                         strerror(-ret));
        }
    }

    memory_region_unref(section->mr);

    vfio_container_del_section_window(bcontainer, section);
}

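/*
 * IOVA ranges handed to the device dirty tracker. The address space is
 * condensed into at most three ranges: one for 32-bit DMA, one for 64-bit
 * DMA and one for the 64-bit PCI hole (see vfio_dirty_tracking_update()).
 */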
typedef struct VFIODirtyRanges {
    hwaddr min32;
    hwaddr max32;
    hwaddr min64;
    hwaddr max64;
    hwaddr minpci64;
    hwaddr maxpci64;
} VFIODirtyRanges;

typedef struct VFIODirtyRangesListener {
    VFIOContainerBase *bcontainer;
    VFIODirtyRanges ranges;
    MemoryListener listener;
} VFIODirtyRangesListener;

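/*
 * Return true if the section's memory region is owned by one of the VFIO PCI
 * devices in this container, i.e. it is a device BAR rather than guest RAM.
 * Used to classify mappings into the PCI 64-bit hole range below.
 */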
static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
                                     VFIOContainerBase *bcontainer)
{
    VFIOPCIDevice *pcidev;
    VFIODevice *vbasedev;
    Object *owner;

    owner = memory_region_owner(section->mr);

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
            continue;
        }
        pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
        if (OBJECT(pcidev) == owner) {
            return true;
        }
    }

    return false;
}

static void vfio_dirty_tracking_update(MemoryListener *listener,
                                       MemoryRegionSection *section)
{
    VFIODirtyRangesListener *dirty = container_of(listener,
                                                  VFIODirtyRangesListener,
                                                  listener);
    VFIODirtyRanges *range = &dirty->ranges;
    hwaddr iova, end, *min, *max;

    if (!vfio_listener_valid_section(section, "tracking_update") ||
        !vfio_get_section_iova_range(dirty->bcontainer, section,
                                     &iova, &end, NULL)) {
        return;
    }

    /*
     * The address space passed to the dirty tracker is reduced to three ranges:
     * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the
     * PCI 64-bit hole.
     *
     * The underlying reports of dirty will query a sub-interval of each of
     * these ranges.
     *
     * The purpose of the three range handling is to handle known cases of big
     * holes in the address space, like the x86 AMD 1T hole, and firmware (like
     * OVMF) which may relocate the pci-hole64 to the end of the address space.
     * The latter would otherwise generate large ranges for tracking, stressing
     * the limits of supported hardware. The pci-hole32 will always be below 4G
     * (overlapping or not) so it doesn't need special handling and is part of
     * the 32-bit range.
     *
     * The alternative would be an IOVATree but that has a much bigger runtime
     * overhead and unnecessary complexity.
     */
    if (vfio_section_is_vfio_pci(section, dirty->bcontainer) &&
        iova >= UINT32_MAX) {
        min = &range->minpci64;
        max = &range->maxpci64;
    } else {
        min = (end <= UINT32_MAX) ? &range->min32 : &range->min64;
        max = (end <= UINT32_MAX) ? &range->max32 : &range->max64;
    }
    if (*min > iova) {
        *min = iova;
    }
    if (*max < end) {
        *max = end;
    }

    trace_vfio_device_dirty_tracking_update(iova, end, *min, *max);
    return;
}

static const MemoryListener vfio_dirty_tracking_listener = {
    .name = "vfio-tracking",
    .region_add = vfio_dirty_tracking_update,
};

static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer,
                                     VFIODirtyRanges *ranges)
{
    VFIODirtyRangesListener dirty;

    memset(&dirty, 0, sizeof(dirty));
    dirty.ranges.min32 = UINT32_MAX;
    dirty.ranges.min64 = UINT64_MAX;
    dirty.ranges.minpci64 = UINT64_MAX;
    dirty.listener = vfio_dirty_tracking_listener;
    dirty.bcontainer = bcontainer;

    memory_listener_register(&dirty.listener,
                             bcontainer->space->as);

    *ranges = dirty.ranges;

    /*
     * The memory listener is synchronous, and is used to calculate the
     * ranges for dirty tracking. Unregister it after we are done as we are not
     * interested in any follow-up updates.
     */
    memory_listener_unregister(&dirty.listener);
}

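/*
 * Stop device dirty tracking: issue VFIO_DEVICE_FEATURE with
 * DMA_LOGGING_STOP on every device in the container that currently has
 * dirty tracking enabled.
 */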
static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
                              sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    VFIODevice *vbasedev;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_SET |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        if (!vbasedev->dirty_tracking) {
            continue;
        }

        if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
            warn_report("%s: Failed to stop DMA logging, err %d (%s)",
                        vbasedev->name, -errno, strerror(errno));
        }
        vbasedev->dirty_tracking = false;
    }
}

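/*
 * Build the variable-sized VFIO_DEVICE_FEATURE_DMA_LOGGING_START payload,
 * with one vfio_device_feature_dma_logging_range entry per non-empty range
 * computed by vfio_dirty_tracking_init(). Returns NULL with errno set on
 * allocation failure.
 */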
static struct vfio_device_feature *
vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer,
                                             VFIODirtyRanges *tracking)
{
    struct vfio_device_feature *feature;
    size_t feature_size;
    struct vfio_device_feature_dma_logging_control *control;
    struct vfio_device_feature_dma_logging_range *ranges;

    feature_size = sizeof(struct vfio_device_feature) +
                   sizeof(struct vfio_device_feature_dma_logging_control);
    feature = g_try_malloc0(feature_size);
    if (!feature) {
        errno = ENOMEM;
        return NULL;
    }
    feature->argsz = feature_size;
    feature->flags = VFIO_DEVICE_FEATURE_SET |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_START;

    control = (struct vfio_device_feature_dma_logging_control *)feature->data;
    control->page_size = qemu_real_host_page_size();

    /*
     * DMA logging uAPI guarantees to support at least a number of ranges that
     * fits into a single host kernel base page.
     */
    control->num_ranges = !!tracking->max32 + !!tracking->max64 +
                          !!tracking->maxpci64;
    ranges = g_try_new0(struct vfio_device_feature_dma_logging_range,
                        control->num_ranges);
    if (!ranges) {
        g_free(feature);
        errno = ENOMEM;

        return NULL;
    }

    control->ranges = (uintptr_t)ranges;
    if (tracking->max32) {
        ranges->iova = tracking->min32;
        ranges->length = (tracking->max32 - tracking->min32) + 1;
        ranges++;
    }
    if (tracking->max64) {
        ranges->iova = tracking->min64;
        ranges->length = (tracking->max64 - tracking->min64) + 1;
        ranges++;
    }
    if (tracking->maxpci64) {
        ranges->iova = tracking->minpci64;
        ranges->length = (tracking->maxpci64 - tracking->minpci64) + 1;
    }

    trace_vfio_device_dirty_tracking_start(control->num_ranges,
                                           tracking->min32, tracking->max32,
                                           tracking->min64, tracking->max64,
                                           tracking->minpci64, tracking->maxpci64);

    return feature;
}

static void vfio_device_feature_dma_logging_start_destroy(
    struct vfio_device_feature *feature)
{
    struct vfio_device_feature_dma_logging_control *control =
        (struct vfio_device_feature_dma_logging_control *)feature->data;
    struct vfio_device_feature_dma_logging_range *ranges =
        (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges;

    g_free(ranges);
    g_free(feature);
}

static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer,
                                          Error **errp)
{
    struct vfio_device_feature *feature;
    VFIODirtyRanges ranges;
    VFIODevice *vbasedev;
    int ret = 0;

    vfio_dirty_tracking_init(bcontainer, &ranges);
    feature = vfio_device_feature_dma_logging_start_create(bcontainer,
                                                           &ranges);
    if (!feature) {
        error_setg_errno(errp, errno, "Failed to prepare DMA logging");
        return -errno;
    }

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        if (vbasedev->dirty_tracking) {
            continue;
        }

        ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
        if (ret) {
            ret = -errno;
            error_setg_errno(errp, errno, "%s: Failed to start DMA logging",
                             vbasedev->name);
            goto out;
        }
        vbasedev->dirty_tracking = true;
    }

out:
    if (ret) {
        vfio_devices_dma_logging_stop(bcontainer);
    }

    vfio_device_feature_dma_logging_start_destroy(feature);

    return ret;
}

static bool vfio_listener_log_global_start(MemoryListener *listener,
                                           Error **errp)
{
    ERRP_GUARD();
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    int ret;

    if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
        ret = vfio_devices_dma_logging_start(bcontainer, errp);
    } else {
        ret = vfio_container_set_dirty_page_tracking(bcontainer, true, errp);
    }

    if (ret) {
        error_prepend(errp, "vfio: Could not start dirty page tracking - ");
    }
    return !ret;
}

static void vfio_listener_log_global_stop(MemoryListener *listener)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    Error *local_err = NULL;
    int ret = 0;

    if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
        vfio_devices_dma_logging_stop(bcontainer);
    } else {
        ret = vfio_container_set_dirty_page_tracking(bcontainer, false,
                                                     &local_err);
    }

    if (ret) {
        error_prepend(&local_err,
                      "vfio: Could not stop dirty page tracking - ");
        error_report_err(local_err);
        vfio_set_migration_error(ret);
    }
}

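/*
 * Fetch the dirty bitmap for one IOVA range from a single device via
 * VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT. The bitmap granularity is the
 * host page size.
 */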
static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
                                          hwaddr size, void *bitmap)
{
    uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
                        sizeof(struct vfio_device_feature_dma_logging_report),
                        sizeof(uint64_t))] = {};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_dma_logging_report *report =
        (struct vfio_device_feature_dma_logging_report *)feature->data;

    report->iova = iova;
    report->length = size;
    report->page_size = qemu_real_host_page_size();
    report->bitmap = (uintptr_t)bitmap;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_GET |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
        return -errno;
    }

    return 0;
}

int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                 VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
{
    VFIODevice *vbasedev;
    int ret;

    QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
        ret = vfio_device_dma_logging_report(vbasedev, iova, size,
                                             vbmap->bitmap);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "%s: Failed to get DMA logging report, iova: "
                             "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx,
                             vbasedev->name, iova, size);

            return ret;
        }
    }

    return 0;
}

int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
                          uint64_t size, ram_addr_t ram_addr, Error **errp)
{
    bool all_device_dirty_tracking =
        vfio_devices_all_device_dirty_tracking(bcontainer);
    uint64_t dirty_pages;
    VFIOBitmap vbmap;
    int ret;

    if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
        cpu_physical_memory_set_dirty_range(ram_addr, size,
                                            tcg_enabled() ? DIRTY_CLIENTS_ALL :
                                            DIRTY_CLIENTS_NOCODE);
        return 0;
    }

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "Failed to allocate dirty tracking bitmap");
        return ret;
    }

    if (all_device_dirty_tracking) {
        ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
                                              errp);
    } else {
        ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
                                                errp);
    }

    if (ret) {
        goto out;
    }

    dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
                                                         vbmap.pages);

    trace_vfio_get_dirty_bitmap(iova, size, vbmap.size, ram_addr, dirty_pages);
out:
    g_free(vbmap.bitmap);

    return ret;
}

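/*
 * Helper used when syncing dirty bitmaps behind a vIOMMU: wraps an IOMMU
 * notifier so that replaying the current mappings feeds each translated
 * range into vfio_get_dirty_bitmap().
 */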
typedef struct {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} vfio_giommu_dirty_notifier;

static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    vfio_giommu_dirty_notifier *gdn = container_of(n,
                                                vfio_giommu_dirty_notifier, n);
    VFIOGuestIOMMU *giommu = gdn->giommu;
    VFIOContainerBase *bcontainer = giommu->bcontainer;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    ram_addr_t translated_addr;
    Error *local_err = NULL;
    int ret = -EINVAL;

    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        goto out;
    }

    rcu_read_lock();
    if (!vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL, &local_err)) {
        error_report_err(local_err);
        goto out_unlock;
    }

    ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1,
                                translated_addr, &local_err);
    if (ret) {
        error_prepend(&local_err,
                      "vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
                      "0x%"HWADDR_PRIx") failed - ", bcontainer, iova,
                      iotlb->addr_mask + 1);
        error_report_err(local_err);
    }

out_unlock:
    rcu_read_unlock();

out:
    if (ret) {
        vfio_set_migration_error(ret);
    }
}

static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
                                             void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
                                section->offset_within_region;
    VFIORamDiscardListener *vrdl = opaque;
    Error *local_err = NULL;
    int ret;

    /*
     * Sync the whole mapped region (spanning multiple individual mappings)
     * in one go.
     */
    ret = vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr,
                                &local_err);
    if (ret) {
        error_report_err(local_err);
    }
    return ret;
}

static int
vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer,
                                            MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl = NULL;

    QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
        if (vrdl->mr == section->mr &&
            vrdl->offset_within_address_space ==
            section->offset_within_address_space) {
            break;
        }
    }

    if (!vrdl) {
        hw_error("vfio: Trying to sync missing RAM discard listener");
    }

    /*
     * We only want/can synchronize the bitmap for actually mapped parts -
     * which correspond to populated parts. Replay all populated parts.
     */
    return ram_discard_manager_replay_populated(rdm, section,
                                              vfio_ram_discard_get_dirty_bitmap,
                                                vrdl);
}

static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer,
                                  MemoryRegionSection *section, Error **errp)
{
    ram_addr_t ram_addr;

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                Int128 llend;
                vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
                int idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr,
                                                        MEMTXATTRS_UNSPECIFIED);

                llend = int128_add(int128_make64(section->offset_within_region),
                                   section->size);
                llend = int128_sub(llend, int128_one());

                iommu_notifier_init(&gdn.n,
                                    vfio_iommu_map_dirty_notify,
                                    IOMMU_NOTIFIER_MAP,
                                    section->offset_within_region,
                                    int128_get64(llend),
                                    idx);
                memory_region_iommu_replay(giommu->iommu_mr, &gdn.n);
                break;
            }
        }
        return 0;
    } else if (memory_region_has_ram_discard_manager(section->mr)) {
        int ret;

        ret = vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section);
        if (ret) {
            error_setg(errp,
                       "Failed to sync dirty bitmap with RAM discard listener");
        }
        return ret;
    }

    ram_addr = memory_region_get_ram_addr(section->mr) +
               section->offset_within_region;

    return vfio_get_dirty_bitmap(bcontainer,
                   REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
                   int128_get64(section->size), ram_addr, errp);
}

static void vfio_listener_log_sync(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
                                                 listener);
    int ret;
    Error *local_err = NULL;

    if (vfio_listener_skipped_section(section)) {
        return;
    }

    if (vfio_devices_all_dirty_tracking(bcontainer)) {
        ret = vfio_sync_dirty_bitmap(bcontainer, section, &local_err);
        if (ret) {
            error_report_err(local_err);
            vfio_set_migration_error(ret);
        }
    }
}

const MemoryListener vfio_memory_listener = {
    .name = "vfio",
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
    .log_global_start = vfio_listener_log_global_start,
    .log_global_stop = vfio_listener_log_global_stop,
    .log_sync = vfio_listener_log_sync,
};

void vfio_reset_handler(void *opaque)
{
    VFIODevice *vbasedev;

    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->dev->realized) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
        if (vbasedev->dev->realized && vbasedev->needs_reset) {
            vbasedev->ops->vfio_hot_reset_multi(vbasedev);
        }
    }
}

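/*
 * Register a VFIO file descriptor with the per-VM KVM VFIO pseudo device,
 * creating the pseudo device on first use (see vfio_kvm_device_fd above).
 * No-op when KVM is not enabled or not compiled in.
 */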
int vfio_kvm_device_add_fd(int fd, Error **errp)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_FILE,
        .attr = KVM_DEV_VFIO_FILE_ADD,
        .addr = (uint64_t)(unsigned long)&fd,
    };

    if (!kvm_enabled()) {
        return 0;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_setg_errno(errp, errno, "Failed to create KVM VFIO device");
            return -errno;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_setg_errno(errp, errno, "Failed to add fd %d to KVM VFIO device",
                         fd);
        return -errno;
    }
#endif
    return 0;
}

int vfio_kvm_device_del_fd(int fd, Error **errp)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_FILE,
        .attr = KVM_DEV_VFIO_FILE_DEL,
        .addr = (uint64_t)(unsigned long)&fd,
    };

    if (vfio_kvm_device_fd < 0) {
        error_setg(errp, "KVM VFIO device isn't created yet");
        return -EINVAL;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_setg_errno(errp, errno,
                         "Failed to remove fd %d from KVM VFIO device", fd);
        return -errno;
    }
#endif
    return 0;
}

VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    if (QLIST_EMPTY(&vfio_address_spaces)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (!QLIST_EMPTY(&space->containers)) {
        return;
    }

    QLIST_REMOVE(space, list);
    g_free(space);

    if (QLIST_EMPTY(&vfio_address_spaces)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

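/*
 * Query VFIO_DEVICE_GET_INFO, retrying with a larger buffer until the
 * kernel-reported argsz fits, so callers also receive any capability chain.
 * Returns NULL on ioctl failure; the caller owns the returned buffer.
 */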
struct vfio_device_info *vfio_get_device_info(int fd)
{
    struct vfio_device_info *info;
    uint32_t argsz = sizeof(*info);

    info = g_malloc0(argsz);

retry:
    info->argsz = argsz;

    if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) {
        g_free(info);
        return NULL;
    }

    if (info->argsz > argsz) {
        argsz = info->argsz;
        info = g_realloc(info, argsz);
        goto retry;
    }

    return info;
}

bool vfio_attach_device(char *name, VFIODevice *vbasedev,
                        AddressSpace *as, Error **errp)
{
    const VFIOIOMMUClass *ops =
        VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_LEGACY));

    if (vbasedev->iommufd) {
        ops = VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD));
    }

    assert(ops);

    return ops->attach_device(name, vbasedev, as, errp);
}

void vfio_detach_device(VFIODevice *vbasedev)
{
    if (!vbasedev->bcontainer) {
        return;
    }
    vbasedev->bcontainer->ops->detach_device(vbasedev);
}