/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/reset.h"
#include "trace.h"
#include "qapi/error.h"
#include "pci.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case
         * sections with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}

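/*
 * Unmap an IOVA range and retrieve the dirty bitmap covering it in a single
 * VFIO_IOMMU_UNMAP_DMA call, so that pages the device dirtied right up to
 * the unmap are reported to the migration code.
 */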
static int vfio_dma_unmap_bitmap(const VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainerBase *bcontainer = &container->bcontainer;
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in the bitmap
     * to be of qemu_real_host_page_size when marking them dirty, hence set
     * bitmap->pgsize to qemu_real_host_page_size().
     */
    bitmap->pgsize = qemu_real_host_page_size();
    bitmap->size = vbmap.size;
    bitmap->data = (__u64 *)vbmap.bitmap;

    if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
                iotlb->translated_addr, vbmap.pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

unmap_exit:
    g_free(unmap);
    g_free(vbmap.bitmap);

    return ret;
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };
    bool need_dirty_sync = false;
    int ret;

    if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
        if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
            bcontainer->dirty_pages_supported) {
            return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
        }

        need_dirty_sync = true;
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_legacy_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
            continue;
        }
        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    if (need_dirty_sync) {
        ret = vfio_get_dirty_bitmap(bcontainer, iova, size,
                                    iotlb->translated_addr);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                               ram_addr_t size, void *vaddr, bool readonly)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY &&
         vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

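/* Start or stop dirty page tracking via the VFIO_IOMMU_DIRTY_PAGES ioctl. */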
static int
vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
                                    bool start)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        ret = -errno;
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
    }

    return ret;
}

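/*
 * Fetch the dirty bitmap for an IOVA range from the IOMMU backend into the
 * caller-provided VFIOBitmap.
 */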
static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
                                          VFIOBitmap *vbmap,
                                          hwaddr iova, hwaddr size)
{
    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                                  bcontainer);
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects pages in the bitmap
     * to be of qemu_real_host_page_size when marking them dirty, hence set
     * the bitmap's pgsize to qemu_real_host_page_size().
     */
    range->bitmap.pgsize = qemu_real_host_page_size();
    range->bitmap.size = vbmap->size;
    range->bitmap.data = (__u64 *)vbmap->bitmap;

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        ret = -errno;
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
    }

    g_free(dbitmap);

    return ret;
}

static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (!hdr) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *) hdr;
        *avail = cap->avail;
    }

    return true;
}

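/*
 * Collect the usable IOVA ranges reported by the IOMMU info capability chain
 * and record them in the base container.
 */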
static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
                                     VFIOContainerBase *bcontainer)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_iova_range *cap;

    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
    if (!hdr) {
        return false;
    }

    cap = (void *)hdr;

    for (int i = 0; i < cap->nr_iovas; i++) {
        Range *range = g_new(Range, 1);

        range_set_bounds(range, cap->iova_ranges[i].start,
                         cap->iova_ranges[i].end);
        bcontainer->iova_ranges =
            range_list_insert(bcontainer->iova_ranges, range);
    }

    return true;
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_add_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
    Error *err = NULL;

    if (vfio_kvm_device_del_fd(group->fd, &err)) {
        error_reportf_err(err, "group ID %d: ", group->groupid);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

/*
 * vfio_get_iommu_class - get the VFIOIOMMUClass associated with an iommu_type
 */
static const VFIOIOMMUClass *vfio_get_iommu_class(int iommu_type, Error **errp)
{
    ObjectClass *klass = NULL;

    switch (iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        klass = object_class_by_name(TYPE_VFIO_IOMMU_LEGACY);
        break;
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
        klass = object_class_by_name(TYPE_VFIO_IOMMU_SPAPR);
        break;
    default:
        g_assert_not_reached();
    }

    return VFIO_IOMMU_CLASS(klass);
}

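/*
 * Attach the group to the container, select an IOMMU type for the container
 * and initialize the base container with the matching VFIOIOMMUClass.
 */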
static int vfio_set_iommu(VFIOContainer *container, int group_fd,
                          VFIOAddressSpace *space, Error **errp)
{
    int iommu_type, ret;
    const VFIOIOMMUClass *vioc;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1 and
             * v2, the running platform may not support v2 and there is no
             * way to guess it until an IOMMU group gets added to the
             * container.  So in case it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;

    vioc = vfio_get_iommu_class(iommu_type, errp);
    if (!vioc) {
        error_setg(errp, "No available IOMMU models");
        return -EINVAL;
    }

    vfio_container_init(&container->bcontainer, space, vioc);
    return 0;
}

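/*
 * Query VFIO_IOMMU_GET_INFO, growing the buffer until the supplied argsz is
 * large enough to hold all capabilities reported by the kernel.
 */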
static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

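/*
 * Record the IOMMU backend's migration capability: dirty page tracking
 * support, maximum dirty bitmap size and supported dirty page sizes.
 */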
static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() expects dirty bitmaps whose
     * page size is qemu_real_host_page_size, so only enable dirty page
     * tracking if the kernel supports that page size.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
        bcontainer->dirty_pages_supported = true;
        bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

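/*
 * Per-container setup for the legacy backend: query the IOMMU info and
 * populate the base container's page sizes, DMA mapping limit, usable IOVA
 * ranges and migration capabilities.
 */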
static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
{
    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                            bcontainer);
    g_autofree struct vfio_iommu_type1_info *info = NULL;
    int ret;

    ret = vfio_get_iommu_info(container, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
        return ret;
    }

    if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
        bcontainer->pgsizes = info->iova_pgsizes;
    } else {
        bcontainer->pgsizes = qemu_real_host_page_size();
    }

    if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
        bcontainer->dma_max_mappings = 65535;
    }

    vfio_get_info_iova_range(info, bcontainer);

    vfio_get_iommu_info_migration(container, info);
    return 0;
}

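/*
 * Attach @group to an existing container in @as when possible, otherwise
 * create a new container, select its IOMMU backend and register its memory
 * listener.
 */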
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    VFIOContainerBase *bcontainer;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory; it does not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the RamDiscardManager
     * with some IOMMU types.  vfio_ram_block_discard_disable() handles the
     * details once we know which type of IOMMU we are using.
     */

    QLIST_FOREACH(bcontainer, &space->containers, next) {
        container = container_of(bcontainer, VFIOContainer, bcontainer);
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            ret = vfio_ram_block_discard_disable(container, true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
                          &container->fd)) {
                    error_report("vfio: error disconnecting group %d from"
                                 " container", group->groupid);
                }
                return ret;
            }
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;
    bcontainer = &container->bcontainer;

    ret = vfio_set_iommu(container, group->fd, space, errp);
    if (ret) {
        goto free_container_exit;
    }

    ret = vfio_cpr_register_container(bcontainer, errp);
    if (ret) {
        goto free_container_exit;
    }

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto unregister_container_exit;
    }

    assert(bcontainer->ops->setup);

    ret = bcontainer->ops->setup(bcontainer, errp);
    if (ret) {
        goto enable_discards_exit;
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, bcontainer, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    bcontainer->listener = vfio_memory_listener;
    memory_listener_register(&bcontainer->listener, bcontainer->space->as);

    if (bcontainer->error) {
        ret = -1;
        error_propagate_prepend(errp, bcontainer->error,
                                "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    bcontainer->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(bcontainer, next);
    vfio_kvm_device_del_group(group);
    memory_listener_unregister(&bcontainer->listener);
    if (bcontainer->ops->release) {
        bcontainer->ops->release(bcontainer);
    }

enable_discards_exit:
    vfio_ram_block_discard_disable(container, false);

unregister_container_exit:
    vfio_cpr_unregister_container(bcontainer);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

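/*
 * Detach @group from its container, destroying the container once its last
 * group is removed.
 */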
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;
    VFIOContainerBase *bcontainer = &container->bcontainer;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener before unsetting the container, since
     * the unset may destroy the backend container if this is the last group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        memory_listener_unregister(&bcontainer->listener);
        if (bcontainer->ops->release) {
            bcontainer->ops->release(bcontainer);
        }
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = bcontainer->space;

        vfio_container_destroy(bcontainer);

        trace_vfio_disconnect_container(container->fd);
        vfio_cpr_unregister_container(bcontainer);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

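/*
 * Look up or create the VFIOGroup for @groupid and connect it to a container
 * in @as.  A group may only be used in a single address space.
 */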
static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    ERRP_GUARD();
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->bcontainer.space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

static void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);
}

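/*
 * Obtain a device fd for @name from @group and fill in the common VFIODevice
 * fields from its VFIO_DEVICE_GET_INFO data.
 */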
static int vfio_get_device(VFIOGroup *group, const char *name,
                           VFIODevice *vbasedev, Error **errp)
{
    g_autofree struct vfio_device_info *info = NULL;
    int fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    info = vfio_get_device_info(fd);
    if (!info) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return -1;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = info->num_irqs;
    vbasedev->num_regions = info->num_regions;
    vbasedev->flags = info->flags;

    trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);

    vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);

    return 0;
}

static void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

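/* Derive the IOMMU group number from the device's sysfs iommu_group link. */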
static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
{
    char *tmp, group_path[PATH_MAX];
    g_autofree char *group_name = NULL;
    int ret, groupid;
    ssize_t len;

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = g_path_get_basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }
    return groupid;
}

/*
 * vfio_attach_device: attach a device to a security context
 * @name and @vbasedev->name are likely to be different depending
 * on the type of the device, hence the need for passing @name
 */
static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
                                     AddressSpace *as, Error **errp)
{
    int groupid = vfio_device_groupid(vbasedev, errp);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    VFIOContainerBase *bcontainer;
    int ret;

    if (groupid < 0) {
        return groupid;
    }

    trace_vfio_attach_device(vbasedev->name, groupid);

    group = vfio_get_group(groupid, as, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    bcontainer = &group->container->bcontainer;
    vbasedev->bcontainer = bcontainer;
    QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
    QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);

    return ret;
}

static void vfio_legacy_detach_device(VFIODevice *vbasedev)
{
    VFIOGroup *group = vbasedev->group;

    QLIST_REMOVE(vbasedev, global_next);
    QLIST_REMOVE(vbasedev, container_next);
    vbasedev->bcontainer = NULL;
    trace_vfio_detach_device(vbasedev->name, group->groupid);
    vfio_put_base_device(vbasedev);
    vfio_put_group(group);
}

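/*
 * Perform a PCI hot reset of @vbasedev.  All dependent devices must belong to
 * groups owned by this instance; their group fds are passed to the
 * VFIO_DEVICE_PCI_HOT_RESET ioctl and INTx is re-enabled afterwards.
 */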
static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    VFIOGroup *group;
    struct vfio_pci_hot_reset_info *info = NULL;
    struct vfio_pci_dependent_device *devices;
    struct vfio_pci_hot_reset *reset;
    int32_t *fds;
    int ret, i, count;
    bool multi = false;

    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");

    if (!single) {
        vfio_pci_pre_reset(vdev);
    }
    vdev->vbasedev.needs_reset = false;

    ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);

    if (ret) {
        goto out_single;
    }
    devices = &info->devices[0];

    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);

    /* Verify that we have all the groups required */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        trace_vfio_pci_hot_reset_dep_devices(host.domain,
                host.bus, host.slot, host.function, devices[i].group_id);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            if (!vdev->has_pm_reset) {
                error_report("vfio: Cannot reset device %s, "
                             "depends on group %d which is not owned.",
                             vdev->vbasedev.name, devices[i].group_id);
            }
            ret = -EPERM;
            goto out;
        }

        /* Prep dependent devices for reset and clear our marker. */
        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                if (single) {
                    ret = -EINVAL;
                    goto out_single;
                }
                vfio_pci_pre_reset(tmp);
                tmp->vbasedev.needs_reset = false;
                multi = true;
                break;
            }
        }
    }

    if (!single && !multi) {
        ret = -EINVAL;
        goto out_single;
    }

    /* Determine how many group fds need to be passed */
    count = 0;
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                count++;
                break;
            }
        }
    }

    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
    fds = &reset->group_fds[0];

    /* Fill in group fds */
    QLIST_FOREACH(group, &vfio_group_list, next) {
        for (i = 0; i < info->count; i++) {
            if (group->groupid == devices[i].group_id) {
                fds[reset->count++] = group->fd;
                break;
            }
        }
    }

    /* Bus reset! */
    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
    g_free(reset);
    if (ret) {
        ret = -errno;
    }

    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
                                    ret ? strerror(errno) : "Success");

out:
    /* Re-enable INTx on affected devices */
    for (i = 0; i < info->count; i++) {
        PCIHostDeviceAddress host;
        VFIOPCIDevice *tmp;
        VFIODevice *vbasedev_iter;

        host.domain = devices[i].segment;
        host.bus = devices[i].bus;
        host.slot = PCI_SLOT(devices[i].devfn);
        host.function = PCI_FUNC(devices[i].devfn);

        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
            continue;
        }

        QLIST_FOREACH(group, &vfio_group_list, next) {
            if (group->groupid == devices[i].group_id) {
                break;
            }
        }

        if (!group) {
            break;
        }

        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
            if (!vbasedev_iter->dev->realized ||
                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
                continue;
            }
            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
                vfio_pci_post_reset(tmp);
                break;
            }
        }
    }
out_single:
    if (!single) {
        vfio_pci_post_reset(vdev);
    }
    g_free(info);

    return ret;
}

static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->setup = vfio_legacy_setup;
    vioc->dma_map = vfio_legacy_dma_map;
    vioc->dma_unmap = vfio_legacy_dma_unmap;
    vioc->attach_device = vfio_legacy_attach_device;
    vioc->detach_device = vfio_legacy_detach_device;
    vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking;
    vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap;
    vioc->pci_hot_reset = vfio_legacy_pci_hot_reset;
}

static const TypeInfo types[] = {
    {
        .name = TYPE_VFIO_IOMMU_LEGACY,
        .parent = TYPE_VFIO_IOMMU,
        .class_init = vfio_iommu_legacy_class_init,
    },
};

DEFINE_TYPES(types)