xref: /qemu/hw/virtio/vhost.c (revision dcc474c6)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "qemu/atomic.h"
20 #include "qemu/range.h"
21 #include "qemu/error-report.h"
22 #include "qemu/memfd.h"
23 #include "standard-headers/linux/vhost_types.h"
24 #include "exec/address-spaces.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/virtio/virtio-access.h"
27 #include "migration/blocker.h"
28 #include "migration/qemu-file-types.h"
29 #include "sysemu/dma.h"
30 #include "trace.h"
31 
32 /* enabled until disconnected backend stabilizes */
33 #define _VHOST_DEBUG 1
34 
35 #ifdef _VHOST_DEBUG
36 #define VHOST_OPS_DEBUG(fmt, ...) \
37     do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
38                       strerror(errno), errno); } while (0)
39 #else
40 #define VHOST_OPS_DEBUG(fmt, ...) \
41     do { } while (0)
42 #endif
43 
44 static struct vhost_log *vhost_log;
45 static struct vhost_log *vhost_log_shm;
46 
47 static unsigned int used_memslots;
48 static QLIST_HEAD(, vhost_dev) vhost_devices =
49     QLIST_HEAD_INITIALIZER(vhost_devices);
50 
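/*
 * Derived from the loop below: each registered vhost device reports its
 * backend's memory-slot limit, and a further slot can only be added while
 * the smallest of those limits is still above the number of slots in use.
 */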
51 bool vhost_has_free_slot(void)
52 {
53     unsigned int slots_limit = ~0U;
54     struct vhost_dev *hdev;
55 
56     QLIST_FOREACH(hdev, &vhost_devices, entry) {
57         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
58         slots_limit = MIN(slots_limit, r);
59     }
60     return slots_limit > used_memslots;
61 }
62 
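/*
 * Copy dirty bits from the vhost log into QEMU's dirty bitmap for the part
 * of @section that overlaps both [mfirst, mlast] and [rfirst, rlast].
 * Each vhost_log_chunk_t is a bitmap word: bit N set means the N-th
 * VHOST_LOG_PAGE-sized page of that chunk is dirty.  As an illustration
 * (assuming the usual 4 KiB VHOST_LOG_PAGE and 64-bit chunks), a chunk
 * covers 64 * 4 KiB = 256 KiB of guest memory, so a dirty page at guest
 * address A is recorded in word A / VHOST_LOG_CHUNK, bit
 * (A % VHOST_LOG_CHUNK) / VHOST_LOG_PAGE.
 */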
63 static void vhost_dev_sync_region(struct vhost_dev *dev,
64                                   MemoryRegionSection *section,
65                                   uint64_t mfirst, uint64_t mlast,
66                                   uint64_t rfirst, uint64_t rlast)
67 {
68     vhost_log_chunk_t *log = dev->log->log;
69 
70     uint64_t start = MAX(mfirst, rfirst);
71     uint64_t end = MIN(mlast, rlast);
72     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
73     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
74     uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
75 
76     if (end < start) {
77         return;
78     }
79     assert(end / VHOST_LOG_CHUNK < dev->log_size);
80     assert(start / VHOST_LOG_CHUNK < dev->log_size);
81 
82     for (; from < to; ++from) {
83         vhost_log_chunk_t log;
84         /* We first check with non-atomic: much cheaper,
85          * and we expect non-dirty to be the common case. */
86         if (!*from) {
87             addr += VHOST_LOG_CHUNK;
88             continue;
89         }
90         /* Data must be read atomically. We don't really need barrier semantics
91          * but it's easier to use atomic_* than roll our own. */
92         log = atomic_xchg(from, 0);
93         while (log) {
94             int bit = ctzl(log);
95             hwaddr page_addr;
96             hwaddr section_offset;
97             hwaddr mr_offset;
98             page_addr = addr + bit * VHOST_LOG_PAGE;
99             section_offset = page_addr - section->offset_within_address_space;
100             mr_offset = section_offset + section->offset_within_region;
101             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
102             log &= ~(0x1ull << bit);
103         }
104         addr += VHOST_LOG_CHUNK;
105     }
106 }
107 
108 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
109                                    MemoryRegionSection *section,
110                                    hwaddr first,
111                                    hwaddr last)
112 {
113     int i;
114     hwaddr start_addr;
115     hwaddr end_addr;
116 
117     if (!dev->log_enabled || !dev->started) {
118         return 0;
119     }
120     start_addr = section->offset_within_address_space;
121     end_addr = range_get_last(start_addr, int128_get64(section->size));
122     start_addr = MAX(first, start_addr);
123     end_addr = MIN(last, end_addr);
124 
125     for (i = 0; i < dev->mem->nregions; ++i) {
126         struct vhost_memory_region *reg = dev->mem->regions + i;
127         vhost_dev_sync_region(dev, section, start_addr, end_addr,
128                               reg->guest_phys_addr,
129                               range_get_last(reg->guest_phys_addr,
130                                              reg->memory_size));
131     }
132     for (i = 0; i < dev->nvqs; ++i) {
133         struct vhost_virtqueue *vq = dev->vqs + i;
134 
135         if (!vq->used_phys && !vq->used_size) {
136             continue;
137         }
138 
139         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
140                               range_get_last(vq->used_phys, vq->used_size));
141     }
142     return 0;
143 }
144 
145 static void vhost_log_sync(MemoryListener *listener,
146                           MemoryRegionSection *section)
147 {
148     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
149                                          memory_listener);
150     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
151 }
152 
153 static void vhost_log_sync_range(struct vhost_dev *dev,
154                                  hwaddr first, hwaddr last)
155 {
156     int i;
157     /* FIXME: this is N^2 in number of sections */
158     for (i = 0; i < dev->n_mem_sections; ++i) {
159         MemoryRegionSection *section = &dev->mem_sections[i];
160         vhost_sync_dirty_bitmap(dev, section, first, last);
161     }
162 }
163 
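/*
 * Dirty log size, in vhost_log_chunk_t units: enough chunks to cover the
 * highest guest physical address touched by any memory region or used ring.
 */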
164 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
165 {
166     uint64_t log_size = 0;
167     int i;
168     for (i = 0; i < dev->mem->nregions; ++i) {
169         struct vhost_memory_region *reg = dev->mem->regions + i;
170         uint64_t last = range_get_last(reg->guest_phys_addr,
171                                        reg->memory_size);
172         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
173     }
174     for (i = 0; i < dev->nvqs; ++i) {
175         struct vhost_virtqueue *vq = dev->vqs + i;
176 
177         if (!vq->used_phys && !vq->used_size) {
178             continue;
179         }
180 
181         uint64_t last = vq->used_phys + vq->used_size - 1;
182         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
183     }
184     return log_size;
185 }
186 
187 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
188 {
189     Error *err = NULL;
190     struct vhost_log *log;
191     uint64_t logsize = size * sizeof(*(log->log));
192     int fd = -1;
193 
194     log = g_new0(struct vhost_log, 1);
195     if (share) {
196         log->log = qemu_memfd_alloc("vhost-log", logsize,
197                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
198                                     &fd, &err);
199         if (err) {
200             error_report_err(err);
201             g_free(log);
202             return NULL;
203         }
204         memset(log->log, 0, logsize);
205     } else {
206         log->log = g_malloc0(logsize);
207     }
208 
209     log->size = size;
210     log->refcnt = 1;
211     log->fd = fd;
212 
213     return log;
214 }
215 
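/*
 * The dirty log is shared between all vhost devices: a single refcounted
 * vhost_log (a plain allocation, or a memfd-backed one for backends that
 * require a shareable log) is handed out and only replaced when a caller
 * asks for a different size.
 */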
216 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
217 {
218     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
219 
220     if (!log || log->size != size) {
221         log = vhost_log_alloc(size, share);
222         if (share) {
223             vhost_log_shm = log;
224         } else {
225             vhost_log = log;
226         }
227     } else {
228         ++log->refcnt;
229     }
230 
231     return log;
232 }
233 
234 static void vhost_log_put(struct vhost_dev *dev, bool sync)
235 {
236     struct vhost_log *log = dev->log;
237 
238     if (!log) {
239         return;
240     }
241 
242     --log->refcnt;
243     if (log->refcnt == 0) {
244         /* Sync only the range covered by the old log */
245         if (dev->log_size && sync) {
246             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
247         }
248 
249         if (vhost_log == log) {
250             g_free(log->log);
251             vhost_log = NULL;
252         } else if (vhost_log_shm == log) {
253             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
254                             log->fd);
255             vhost_log_shm = NULL;
256         }
257 
258         g_free(log);
259     }
260 
261     dev->log = NULL;
262     dev->log_size = 0;
263 }
264 
265 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
266 {
267     return dev->vhost_ops->vhost_requires_shm_log &&
268            dev->vhost_ops->vhost_requires_shm_log(dev);
269 }
270 
271 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
272 {
273     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
274     uint64_t log_base = (uintptr_t)log->log;
275     int r;
276 
277     /* Inform the backend of the log switch; this must be done before
278        releasing the current log, to ensure no logging is lost */
279     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
280     if (r < 0) {
281         VHOST_OPS_DEBUG("vhost_set_log_base failed");
282     }
283 
284     vhost_log_put(dev, true);
285     dev->log = log;
286     dev->log_size = size;
287 }
288 
289 static int vhost_dev_has_iommu(struct vhost_dev *dev)
290 {
291     VirtIODevice *vdev = dev->vdev;
292 
293     return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
294 }
295 
296 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
297                               hwaddr *plen, bool is_write)
298 {
299     if (!vhost_dev_has_iommu(dev)) {
300         return cpu_physical_memory_map(addr, plen, is_write);
301     } else {
302         return (void *)(uintptr_t)addr;
303     }
304 }
305 
306 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
307                                hwaddr len, int is_write,
308                                hwaddr access_len)
309 {
310     if (!vhost_dev_has_iommu(dev)) {
311         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
312     }
313 }
314 
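/*
 * Returns 0 if the ring part does not intersect the region, or if it is
 * fully contained and still mapped at the same host address; -ENOMEM if
 * the ring extends past the end of the region; -EBUSY if the region's
 * host mapping moved underneath the ring.
 */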
315 static int vhost_verify_ring_part_mapping(void *ring_hva,
316                                           uint64_t ring_gpa,
317                                           uint64_t ring_size,
318                                           void *reg_hva,
319                                           uint64_t reg_gpa,
320                                           uint64_t reg_size)
321 {
322     uint64_t hva_ring_offset;
323     uint64_t ring_last = range_get_last(ring_gpa, ring_size);
324     uint64_t reg_last = range_get_last(reg_gpa, reg_size);
325 
326     if (ring_last < reg_gpa || ring_gpa > reg_last) {
327         return 0;
328     }
329     /* check that the whole ring is mapped */
330     if (ring_last > reg_last) {
331         return -ENOMEM;
332     }
333     /* check that ring's MemoryRegion wasn't replaced */
334     hva_ring_offset = ring_gpa - reg_gpa;
335     if (ring_hva != reg_hva + hva_ring_offset) {
336         return -EBUSY;
337     }
338 
339     return 0;
340 }
341 
342 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
343                                       void *reg_hva,
344                                       uint64_t reg_gpa,
345                                       uint64_t reg_size)
346 {
347     int i, j;
348     int r = 0;
349     const char *part_name[] = {
350         "descriptor table",
351         "available ring",
352         "used ring"
353     };
354 
355     if (vhost_dev_has_iommu(dev)) {
356         return 0;
357     }
358 
359     for (i = 0; i < dev->nvqs; ++i) {
360         struct vhost_virtqueue *vq = dev->vqs + i;
361 
362         if (vq->desc_phys == 0) {
363             continue;
364         }
365 
366         j = 0;
367         r = vhost_verify_ring_part_mapping(
368                 vq->desc, vq->desc_phys, vq->desc_size,
369                 reg_hva, reg_gpa, reg_size);
370         if (r) {
371             break;
372         }
373 
374         j++;
375         r = vhost_verify_ring_part_mapping(
376                 vq->avail, vq->avail_phys, vq->avail_size,
377                 reg_hva, reg_gpa, reg_size);
378         if (r) {
379             break;
380         }
381 
382         j++;
383         r = vhost_verify_ring_part_mapping(
384                 vq->used, vq->used_phys, vq->used_size,
385                 reg_hva, reg_gpa, reg_size);
386         if (r) {
387             break;
388         }
389     }
390 
391     if (r == -ENOMEM) {
392         error_report("Unable to map %s for ring %d", part_name[j], i);
393     } else if (r == -EBUSY) {
394         error_report("%s relocated for ring %d", part_name[j], i);
395     }
396     return r;
397 }
398 
399 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
400 {
401     bool result;
402     bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
403                      ~(1 << DIRTY_MEMORY_MIGRATION);
404     result = memory_region_is_ram(section->mr) &&
405         !memory_region_is_rom(section->mr);
406 
407     /* Vhost doesn't handle any block which is doing dirty-tracking other
408      * than migration; this typically fires on VGA areas.
409      */
410     result &= !log_dirty;
411 
412     if (result && dev->vhost_ops->vhost_backend_mem_section_filter) {
413         result &=
414             dev->vhost_ops->vhost_backend_mem_section_filter(dev, section);
415     }
416 
417     trace_vhost_section(section->mr->name, result);
418     return result;
419 }
420 
421 static void vhost_begin(MemoryListener *listener)
422 {
423     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
424                                          memory_listener);
425     dev->tmp_sections = NULL;
426     dev->n_tmp_sections = 0;
427 }
428 
429 static void vhost_commit(MemoryListener *listener)
430 {
431     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
432                                          memory_listener);
433     MemoryRegionSection *old_sections;
434     int n_old_sections;
435     uint64_t log_size;
436     size_t regions_size;
437     int r;
438     int i;
439     bool changed = false;
440 
441     /* Note we can be called before the device is started, but then
442      * starting the device calls set_mem_table, so we need to have
443      * built the data structures.
444      */
445     old_sections = dev->mem_sections;
446     n_old_sections = dev->n_mem_sections;
447     dev->mem_sections = dev->tmp_sections;
448     dev->n_mem_sections = dev->n_tmp_sections;
449 
450     if (dev->n_mem_sections != n_old_sections) {
451         changed = true;
452     } else {
453         /* Same size, let's check the contents */
454         for (int i = 0; i < n_old_sections; i++) {
455             if (!MemoryRegionSection_eq(&old_sections[i],
456                                         &dev->mem_sections[i])) {
457                 changed = true;
458                 break;
459             }
460         }
461     }
462 
463     trace_vhost_commit(dev->started, changed);
464     if (!changed) {
465         goto out;
466     }
467 
468     /* Rebuild the regions list from the new sections list */
469     regions_size = offsetof(struct vhost_memory, regions) +
470                        dev->n_mem_sections * sizeof dev->mem->regions[0];
471     dev->mem = g_realloc(dev->mem, regions_size);
472     dev->mem->nregions = dev->n_mem_sections;
473     used_memslots = dev->mem->nregions;
474     for (i = 0; i < dev->n_mem_sections; i++) {
475         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
476         struct MemoryRegionSection *mrs = dev->mem_sections + i;
477 
478         cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
479         cur_vmr->memory_size     = int128_get64(mrs->size);
480         cur_vmr->userspace_addr  =
481             (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
482             mrs->offset_within_region;
483         cur_vmr->flags_padding   = 0;
484     }
485 
486     if (!dev->started) {
487         goto out;
488     }
489 
490     for (i = 0; i < dev->mem->nregions; i++) {
491         if (vhost_verify_ring_mappings(dev,
492                        (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
493                        dev->mem->regions[i].guest_phys_addr,
494                        dev->mem->regions[i].memory_size)) {
495             error_report("Verify ring failure on region %d", i);
496             abort();
497         }
498     }
499 
500     if (!dev->log_enabled) {
501         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
502         if (r < 0) {
503             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
504         }
505         goto out;
506     }
507     log_size = vhost_get_log_size(dev);
508     /* We allocate an extra 4K bytes to the log
509      * to reduce the number of reallocations. */
510 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
511     /* To log more, must increase log size before table update. */
512     if (dev->log_size < log_size) {
513         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
514     }
515     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
516     if (r < 0) {
517         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
518     }
519     /* To log less, can only decrease log size after table update. */
520     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
521         vhost_dev_log_resize(dev, log_size);
522     }
523 
524 out:
525     /* Deref the old list of sections, this must happen _after_ the
526      * vhost_set_mem_table to ensure the client isn't still using the
527      * section we're about to unref.
528      */
529     while (n_old_sections--) {
530         memory_region_unref(old_sections[n_old_sections].mr);
531     }
532     g_free(old_sections);
533     return;
534 }
535 
536 /* Adds the section data to the tmp_section structure.
537  * It relies on the listener calling us in memory address order,
538  * and on being called for each region (via the _add and _nop
539  * methods), so that neighbouring sections can be joined.
540  */
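/*
 * Illustrative example of the rounding below (not from the original
 * source): with 4 KiB pages, a vhost-user section at host address
 * 0x7f0000001200 with size 0x800 has its start rounded down by 0x200 and
 * its size rounded up, giving host 0x7f0000001000 and size 0x1000, with
 * the GPA adjusted by the same 0x200.
 */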
541 static void vhost_region_add_section(struct vhost_dev *dev,
542                                      MemoryRegionSection *section)
543 {
544     bool need_add = true;
545     uint64_t mrs_size = int128_get64(section->size);
546     uint64_t mrs_gpa = section->offset_within_address_space;
547     uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
548                          section->offset_within_region;
549     RAMBlock *mrs_rb = section->mr->ram_block;
550 
551     trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
552                                    mrs_host);
553 
554     if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
555         /* Round the section to its page size */
556         /* First align the start down to a page boundary */
557         size_t mrs_page = qemu_ram_pagesize(mrs_rb);
558         uint64_t alignage = mrs_host & (mrs_page - 1);
559         if (alignage) {
560             mrs_host -= alignage;
561             mrs_size += alignage;
562             mrs_gpa  -= alignage;
563         }
564         /* Now align the size up to a page boundary */
565         alignage = mrs_size & (mrs_page - 1);
566         if (alignage) {
567             mrs_size += mrs_page - alignage;
568         }
569         trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
570                                                mrs_size, mrs_host);
571     }
572 
573     if (dev->n_tmp_sections) {
574         /* Since we already have at least one section, let's see if
575          * this extends it; since we're scanning in order, we only
576          * have to look at the last one, and the FlatView that calls
577          * us shouldn't have overlaps.
578          */
579         MemoryRegionSection *prev_sec = dev->tmp_sections +
580                                                (dev->n_tmp_sections - 1);
581         uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
582         uint64_t prev_size = int128_get64(prev_sec->size);
583         uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
584         uint64_t prev_host_start =
585                         (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
586                         prev_sec->offset_within_region;
587         uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);
588 
589         if (mrs_gpa <= (prev_gpa_end + 1)) {
590             /* OK, looks like overlapping/intersecting - it's possible that
591              * the rounding to page sizes has made them overlap, but they should
592              * match up in the same RAMBlock if they do.
593              */
594             if (mrs_gpa < prev_gpa_start) {
595                 error_report("%s:Section '%s' rounded to %"PRIx64
596                              " prior to previous '%s' %"PRIx64,
597                              __func__, section->mr->name, mrs_gpa,
598                              prev_sec->mr->name, prev_gpa_start);
599                 /* A way to cleanly fail here would be better */
600                 return;
601             }
602             /* Offset from the start of the previous GPA to this GPA */
603             size_t offset = mrs_gpa - prev_gpa_start;
604 
605             if (prev_host_start + offset == mrs_host &&
606                 section->mr == prev_sec->mr &&
607                 (!dev->vhost_ops->vhost_backend_can_merge ||
608                  dev->vhost_ops->vhost_backend_can_merge(dev,
609                     mrs_host, mrs_size,
610                     prev_host_start, prev_size))) {
611                 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
612                 need_add = false;
613                 prev_sec->offset_within_address_space =
614                     MIN(prev_gpa_start, mrs_gpa);
615                 prev_sec->offset_within_region =
616                     MIN(prev_host_start, mrs_host) -
617                     (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
618                 prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
619                                                mrs_host));
620                 trace_vhost_region_add_section_merge(section->mr->name,
621                                         int128_get64(prev_sec->size),
622                                         prev_sec->offset_within_address_space,
623                                         prev_sec->offset_within_region);
624             } else {
625                 /* adjoining regions are fine, but overlapping ones with
626                  * different blocks/offsets shouldn't happen
627                  */
628                 if (mrs_gpa != prev_gpa_end + 1) {
629                     error_report("%s: Overlapping but not coherent sections "
630                                  "at %"PRIx64,
631                                  __func__, mrs_gpa);
632                     return;
633                 }
634             }
635         }
636     }
637 
638     if (need_add) {
639         ++dev->n_tmp_sections;
640         dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
641                                     dev->n_tmp_sections);
642         dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
643         /* The flatview isn't stable and we don't use it; making it NULL
644          * means we can memcmp the list.
645          */
646         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
647         memory_region_ref(section->mr);
648     }
649 }
650 
651 /* Used for both add and nop callbacks */
652 static void vhost_region_addnop(MemoryListener *listener,
653                                 MemoryRegionSection *section)
654 {
655     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
656                                          memory_listener);
657 
658     if (!vhost_section(dev, section)) {
659         return;
660     }
661     vhost_region_add_section(dev, section);
662 }
663 
664 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
665 {
666     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
667     struct vhost_dev *hdev = iommu->hdev;
668     hwaddr iova = iotlb->iova + iommu->iommu_offset;
669 
670     if (vhost_backend_invalidate_device_iotlb(hdev, iova,
671                                               iotlb->addr_mask + 1)) {
672         error_report("Fail to invalidate device iotlb");
673     }
674 }
675 
676 static void vhost_iommu_region_add(MemoryListener *listener,
677                                    MemoryRegionSection *section)
678 {
679     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
680                                          iommu_listener);
681     struct vhost_iommu *iommu;
682     Int128 end;
683     int iommu_idx, ret;
684     IOMMUMemoryRegion *iommu_mr;
685     Error *err = NULL;
686 
687     if (!memory_region_is_iommu(section->mr)) {
688         return;
689     }
690 
691     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
692 
693     iommu = g_malloc0(sizeof(*iommu));
694     end = int128_add(int128_make64(section->offset_within_region),
695                      section->size);
696     end = int128_sub(end, int128_one());
697     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
698                                                    MEMTXATTRS_UNSPECIFIED);
699     iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
700                         IOMMU_NOTIFIER_UNMAP,
701                         section->offset_within_region,
702                         int128_get64(end),
703                         iommu_idx);
704     iommu->mr = section->mr;
705     iommu->iommu_offset = section->offset_within_address_space -
706                           section->offset_within_region;
707     iommu->hdev = dev;
708     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, &err);
709     if (ret) {
710         error_report_err(err);
711         exit(1);
712     }
713     QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
714     /* TODO: can replay help performance here? */
715 }
716 
717 static void vhost_iommu_region_del(MemoryListener *listener,
718                                    MemoryRegionSection *section)
719 {
720     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
721                                          iommu_listener);
722     struct vhost_iommu *iommu;
723 
724     if (!memory_region_is_iommu(section->mr)) {
725         return;
726     }
727 
728     QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
729         if (iommu->mr == section->mr &&
730             iommu->n.start == section->offset_within_region) {
731             memory_region_unregister_iommu_notifier(iommu->mr,
732                                                     &iommu->n);
733             QLIST_REMOVE(iommu, iommu_next);
734             g_free(iommu);
735             break;
736         }
737     }
738 }
739 
740 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
741                                     struct vhost_virtqueue *vq,
742                                     unsigned idx, bool enable_log)
743 {
744     struct vhost_vring_addr addr = {
745         .index = idx,
746         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
747         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
748         .used_user_addr = (uint64_t)(unsigned long)vq->used,
749         .log_guest_addr = vq->used_phys,
750         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
751     };
752     int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
753     if (r < 0) {
754         VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
755         return -errno;
756     }
757     return 0;
758 }
759 
760 static int vhost_dev_set_features(struct vhost_dev *dev,
761                                   bool enable_log)
762 {
763     uint64_t features = dev->acked_features;
764     int r;
765     if (enable_log) {
766         features |= 0x1ULL << VHOST_F_LOG_ALL;
767     }
768     r = dev->vhost_ops->vhost_set_features(dev, features);
769     if (r < 0) {
770         VHOST_OPS_DEBUG("vhost_set_features failed");
771     }
772     return r < 0 ? -errno : 0;
773 }
774 
775 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
776 {
777     int r, i, idx;
778     r = vhost_dev_set_features(dev, enable_log);
779     if (r < 0) {
780         goto err_features;
781     }
782     for (i = 0; i < dev->nvqs; ++i) {
783         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
784         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
785                                      enable_log);
786         if (r < 0) {
787             goto err_vq;
788         }
789     }
790     return 0;
791 err_vq:
792     for (; i >= 0; --i) {
793         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
794         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
795                                  dev->log_enabled);
796     }
797     vhost_dev_set_features(dev, dev->log_enabled);
798 err_features:
799     return r;
800 }
801 
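/*
 * Called when migration turns dirty logging on or off.  Enabling must
 * allocate and publish the log before asking the backend to log; disabling
 * must stop the backend first and only then drop the log.
 */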
802 static int vhost_migration_log(MemoryListener *listener, int enable)
803 {
804     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
805                                          memory_listener);
806     int r;
807     if (!!enable == dev->log_enabled) {
808         return 0;
809     }
810     if (!dev->started) {
811         dev->log_enabled = enable;
812         return 0;
813     }
814     if (!enable) {
815         r = vhost_dev_set_log(dev, false);
816         if (r < 0) {
817             return r;
818         }
819         vhost_log_put(dev, false);
820     } else {
821         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
822         r = vhost_dev_set_log(dev, true);
823         if (r < 0) {
824             return r;
825         }
826     }
827     dev->log_enabled = enable;
828     return 0;
829 }
830 
831 static void vhost_log_global_start(MemoryListener *listener)
832 {
833     int r;
834 
835     r = vhost_migration_log(listener, true);
836     if (r < 0) {
837         abort();
838     }
839 }
840 
841 static void vhost_log_global_stop(MemoryListener *listener)
842 {
843     int r;
844 
845     r = vhost_migration_log(listener, false);
846     if (r < 0) {
847         abort();
848     }
849 }
850 
851 static void vhost_log_start(MemoryListener *listener,
852                             MemoryRegionSection *section,
853                             int old, int new)
854 {
855     /* FIXME: implement */
856 }
857 
858 static void vhost_log_stop(MemoryListener *listener,
859                            MemoryRegionSection *section,
860                            int old, int new)
861 {
862     /* FIXME: implement */
863 }
864 
865 /* The vhost driver natively knows how to handle the vrings of
866  * non-cross-endian legacy devices and modern devices. Only legacy devices
867  * exposed to a bi-endian guest may require the vhost driver to use a
868  * specific endianness.
869  */
870 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
871 {
872     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
873         return false;
874     }
875 #ifdef HOST_WORDS_BIGENDIAN
876     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
877 #else
878     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
879 #endif
880 }
881 
882 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
883                                                    bool is_big_endian,
884                                                    int vhost_vq_index)
885 {
886     struct vhost_vring_state s = {
887         .index = vhost_vq_index,
888         .num = is_big_endian
889     };
890 
891     if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
892         return 0;
893     }
894 
895     VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
896     if (errno == ENOTTY) {
897         error_report("vhost does not support cross-endian");
898         return -ENOSYS;
899     }
900 
901     return -errno;
902 }
903 
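/*
 * Translate a guest physical address into the backend's userspace address
 * using the region table built in vhost_commit(); *len is set to the number
 * of bytes left in the containing region.  Returns -EFAULT if no region
 * covers the address.
 */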
904 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
905                                       uint64_t gpa, uint64_t *uaddr,
906                                       uint64_t *len)
907 {
908     int i;
909 
910     for (i = 0; i < hdev->mem->nregions; i++) {
911         struct vhost_memory_region *reg = hdev->mem->regions + i;
912 
913         if (gpa >= reg->guest_phys_addr &&
914             reg->guest_phys_addr + reg->memory_size > gpa) {
915             *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
916             *len = reg->guest_phys_addr + reg->memory_size - gpa;
917             return 0;
918         }
919     }
920 
921     return -EFAULT;
922 }
923 
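/*
 * Handle an IOTLB miss reported by the backend: look the IOVA up in the
 * virtio device's DMA address space, translate the result into a backend
 * userspace address and push the mapping back via
 * vhost_backend_update_device_iotlb().
 */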
924 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
925 {
926     IOMMUTLBEntry iotlb;
927     uint64_t uaddr, len;
928     int ret = -EFAULT;
929 
930     RCU_READ_LOCK_GUARD();
931 
932     trace_vhost_iotlb_miss(dev, 1);
933 
934     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
935                                           iova, write,
936                                           MEMTXATTRS_UNSPECIFIED);
937     if (iotlb.target_as != NULL) {
938         ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
939                                          &uaddr, &len);
940         if (ret) {
941             trace_vhost_iotlb_miss(dev, 3);
942             error_report("Fail to lookup the translated address "
943                          "%"PRIx64, iotlb.translated_addr);
944             goto out;
945         }
946 
947         len = MIN(iotlb.addr_mask + 1, len);
948         iova = iova & ~iotlb.addr_mask;
949 
950         ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
951                                                 len, iotlb.perm);
952         if (ret) {
953             trace_vhost_iotlb_miss(dev, 4);
954             error_report("Fail to update device iotlb");
955             goto out;
956         }
957     }
958 
959     trace_vhost_iotlb_miss(dev, 2);
960 
961 out:
962     return ret;
963 }
964 
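/*
 * Bring one virtqueue up in the backend: program the ring size, the base
 * index, (for legacy cross-endian guests) the ring endianness and the ring
 * addresses, then wire up the kick eventfd.  The call eventfd is configured
 * according to the guest notifier and MSI-X vector setup.
 */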
965 static int vhost_virtqueue_start(struct vhost_dev *dev,
966                                 struct VirtIODevice *vdev,
967                                 struct vhost_virtqueue *vq,
968                                 unsigned idx)
969 {
970     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
971     VirtioBusState *vbus = VIRTIO_BUS(qbus);
972     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
973     hwaddr s, l, a;
974     int r;
975     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
976     struct vhost_vring_file file = {
977         .index = vhost_vq_index
978     };
979     struct vhost_vring_state state = {
980         .index = vhost_vq_index
981     };
982     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
983 
984     a = virtio_queue_get_desc_addr(vdev, idx);
985     if (a == 0) {
986         /* Queue might not be ready for start */
987         return 0;
988     }
989 
990     vq->num = state.num = virtio_queue_get_num(vdev, idx);
991     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
992     if (r) {
993         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
994         return -errno;
995     }
996 
997     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
998     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
999     if (r) {
1000         VHOST_OPS_DEBUG("vhost_set_vring_base failed");
1001         return -errno;
1002     }
1003 
1004     if (vhost_needs_vring_endian(vdev)) {
1005         r = vhost_virtqueue_set_vring_endian_legacy(dev,
1006                                                     virtio_is_big_endian(vdev),
1007                                                     vhost_vq_index);
1008         if (r) {
1009             return -errno;
1010         }
1011     }
1012 
1013     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1014     vq->desc_phys = a;
1015     vq->desc = vhost_memory_map(dev, a, &l, false);
1016     if (!vq->desc || l != s) {
1017         r = -ENOMEM;
1018         goto fail_alloc_desc;
1019     }
1020     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1021     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1022     vq->avail = vhost_memory_map(dev, a, &l, false);
1023     if (!vq->avail || l != s) {
1024         r = -ENOMEM;
1025         goto fail_alloc_avail;
1026     }
1027     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1028     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1029     vq->used = vhost_memory_map(dev, a, &l, true);
1030     if (!vq->used || l != s) {
1031         r = -ENOMEM;
1032         goto fail_alloc_used;
1033     }
1034 
1035     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1036     if (r < 0) {
1037         r = -errno;
1038         goto fail_alloc;
1039     }
1040 
1041     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1042     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1043     if (r) {
1044         VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1045         r = -errno;
1046         goto fail_kick;
1047     }
1048 
1049     /* Clear and discard previous events if any. */
1050     event_notifier_test_and_clear(&vq->masked_notifier);
1051 
1052     /* Init vring in unmasked state, unless guest_notifier_mask
1053      * will do it later.
1054      */
1055     if (!vdev->use_guest_notifier_mask) {
1056         /* TODO: check and handle errors. */
1057         vhost_virtqueue_mask(dev, vdev, idx, false);
1058     }
1059 
1060     if (k->query_guest_notifiers &&
1061         k->query_guest_notifiers(qbus->parent) &&
1062         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1063         file.fd = -1;
1064         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1065         if (r) {
1066             goto fail_vector;
1067         }
1068     }
1069 
1070     return 0;
1071 
1072 fail_vector:
1073 fail_kick:
1074 fail_alloc:
1075     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1076                        0, 0);
1077 fail_alloc_used:
1078     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1079                        0, 0);
1080 fail_alloc_avail:
1081     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1082                        0, 0);
1083 fail_alloc_desc:
1084     return r;
1085 }
1086 
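/*
 * Tear one virtqueue down: read the last avail index back from the backend
 * into the VirtIODevice (falling back to the used idx if the backend is
 * gone), restore legacy endianness if needed, and unmap the rings.
 */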
1087 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1088                                     struct VirtIODevice *vdev,
1089                                     struct vhost_virtqueue *vq,
1090                                     unsigned idx)
1091 {
1092     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1093     struct vhost_vring_state state = {
1094         .index = vhost_vq_index,
1095     };
1096     int r;
1097 
1098     if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
1099         /* Don't stop the virtqueue, which might not have been started */
1100         return;
1101     }
1102 
1103     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1104     if (r < 0) {
1105         VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
1106         /* The connection to the backend is broken, so let's sync the
1107          * internal last avail idx to the device's used idx.
1108          */
1109         virtio_queue_restore_last_avail_idx(vdev, idx);
1110     } else {
1111         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1112     }
1113     virtio_queue_invalidate_signalled_used(vdev, idx);
1114     virtio_queue_update_used_idx(vdev, idx);
1115 
1116     /* In the cross-endian case, we need to reset the vring endianness to
1117      * native, as legacy devices expect by default.
1118      */
1119     if (vhost_needs_vring_endian(vdev)) {
1120         vhost_virtqueue_set_vring_endian_legacy(dev,
1121                                                 !virtio_is_big_endian(vdev),
1122                                                 vhost_vq_index);
1123     }
1124 
1125     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1126                        1, virtio_queue_get_used_size(vdev, idx));
1127     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1128                        0, virtio_queue_get_avail_size(vdev, idx));
1129     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1130                        0, virtio_queue_get_desc_size(vdev, idx));
1131 }
1132 
1133 static void vhost_eventfd_add(MemoryListener *listener,
1134                               MemoryRegionSection *section,
1135                               bool match_data, uint64_t data, EventNotifier *e)
1136 {
1137 }
1138 
1139 static void vhost_eventfd_del(MemoryListener *listener,
1140                               MemoryRegionSection *section,
1141                               bool match_data, uint64_t data, EventNotifier *e)
1142 {
1143 }
1144 
1145 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1146                                                 int n, uint32_t timeout)
1147 {
1148     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1149     struct vhost_vring_state state = {
1150         .index = vhost_vq_index,
1151         .num = timeout,
1152     };
1153     int r;
1154 
1155     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1156         return -EINVAL;
1157     }
1158 
1159     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1160     if (r) {
1161         VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1162         return r;
1163     }
1164 
1165     return 0;
1166 }
1167 
1168 static int vhost_virtqueue_init(struct vhost_dev *dev,
1169                                 struct vhost_virtqueue *vq, int n)
1170 {
1171     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1172     struct vhost_vring_file file = {
1173         .index = vhost_vq_index,
1174     };
1175     int r = event_notifier_init(&vq->masked_notifier, 0);
1176     if (r < 0) {
1177         return r;
1178     }
1179 
1180     file.fd = event_notifier_get_fd(&vq->masked_notifier);
1181     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1182     if (r) {
1183         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1184         r = -errno;
1185         goto fail_call;
1186     }
1187 
1188     vq->dev = dev;
1189 
1190     return 0;
1191 fail_call:
1192     event_notifier_cleanup(&vq->masked_notifier);
1193     return r;
1194 }
1195 
1196 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1197 {
1198     event_notifier_cleanup(&vq->masked_notifier);
1199 }
1200 
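/*
 * Set up the connection to the vhost backend: negotiate features, initialise
 * the virtqueues and register the memory listener.  A typical caller (e.g.
 * vhost-net; the exact sequence depends on the device) does roughly:
 *
 *     vhost_dev_init(hdev, opaque, backend_type, busyloop_timeout);
 *     ...
 *     vhost_dev_enable_notifiers(hdev, vdev);
 *     vhost_dev_start(hdev, vdev);
 *     ...
 *     vhost_dev_stop(hdev, vdev);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */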
1201 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1202                    VhostBackendType backend_type, uint32_t busyloop_timeout)
1203 {
1204     uint64_t features;
1205     int i, r, n_initialized_vqs = 0;
1206     Error *local_err = NULL;
1207 
1208     hdev->vdev = NULL;
1209     hdev->migration_blocker = NULL;
1210 
1211     r = vhost_set_backend_type(hdev, backend_type);
1212     assert(r >= 0);
1213 
1214     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1215     if (r < 0) {
1216         goto fail;
1217     }
1218 
1219     r = hdev->vhost_ops->vhost_set_owner(hdev);
1220     if (r < 0) {
1221         VHOST_OPS_DEBUG("vhost_set_owner failed");
1222         goto fail;
1223     }
1224 
1225     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1226     if (r < 0) {
1227         VHOST_OPS_DEBUG("vhost_get_features failed");
1228         goto fail;
1229     }
1230 
1231     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1232         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1233         if (r < 0) {
1234             goto fail;
1235         }
1236     }
1237 
1238     if (busyloop_timeout) {
1239         for (i = 0; i < hdev->nvqs; ++i) {
1240             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1241                                                      busyloop_timeout);
1242             if (r < 0) {
1243                 goto fail_busyloop;
1244             }
1245         }
1246     }
1247 
1248     hdev->features = features;
1249 
1250     hdev->memory_listener = (MemoryListener) {
1251         .begin = vhost_begin,
1252         .commit = vhost_commit,
1253         .region_add = vhost_region_addnop,
1254         .region_nop = vhost_region_addnop,
1255         .log_start = vhost_log_start,
1256         .log_stop = vhost_log_stop,
1257         .log_sync = vhost_log_sync,
1258         .log_global_start = vhost_log_global_start,
1259         .log_global_stop = vhost_log_global_stop,
1260         .eventfd_add = vhost_eventfd_add,
1261         .eventfd_del = vhost_eventfd_del,
1262         .priority = 10
1263     };
1264 
1265     hdev->iommu_listener = (MemoryListener) {
1266         .region_add = vhost_iommu_region_add,
1267         .region_del = vhost_iommu_region_del,
1268     };
1269 
1270     if (hdev->migration_blocker == NULL) {
1271         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1272             error_setg(&hdev->migration_blocker,
1273                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1274         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1275             error_setg(&hdev->migration_blocker,
1276                        "Migration disabled: failed to allocate shared memory");
1277         }
1278     }
1279 
1280     if (hdev->migration_blocker != NULL) {
1281         r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1282         if (local_err) {
1283             error_report_err(local_err);
1284             error_free(hdev->migration_blocker);
1285             goto fail_busyloop;
1286         }
1287     }
1288 
1289     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1290     hdev->n_mem_sections = 0;
1291     hdev->mem_sections = NULL;
1292     hdev->log = NULL;
1293     hdev->log_size = 0;
1294     hdev->log_enabled = false;
1295     hdev->started = false;
1296     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1297     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1298 
1299     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1300         error_report("vhost backend memory slots limit is less"
1301                 " than current number of present memory slots");
1302         r = -1;
1303         if (busyloop_timeout) {
1304             goto fail_busyloop;
1305         } else {
1306             goto fail;
1307         }
1308     }
1309 
1310     return 0;
1311 
1312 fail_busyloop:
1313     while (--i >= 0) {
1314         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1315     }
1316 fail:
1317     hdev->nvqs = n_initialized_vqs;
1318     vhost_dev_cleanup(hdev);
1319     return r;
1320 }
1321 
1322 void vhost_dev_cleanup(struct vhost_dev *hdev)
1323 {
1324     int i;
1325 
1326     for (i = 0; i < hdev->nvqs; ++i) {
1327         vhost_virtqueue_cleanup(hdev->vqs + i);
1328     }
1329     if (hdev->mem) {
1330         /* those are only safe after successful init */
1331         memory_listener_unregister(&hdev->memory_listener);
1332         QLIST_REMOVE(hdev, entry);
1333     }
1334     if (hdev->migration_blocker) {
1335         migrate_del_blocker(hdev->migration_blocker);
1336         error_free(hdev->migration_blocker);
1337     }
1338     g_free(hdev->mem);
1339     g_free(hdev->mem_sections);
1340     if (hdev->vhost_ops) {
1341         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1342     }
1343     assert(!hdev->log);
1344 
1345     memset(hdev, 0, sizeof(struct vhost_dev));
1346 }
1347 
1348 /* Stop processing guest IO notifications in qemu.
1349  * Start processing them in the vhost backend.
1350  */
1351 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1352 {
1353     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1354     int i, r, e;
1355 
1356     /* We will pass the notifiers to the kernel; make sure that QEMU
1357      * doesn't interfere.
1358      */
1359     r = virtio_device_grab_ioeventfd(vdev);
1360     if (r < 0) {
1361         error_report("binding does not support host notifiers");
1362         goto fail;
1363     }
1364 
1365     for (i = 0; i < hdev->nvqs; ++i) {
1366         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1367                                          true);
1368         if (r < 0) {
1369             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1370             goto fail_vq;
1371         }
1372     }
1373 
1374     return 0;
1375 fail_vq:
1376     while (--i >= 0) {
1377         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1378                                          false);
1379         if (e < 0) {
1380             error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
1381         }
1382         assert (e >= 0);
1383         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1384     }
1385     virtio_device_release_ioeventfd(vdev);
1386 fail:
1387     return r;
1388 }
1389 
1390 /* Stop processing guest IO notifications in vhost.
1391  * Start processing them in qemu.
1392  * This might actually run the qemu handlers right away,
1393  * so virtio in qemu must be completely set up when this is called.
1394  */
1395 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1396 {
1397     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1398     int i, r;
1399 
1400     for (i = 0; i < hdev->nvqs; ++i) {
1401         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1402                                          false);
1403         if (r < 0) {
1404             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1405         }
1406         assert (r >= 0);
1407         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1408     }
1409     virtio_device_release_ioeventfd(vdev);
1410 }
1411 
1412 /* Test and clear event pending status.
1413  * Should be called after unmask to avoid losing events.
1414  */
1415 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1416 {
1417     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1418     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1419     return event_notifier_test_and_clear(&vq->masked_notifier);
1420 }
1421 
1422 /* Mask/unmask events from this vq. */
1423 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1424                          bool mask)
1425 {
1426     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1427     int r, index = n - hdev->vq_index;
1428     struct vhost_vring_file file;
1429 
1430     /* should only be called after backend is connected */
1431     assert(hdev->vhost_ops);
1432 
1433     if (mask) {
1434         assert(vdev->use_guest_notifier_mask);
1435         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1436     } else {
1437         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1438     }
1439 
1440     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1441     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1442     if (r < 0) {
1443         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1444     }
1445 }
1446 
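/* Strip from @features any bits the vhost backend did not advertise;
 * @feature_bits is a VHOST_INVALID_FEATURE_BIT-terminated list of the bits
 * the caller cares about. */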
1447 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1448                             uint64_t features)
1449 {
1450     const int *bit = feature_bits;
1451     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1452         uint64_t bit_mask = (1ULL << *bit);
1453         if (!(hdev->features & bit_mask)) {
1454             features &= ~bit_mask;
1455         }
1456         bit++;
1457     }
1458     return features;
1459 }
1460 
1461 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1462                         uint64_t features)
1463 {
1464     const int *bit = feature_bits;
1465     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1466         uint64_t bit_mask = (1ULL << *bit);
1467         if (features & bit_mask) {
1468             hdev->acked_features |= bit_mask;
1469         }
1470         bit++;
1471     }
1472 }
1473 
1474 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1475                          uint32_t config_len)
1476 {
1477     assert(hdev->vhost_ops);
1478 
1479     if (hdev->vhost_ops->vhost_get_config) {
1480         return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
1481     }
1482 
1483     return -1;
1484 }
1485 
1486 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1487                          uint32_t offset, uint32_t size, uint32_t flags)
1488 {
1489     assert(hdev->vhost_ops);
1490 
1491     if (hdev->vhost_ops->vhost_set_config) {
1492         return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1493                                                  size, flags);
1494     }
1495 
1496     return -1;
1497 }
1498 
1499 void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1500                                    const VhostDevConfigOps *ops)
1501 {
1502     hdev->config_ops = ops;
1503 }
1504 
1505 void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1506 {
1507     if (inflight->addr) {
1508         qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1509         inflight->addr = NULL;
1510         inflight->fd = -1;
1511     }
1512 }
1513 
1514 static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1515                                      uint64_t new_size)
1516 {
1517     Error *err = NULL;
1518     int fd = -1;
1519     void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1520                                   F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1521                                   &fd, &err);
1522 
1523     if (err) {
1524         error_report_err(err);
1525         return -1;
1526     }
1527 
1528     vhost_dev_free_inflight(inflight);
1529     inflight->offset = 0;
1530     inflight->addr = addr;
1531     inflight->fd = fd;
1532     inflight->size = new_size;
1533 
1534     return 0;
1535 }
1536 
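/*
 * Inflight buffer migration format (read back by vhost_dev_load_inflight()
 * below): a be64 buffer size, then, if non-zero, a be16 queue size followed
 * by the raw buffer contents.
 */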
1537 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1538 {
1539     if (inflight->addr) {
1540         qemu_put_be64(f, inflight->size);
1541         qemu_put_be16(f, inflight->queue_size);
1542         qemu_put_buffer(f, inflight->addr, inflight->size);
1543     } else {
1544         qemu_put_be64(f, 0);
1545     }
1546 }
1547 
1548 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1549 {
1550     uint64_t size;
1551 
1552     size = qemu_get_be64(f);
1553     if (!size) {
1554         return 0;
1555     }
1556 
1557     if (inflight->size != size) {
1558         if (vhost_dev_resize_inflight(inflight, size)) {
1559             return -1;
1560         }
1561     }
1562     inflight->queue_size = qemu_get_be16(f);
1563 
1564     qemu_get_buffer(f, inflight->addr, size);
1565 
1566     return 0;
1567 }
1568 
1569 int vhost_dev_set_inflight(struct vhost_dev *dev,
1570                            struct vhost_inflight *inflight)
1571 {
1572     int r;
1573 
1574     if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1575         r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1576         if (r) {
1577             VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
1578             return -errno;
1579         }
1580     }
1581 
1582     return 0;
1583 }
1584 
1585 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1586                            struct vhost_inflight *inflight)
1587 {
1588     int r;
1589 
1590     if (dev->vhost_ops->vhost_get_inflight_fd) {
1591         r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1592         if (r) {
1593             VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
1594             return -errno;
1595         }
1596     }
1597 
1598     return 0;
1599 }
1600 
1601 /* Host notifiers must be enabled at this point. */
1602 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1603 {
1604     int i, r;
1605 
1606     /* should only be called after backend is connected */
1607     assert(hdev->vhost_ops);
1608 
1609     hdev->started = true;
1610     hdev->vdev = vdev;
1611 
1612     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1613     if (r < 0) {
1614         goto fail_features;
1615     }
1616 
1617     if (vhost_dev_has_iommu(hdev)) {
1618         memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1619     }
1620 
1621     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1622     if (r < 0) {
1623         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1624         r = -errno;
1625         goto fail_mem;
1626     }
1627     for (i = 0; i < hdev->nvqs; ++i) {
1628         r = vhost_virtqueue_start(hdev,
1629                                   vdev,
1630                                   hdev->vqs + i,
1631                                   hdev->vq_index + i);
1632         if (r < 0) {
1633             goto fail_vq;
1634         }
1635     }
1636 
1637     if (hdev->log_enabled) {
1638         uint64_t log_base;
1639 
1640         hdev->log_size = vhost_get_log_size(hdev);
1641         hdev->log = vhost_log_get(hdev->log_size,
1642                                   vhost_dev_log_is_shared(hdev));
1643         log_base = (uintptr_t)hdev->log->log;
1644         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1645                                                 hdev->log_size ? log_base : 0,
1646                                                 hdev->log);
1647         if (r < 0) {
1648             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1649             r = -errno;
1650             goto fail_log;
1651         }
1652     }
1653 
1654     if (vhost_dev_has_iommu(hdev)) {
1655         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1656 
1657         /* Update used ring information for IOTLB to work correctly;
1658          * the vhost-kernel code requires this. */
1659         for (i = 0; i < hdev->nvqs; ++i) {
1660             struct vhost_virtqueue *vq = hdev->vqs + i;
1661             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1662         }
1663     }
1664     return 0;
1665 fail_log:
1666     vhost_log_put(hdev, false);
1667 fail_vq:
1668     while (--i >= 0) {
1669         vhost_virtqueue_stop(hdev,
1670                              vdev,
1671                              hdev->vqs + i,
1672                              hdev->vq_index + i);
1673     }
1674 
1675 fail_mem:
1676 fail_features:
1677 
1678     hdev->started = false;
1679     return r;
1680 }
1681 
1682 /* Host notifiers must be enabled at this point. */
1683 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1684 {
1685     int i;
1686 
1687     /* should only be called after backend is connected */
1688     assert(hdev->vhost_ops);
1689 
1690     for (i = 0; i < hdev->nvqs; ++i) {
1691         vhost_virtqueue_stop(hdev,
1692                              vdev,
1693                              hdev->vqs + i,
1694                              hdev->vq_index + i);
1695     }
1696 
1697     if (vhost_dev_has_iommu(hdev)) {
1698         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1699         memory_listener_unregister(&hdev->iommu_listener);
1700     }
1701     vhost_log_put(hdev, true);
1702     hdev->started = false;
1703     hdev->vdev = NULL;
1704 }
1705 
1706 int vhost_net_set_backend(struct vhost_dev *hdev,
1707                           struct vhost_vring_file *file)
1708 {
1709     if (hdev->vhost_ops->vhost_net_set_backend) {
1710         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1711     }
1712 
1713     return -1;
1714 }
1715