xref: /qemu/hw/virtio/vhost.c (revision 49f95221)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "qemu/atomic.h"
20 #include "qemu/range.h"
21 #include "qemu/error-report.h"
22 #include "qemu/memfd.h"
23 #include "standard-headers/linux/vhost_types.h"
24 #include "hw/virtio/virtio-bus.h"
25 #include "hw/virtio/virtio-access.h"
26 #include "migration/blocker.h"
27 #include "migration/qemu-file-types.h"
28 #include "sysemu/dma.h"
29 #include "trace.h"
30 
31 /* enabled until disconnected backend stabilizes */
32 #define _VHOST_DEBUG 1
33 
34 #ifdef _VHOST_DEBUG
35 #define VHOST_OPS_DEBUG(retval, fmt, ...) \
36     do { \
37         error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
38                      strerror(-retval), -retval); \
39     } while (0)
40 #else
41 #define VHOST_OPS_DEBUG(retval, fmt, ...) \
42     do { } while (0)
43 #endif
44 
45 static struct vhost_log *vhost_log;
46 static struct vhost_log *vhost_log_shm;
47 
48 static unsigned int used_memslots;
49 static QLIST_HEAD(, vhost_dev) vhost_devices =
50     QLIST_HEAD_INITIALIZER(vhost_devices);
51 
52 bool vhost_has_free_slot(void)
53 {
54     unsigned int slots_limit = ~0U;
55     struct vhost_dev *hdev;
56 
57     QLIST_FOREACH(hdev, &vhost_devices, entry) {
58         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
59         slots_limit = MIN(slots_limit, r);
60     }
61     return slots_limit > used_memslots;
62 }
63 
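/*
 * Walk the vhost dirty log over the intersection of the listener section
 * range [mfirst, mlast] and the region range [rfirst, rlast], and mark
 * every logged page dirty in QEMU's bitmap for that section.
 */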
64 static void vhost_dev_sync_region(struct vhost_dev *dev,
65                                   MemoryRegionSection *section,
66                                   uint64_t mfirst, uint64_t mlast,
67                                   uint64_t rfirst, uint64_t rlast)
68 {
69     vhost_log_chunk_t *log = dev->log->log;
70 
71     uint64_t start = MAX(mfirst, rfirst);
72     uint64_t end = MIN(mlast, rlast);
73     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
74     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
75     uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
76 
77     if (end < start) {
78         return;
79     }
80     assert(end / VHOST_LOG_CHUNK < dev->log_size);
81     assert(start / VHOST_LOG_CHUNK < dev->log_size);
82 
83     for (; from < to; ++from) {
84         vhost_log_chunk_t log;
85         /* We first check with non-atomic: much cheaper,
86          * and we expect non-dirty to be the common case. */
87         if (!*from) {
88             addr += VHOST_LOG_CHUNK;
89             continue;
90         }
91         /* Data must be read atomically. We don't really need barrier semantics
92          * but it's easier to use atomic_* than roll our own. */
93         log = qatomic_xchg(from, 0);
94         while (log) {
95             int bit = ctzl(log);
96             hwaddr page_addr;
97             hwaddr section_offset;
98             hwaddr mr_offset;
99             page_addr = addr + bit * VHOST_LOG_PAGE;
100             section_offset = page_addr - section->offset_within_address_space;
101             mr_offset = section_offset + section->offset_within_region;
102             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
103             log &= ~(0x1ull << bit);
104         }
105         addr += VHOST_LOG_CHUNK;
106     }
107 }
108 
109 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
110                                    MemoryRegionSection *section,
111                                    hwaddr first,
112                                    hwaddr last)
113 {
114     int i;
115     hwaddr start_addr;
116     hwaddr end_addr;
117 
118     if (!dev->log_enabled || !dev->started) {
119         return 0;
120     }
121     start_addr = section->offset_within_address_space;
122     end_addr = range_get_last(start_addr, int128_get64(section->size));
123     start_addr = MAX(first, start_addr);
124     end_addr = MIN(last, end_addr);
125 
126     for (i = 0; i < dev->mem->nregions; ++i) {
127         struct vhost_memory_region *reg = dev->mem->regions + i;
128         vhost_dev_sync_region(dev, section, start_addr, end_addr,
129                               reg->guest_phys_addr,
130                               range_get_last(reg->guest_phys_addr,
131                                              reg->memory_size));
132     }
133     for (i = 0; i < dev->nvqs; ++i) {
134         struct vhost_virtqueue *vq = dev->vqs + i;
135 
136         if (!vq->used_phys && !vq->used_size) {
137             continue;
138         }
139 
140         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
141                               range_get_last(vq->used_phys, vq->used_size));
142     }
143     return 0;
144 }
145 
146 static void vhost_log_sync(MemoryListener *listener,
147                           MemoryRegionSection *section)
148 {
149     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
150                                          memory_listener);
151     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
152 }
153 
154 static void vhost_log_sync_range(struct vhost_dev *dev,
155                                  hwaddr first, hwaddr last)
156 {
157     int i;
158     /* FIXME: this is N^2 in number of sections */
159     for (i = 0; i < dev->n_mem_sections; ++i) {
160         MemoryRegionSection *section = &dev->mem_sections[i];
161         vhost_sync_dirty_bitmap(dev, section, first, last);
162     }
163 }
164 
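/*
 * Number of log chunks needed to cover the highest guest physical
 * address used by any of the device's memory regions.
 */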
165 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
166 {
167     uint64_t log_size = 0;
168     int i;
169     for (i = 0; i < dev->mem->nregions; ++i) {
170         struct vhost_memory_region *reg = dev->mem->regions + i;
171         uint64_t last = range_get_last(reg->guest_phys_addr,
172                                        reg->memory_size);
173         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
174     }
175     return log_size;
176 }
177 
178 static int vhost_set_backend_type(struct vhost_dev *dev,
179                                   VhostBackendType backend_type)
180 {
181     int r = 0;
182 
183     switch (backend_type) {
184 #ifdef CONFIG_VHOST_KERNEL
185     case VHOST_BACKEND_TYPE_KERNEL:
186         dev->vhost_ops = &kernel_ops;
187         break;
188 #endif
189 #ifdef CONFIG_VHOST_USER
190     case VHOST_BACKEND_TYPE_USER:
191         dev->vhost_ops = &user_ops;
192         break;
193 #endif
194 #ifdef CONFIG_VHOST_VDPA
195     case VHOST_BACKEND_TYPE_VDPA:
196         dev->vhost_ops = &vdpa_ops;
197         break;
198 #endif
199     default:
200         error_report("Unknown vhost backend type");
201         r = -1;
202     }
203 
204     return r;
205 }
206 
207 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
208 {
209     Error *err = NULL;
210     struct vhost_log *log;
211     uint64_t logsize = size * sizeof(*(log->log));
212     int fd = -1;
213 
214     log = g_new0(struct vhost_log, 1);
215     if (share) {
216         log->log = qemu_memfd_alloc("vhost-log", logsize,
217                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
218                                     &fd, &err);
219         if (err) {
220             error_report_err(err);
221             g_free(log);
222             return NULL;
223         }
224         memset(log->log, 0, logsize);
225     } else {
226         log->log = g_malloc0(logsize);
227     }
228 
229     log->size = size;
230     log->refcnt = 1;
231     log->fd = fd;
232 
233     return log;
234 }
235 
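/*
 * Return the global dirty log (memfd-backed if the backend requires a
 * shareable log), allocating a new one when the size changes and taking
 * an extra reference otherwise.
 */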
236 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
237 {
238     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
239 
240     if (!log || log->size != size) {
241         log = vhost_log_alloc(size, share);
242         if (share) {
243             vhost_log_shm = log;
244         } else {
245             vhost_log = log;
246         }
247     } else {
248         ++log->refcnt;
249     }
250 
251     return log;
252 }
253 
254 static void vhost_log_put(struct vhost_dev *dev, bool sync)
255 {
256     struct vhost_log *log = dev->log;
257 
258     if (!log) {
259         return;
260     }
261 
262     --log->refcnt;
263     if (log->refcnt == 0) {
264         /* Sync only the range covered by the old log */
265         if (dev->log_size && sync) {
266             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
267         }
268 
269         if (vhost_log == log) {
270             g_free(log->log);
271             vhost_log = NULL;
272         } else if (vhost_log_shm == log) {
273             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
274                             log->fd);
275             vhost_log_shm = NULL;
276         }
277 
278         g_free(log);
279     }
280 
281     dev->log = NULL;
282     dev->log_size = 0;
283 }
284 
285 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
286 {
287     return dev->vhost_ops->vhost_requires_shm_log &&
288            dev->vhost_ops->vhost_requires_shm_log(dev);
289 }
290 
291 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
292 {
293     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
294     uint64_t log_base = (uintptr_t)log->log;
295     int r;
296 
297     /* inform backend of log switching; this must be done before
298        releasing the current log, to ensure no logging is lost */
299     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
300     if (r < 0) {
301         VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
302     }
303 
304     vhost_log_put(dev, true);
305     dev->log = log;
306     dev->log_size = size;
307 }
308 
309 static int vhost_dev_has_iommu(struct vhost_dev *dev)
310 {
311     VirtIODevice *vdev = dev->vdev;
312 
313     /*
314      * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports the
315      * incremental memory mapping API via the IOTLB API. For platforms
316      * that do not have an IOMMU, there's no need to enable this feature,
317      * which may cause unnecessary IOTLB miss/update transactions.
318      */
319     return virtio_bus_device_iommu_enabled(vdev) &&
320            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
321 }
322 
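/*
 * Without a vIOMMU, ring addresses are guest physical addresses and must
 * be mapped into QEMU's address space. With a vIOMMU the backend is given
 * IOVAs and resolves them itself through the IOTLB, so the address is
 * returned unchanged.
 */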
323 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
324                               hwaddr *plen, bool is_write)
325 {
326     if (!vhost_dev_has_iommu(dev)) {
327         return cpu_physical_memory_map(addr, plen, is_write);
328     } else {
329         return (void *)(uintptr_t)addr;
330     }
331 }
332 
333 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
334                                hwaddr len, int is_write,
335                                hwaddr access_len)
336 {
337     if (!vhost_dev_has_iommu(dev)) {
338         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
339     }
340 }
341 
342 static int vhost_verify_ring_part_mapping(void *ring_hva,
343                                           uint64_t ring_gpa,
344                                           uint64_t ring_size,
345                                           void *reg_hva,
346                                           uint64_t reg_gpa,
347                                           uint64_t reg_size)
348 {
349     uint64_t hva_ring_offset;
350     uint64_t ring_last = range_get_last(ring_gpa, ring_size);
351     uint64_t reg_last = range_get_last(reg_gpa, reg_size);
352 
353     if (ring_last < reg_gpa || ring_gpa > reg_last) {
354         return 0;
355     }
356     /* check that the whole ring is mapped */
357     if (ring_last > reg_last) {
358         return -ENOMEM;
359     }
360     /* check that ring's MemoryRegion wasn't replaced */
361     hva_ring_offset = ring_gpa - reg_gpa;
362     if (ring_hva != reg_hva + hva_ring_offset) {
363         return -EBUSY;
364     }
365 
366     return 0;
367 }
368 
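/*
 * Called from vhost_commit() while the device is running: for the given
 * updated region, check that any ring part overlapping it is still fully
 * covered and still mapped at the same host address.
 */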
369 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
370                                       void *reg_hva,
371                                       uint64_t reg_gpa,
372                                       uint64_t reg_size)
373 {
374     int i, j;
375     int r = 0;
376     const char *part_name[] = {
377         "descriptor table",
378         "available ring",
379         "used ring"
380     };
381 
382     if (vhost_dev_has_iommu(dev)) {
383         return 0;
384     }
385 
386     for (i = 0; i < dev->nvqs; ++i) {
387         struct vhost_virtqueue *vq = dev->vqs + i;
388 
389         if (vq->desc_phys == 0) {
390             continue;
391         }
392 
393         j = 0;
394         r = vhost_verify_ring_part_mapping(
395                 vq->desc, vq->desc_phys, vq->desc_size,
396                 reg_hva, reg_gpa, reg_size);
397         if (r) {
398             break;
399         }
400 
401         j++;
402         r = vhost_verify_ring_part_mapping(
403                 vq->avail, vq->avail_phys, vq->avail_size,
404                 reg_hva, reg_gpa, reg_size);
405         if (r) {
406             break;
407         }
408 
409         j++;
410         r = vhost_verify_ring_part_mapping(
411                 vq->used, vq->used_phys, vq->used_size,
412                 reg_hva, reg_gpa, reg_size);
413         if (r) {
414             break;
415         }
416     }
417 
418     if (r == -ENOMEM) {
419         error_report("Unable to map %s for ring %d", part_name[j], i);
420     } else if (r == -EBUSY) {
421         error_report("%s relocated for ring %d", part_name[j], i);
422     }
423     return r;
424 }
425 
426 /*
427  * vhost_section: identify sections needed for vhost access
428  *
429  * We only care about RAM sections here (where virtqueue and guest
430  * internals accessed by virtio might live). If we find one we still
431  * allow the backend to potentially filter it out of our list.
432  */
433 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
434 {
435     MemoryRegion *mr = section->mr;
436 
437     if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
438         uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
439         uint8_t handled_dirty;
440 
441         /*
442          * Kernel-based vhost doesn't handle any block which is doing
443          * dirty tracking other than migration, for which it has
444          * specific logging support. For TCG the kernel never gets
445          * involved anyway, so we can also ignore its self-modifying
446          * code detection flags. However, a vhost-user client could
447          * still confuse a TCG guest if it rewrites executable memory
448          * that has already been translated.
449          */
450         handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
451             (1 << DIRTY_MEMORY_CODE);
452 
453         if (dirty_mask & ~handled_dirty) {
454             trace_vhost_reject_section(mr->name, 1);
455             return false;
456         }
457 
458         if (dev->vhost_ops->vhost_backend_mem_section_filter &&
459             !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) {
460             trace_vhost_reject_section(mr->name, 2);
461             return false;
462         }
463 
464         trace_vhost_section(mr->name);
465         return true;
466     } else {
467         trace_vhost_reject_section(mr->name, 3);
468         return false;
469     }
470 }
471 
472 static void vhost_begin(MemoryListener *listener)
473 {
474     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
475                                          memory_listener);
476     dev->tmp_sections = NULL;
477     dev->n_tmp_sections = 0;
478 }
479 
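/*
 * Runs at the end of a listener update: if the section list really
 * changed, rebuild the vhost memory table, grow the dirty log if
 * required and push the new table to the backend.
 */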
480 static void vhost_commit(MemoryListener *listener)
481 {
482     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
483                                          memory_listener);
484     MemoryRegionSection *old_sections;
485     int n_old_sections;
486     uint64_t log_size;
487     size_t regions_size;
488     int r;
489     int i;
490     bool changed = false;
491 
492     /* Note we can be called before the device is started, but then
493      * starting the device calls set_mem_table, so we need to have
494      * built the data structures.
495      */
496     old_sections = dev->mem_sections;
497     n_old_sections = dev->n_mem_sections;
498     dev->mem_sections = dev->tmp_sections;
499     dev->n_mem_sections = dev->n_tmp_sections;
500 
501     if (dev->n_mem_sections != n_old_sections) {
502         changed = true;
503     } else {
504         /* Same size, let's check the contents */
505         for (int i = 0; i < n_old_sections; i++) {
506             if (!MemoryRegionSection_eq(&old_sections[i],
507                                         &dev->mem_sections[i])) {
508                 changed = true;
509                 break;
510             }
511         }
512     }
513 
514     trace_vhost_commit(dev->started, changed);
515     if (!changed) {
516         goto out;
517     }
518 
519     /* Rebuild the regions list from the new sections list */
520     regions_size = offsetof(struct vhost_memory, regions) +
521                        dev->n_mem_sections * sizeof dev->mem->regions[0];
522     dev->mem = g_realloc(dev->mem, regions_size);
523     dev->mem->nregions = dev->n_mem_sections;
524     used_memslots = dev->mem->nregions;
525     for (i = 0; i < dev->n_mem_sections; i++) {
526         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
527         struct MemoryRegionSection *mrs = dev->mem_sections + i;
528 
529         cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
530         cur_vmr->memory_size     = int128_get64(mrs->size);
531         cur_vmr->userspace_addr  =
532             (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
533             mrs->offset_within_region;
534         cur_vmr->flags_padding   = 0;
535     }
536 
537     if (!dev->started) {
538         goto out;
539     }
540 
541     for (i = 0; i < dev->mem->nregions; i++) {
542         if (vhost_verify_ring_mappings(dev,
543                        (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
544                        dev->mem->regions[i].guest_phys_addr,
545                        dev->mem->regions[i].memory_size)) {
546             error_report("Verify ring failure on region %d", i);
547             abort();
548         }
549     }
550 
551     if (!dev->log_enabled) {
552         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
553         if (r < 0) {
554             VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
555         }
556         goto out;
557     }
558     log_size = vhost_get_log_size(dev);
559     /* We allocate an extra 4K bytes of log,
560      * to reduce the number of reallocations. */
561 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
562     /* To log more, must increase log size before table update. */
563     if (dev->log_size < log_size) {
564         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
565     }
566     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
567     if (r < 0) {
568         VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
569     }
570     /* To log less, can only decrease log size after table update. */
571     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
572         vhost_dev_log_resize(dev, log_size);
573     }
574 
575 out:
576     /* Deref the old list of sections, this must happen _after_ the
577      * vhost_set_mem_table to ensure the client isn't still using the
578      * section we're about to unref.
579      */
580     while (n_old_sections--) {
581         memory_region_unref(old_sections[n_old_sections].mr);
582     }
583     g_free(old_sections);
584     return;
585 }
586 
587 /* Add the section data to the tmp_sections list.
588  * This relies on the listener calling us in memory address order;
589  * for each region (via the _add and _nop methods) it joins
590  * neighbouring sections where possible.
591  */
592 static void vhost_region_add_section(struct vhost_dev *dev,
593                                      MemoryRegionSection *section)
594 {
595     bool need_add = true;
596     uint64_t mrs_size = int128_get64(section->size);
597     uint64_t mrs_gpa = section->offset_within_address_space;
598     uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
599                          section->offset_within_region;
600     RAMBlock *mrs_rb = section->mr->ram_block;
601 
602     trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
603                                    mrs_host);
604 
605     if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
606         /* Round the section to its page size */
607         /* First align the start down to a page boundary */
608         size_t mrs_page = qemu_ram_pagesize(mrs_rb);
609         uint64_t alignage = mrs_host & (mrs_page - 1);
610         if (alignage) {
611             mrs_host -= alignage;
612             mrs_size += alignage;
613             mrs_gpa  -= alignage;
614         }
615         /* Now align the size up to a page boundary */
616         alignage = mrs_size & (mrs_page - 1);
617         if (alignage) {
618             mrs_size += mrs_page - alignage;
619         }
620         trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
621                                                mrs_size, mrs_host);
622     }
623 
624     if (dev->n_tmp_sections) {
625         /* Since we already have at least one section, let's see if
626          * this extends it; since we're scanning in order, we only
627          * have to look at the last one, and the FlatView that calls
628          * us shouldn't have overlaps.
629          */
630         MemoryRegionSection *prev_sec = dev->tmp_sections +
631                                                (dev->n_tmp_sections - 1);
632         uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
633         uint64_t prev_size = int128_get64(prev_sec->size);
634         uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
635         uint64_t prev_host_start =
636                         (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
637                         prev_sec->offset_within_region;
638         uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);
639 
640         if (mrs_gpa <= (prev_gpa_end + 1)) {
641             /* OK, looks like overlapping/intersecting - it's possible that
642              * the rounding to page sizes has made them overlap, but they should
643              * match up in the same RAMBlock if they do.
644              */
645             if (mrs_gpa < prev_gpa_start) {
646                 error_report("%s: Section '%s' rounded to %"PRIx64
647                              " prior to previous '%s' %"PRIx64,
648                              __func__, section->mr->name, mrs_gpa,
649                              prev_sec->mr->name, prev_gpa_start);
650                 /* A way to cleanly fail here would be better */
651                 return;
652             }
653             /* Offset from the start of the previous GPA to this GPA */
654             size_t offset = mrs_gpa - prev_gpa_start;
655 
656             if (prev_host_start + offset == mrs_host &&
657                 section->mr == prev_sec->mr &&
658                 (!dev->vhost_ops->vhost_backend_can_merge ||
659                  dev->vhost_ops->vhost_backend_can_merge(dev,
660                     mrs_host, mrs_size,
661                     prev_host_start, prev_size))) {
662                 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
663                 need_add = false;
664                 prev_sec->offset_within_address_space =
665                     MIN(prev_gpa_start, mrs_gpa);
666                 prev_sec->offset_within_region =
667                     MIN(prev_host_start, mrs_host) -
668                     (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
669                 prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
670                                                mrs_host));
671                 trace_vhost_region_add_section_merge(section->mr->name,
672                                         int128_get64(prev_sec->size),
673                                         prev_sec->offset_within_address_space,
674                                         prev_sec->offset_within_region);
675             } else {
676                 /* adjoining regions are fine, but overlapping ones with
677                  * different blocks/offsets shouldn't happen
678                  */
679                 if (mrs_gpa != prev_gpa_end + 1) {
680                     error_report("%s: Overlapping but not coherent sections "
681                                  "at %"PRIx64,
682                                  __func__, mrs_gpa);
683                     return;
684                 }
685             }
686         }
687     }
688 
689     if (need_add) {
690         ++dev->n_tmp_sections;
691         dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
692                                     dev->n_tmp_sections);
693         dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
694         /* The flatview isn't stable and we don't use it; making it NULL
695          * means we can memcmp the list.
696          */
697         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
698         memory_region_ref(section->mr);
699     }
700 }
701 
702 /* Used for both add and nop callbacks */
703 static void vhost_region_addnop(MemoryListener *listener,
704                                 MemoryRegionSection *section)
705 {
706     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
707                                          memory_listener);
708 
709     if (!vhost_section(dev, section)) {
710         return;
711     }
712     vhost_region_add_section(dev, section);
713 }
714 
715 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
716 {
717     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
718     struct vhost_dev *hdev = iommu->hdev;
719     hwaddr iova = iotlb->iova + iommu->iommu_offset;
720 
721     if (vhost_backend_invalidate_device_iotlb(hdev, iova,
722                                               iotlb->addr_mask + 1)) {
723         error_report("Failed to invalidate device iotlb");
724     }
725 }
726 
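/*
 * Register an IOMMU notifier for the section so that vIOMMU invalidations
 * are propagated to the backend's IOTLB. Device-IOTLB notifications are
 * preferred, with a fallback to the legacy UNMAP notifier.
 */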
727 static void vhost_iommu_region_add(MemoryListener *listener,
728                                    MemoryRegionSection *section)
729 {
730     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
731                                          iommu_listener);
732     struct vhost_iommu *iommu;
733     Int128 end;
734     int iommu_idx;
735     IOMMUMemoryRegion *iommu_mr;
736     int ret;
737 
738     if (!memory_region_is_iommu(section->mr)) {
739         return;
740     }
741 
742     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
743 
744     iommu = g_malloc0(sizeof(*iommu));
745     end = int128_add(int128_make64(section->offset_within_region),
746                      section->size);
747     end = int128_sub(end, int128_one());
748     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
749                                                    MEMTXATTRS_UNSPECIFIED);
750     iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
751                         IOMMU_NOTIFIER_DEVIOTLB_UNMAP,
752                         section->offset_within_region,
753                         int128_get64(end),
754                         iommu_idx);
755     iommu->mr = section->mr;
756     iommu->iommu_offset = section->offset_within_address_space -
757                           section->offset_within_region;
758     iommu->hdev = dev;
759     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
760     if (ret) {
761         /*
762          * Some vIOMMUs do not support dev-iotlb yet. If so, try to use the
763          * legacy UNMAP message.
764          */
765         iommu->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
766         memory_region_register_iommu_notifier(section->mr, &iommu->n,
767                                               &error_fatal);
768     }
769     QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
770     /* TODO: can replay help performance here? */
771 }
772 
773 static void vhost_iommu_region_del(MemoryListener *listener,
774                                    MemoryRegionSection *section)
775 {
776     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
777                                          iommu_listener);
778     struct vhost_iommu *iommu;
779 
780     if (!memory_region_is_iommu(section->mr)) {
781         return;
782     }
783 
784     QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
785         if (iommu->mr == section->mr &&
786             iommu->n.start == section->offset_within_region) {
787             memory_region_unregister_iommu_notifier(iommu->mr,
788                                                     &iommu->n);
789             QLIST_REMOVE(iommu, iommu_next);
790             g_free(iommu);
791             break;
792         }
793     }
794 }
795 
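/*
 * Program the vring addresses into the backend. log_guest_addr is the
 * guest physical address of the used ring, so that used-ring updates can
 * be dirty-logged when VHOST_VRING_F_LOG is set.
 */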
796 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
797                                     struct vhost_virtqueue *vq,
798                                     unsigned idx, bool enable_log)
799 {
800     struct vhost_vring_addr addr;
801     int r;
802     memset(&addr, 0, sizeof(struct vhost_vring_addr));
803 
804     if (dev->vhost_ops->vhost_vq_get_addr) {
805         r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
806         if (r < 0) {
807             VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
808             return r;
809         }
810     } else {
811         addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
812         addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
813         addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
814     }
815     addr.index = idx;
816     addr.log_guest_addr = vq->used_phys;
817     addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
818     r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
819     if (r < 0) {
820         VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
821     }
822     return r;
823 }
824 
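/*
 * Push the acked features to the backend, adding VHOST_F_LOG_ALL when
 * dirty logging is requested and dropping VIRTIO_F_IOMMU_PLATFORM when
 * no vIOMMU is in use.
 */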
825 static int vhost_dev_set_features(struct vhost_dev *dev,
826                                   bool enable_log)
827 {
828     uint64_t features = dev->acked_features;
829     int r;
830     if (enable_log) {
831         features |= 0x1ULL << VHOST_F_LOG_ALL;
832     }
833     if (!vhost_dev_has_iommu(dev)) {
834         features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
835     }
836     if (dev->vhost_ops->vhost_force_iommu) {
837         if (dev->vhost_ops->vhost_force_iommu(dev)) {
838             features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
839         }
840     }
841     r = dev->vhost_ops->vhost_set_features(dev, features);
842     if (r < 0) {
843         VHOST_OPS_DEBUG(r, "vhost_set_features failed");
844         goto out;
845     }
846     if (dev->vhost_ops->vhost_set_backend_cap) {
847         r = dev->vhost_ops->vhost_set_backend_cap(dev);
848         if (r < 0) {
849             VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
850             goto out;
851         }
852     }
853 
854 out:
855     return r;
856 }
857 
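/*
 * Toggle dirty logging: renegotiate features with the new log flag and
 * reprogram the addresses of every started vring, rolling back to the
 * previous logging state on failure.
 */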
858 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
859 {
860     int r, i, idx;
861     hwaddr addr;
862 
863     r = vhost_dev_set_features(dev, enable_log);
864     if (r < 0) {
865         goto err_features;
866     }
867     for (i = 0; i < dev->nvqs; ++i) {
868         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
869         addr = virtio_queue_get_desc_addr(dev->vdev, idx);
870         if (!addr) {
871             /*
872              * The queue might not be ready for start. If this is
873              * the case, there is no reason to continue the process.
874              * Similar logic is used by the vhost_virtqueue_start()
875              * routine.
876              */
877             continue;
878         }
879         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
880                                      enable_log);
881         if (r < 0) {
882             goto err_vq;
883         }
884     }
885     return 0;
886 err_vq:
887     for (; i >= 0; --i) {
888         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
889         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
890                                  dev->log_enabled);
891     }
892     vhost_dev_set_features(dev, dev->log_enabled);
893 err_features:
894     return r;
895 }
896 
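/*
 * Called via the log_global_start/stop callbacks when migration begins
 * or ends: allocate or release the dirty log and switch logging on a
 * running device.
 */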
897 static int vhost_migration_log(MemoryListener *listener, bool enable)
898 {
899     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
900                                          memory_listener);
901     int r;
902     if (enable == dev->log_enabled) {
903         return 0;
904     }
905     if (!dev->started) {
906         dev->log_enabled = enable;
907         return 0;
908     }
909 
910     r = 0;
911     if (!enable) {
912         r = vhost_dev_set_log(dev, false);
913         if (r < 0) {
914             goto check_dev_state;
915         }
916         vhost_log_put(dev, false);
917     } else {
918         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
919         r = vhost_dev_set_log(dev, true);
920         if (r < 0) {
921             goto check_dev_state;
922         }
923     }
924 
925 check_dev_state:
926     dev->log_enabled = enable;
927     /*
928      * vhost-user-* devices could change their state during log
929      * initialization due to disconnect. So check dev state after
930      * vhost communication.
931      */
932     if (!dev->started) {
933         /*
934          * Since the device is in the stopped state, it is okay for
935          * migration. Return success.
936          */
937         r = 0;
938     }
939     if (r) {
940         /* An error occurred. */
941         dev->log_enabled = false;
942     }
943 
944     return r;
945 }
946 
947 static void vhost_log_global_start(MemoryListener *listener)
948 {
949     int r;
950 
951     r = vhost_migration_log(listener, true);
952     if (r < 0) {
953         abort();
954     }
955 }
956 
957 static void vhost_log_global_stop(MemoryListener *listener)
958 {
959     int r;
960 
961     r = vhost_migration_log(listener, false);
962     if (r < 0) {
963         abort();
964     }
965 }
966 
967 static void vhost_log_start(MemoryListener *listener,
968                             MemoryRegionSection *section,
969                             int old, int new)
970 {
971     /* FIXME: implement */
972 }
973 
974 static void vhost_log_stop(MemoryListener *listener,
975                            MemoryRegionSection *section,
976                            int old, int new)
977 {
978     /* FIXME: implement */
979 }
980 
981 /* The vhost driver natively knows how to handle the vrings of non
982  * cross-endian legacy devices and modern devices. Only legacy devices
983  * exposed to a bi-endian guest may require the vhost driver to use a
984  * specific endianness.
985  */
986 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
987 {
988     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
989         return false;
990     }
991 #if HOST_BIG_ENDIAN
992     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
993 #else
994     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
995 #endif
996 }
997 
998 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
999                                                    bool is_big_endian,
1000                                                    int vhost_vq_index)
1001 {
1002     int r;
1003     struct vhost_vring_state s = {
1004         .index = vhost_vq_index,
1005         .num = is_big_endian
1006     };
1007 
1008     r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
1009     if (r < 0) {
1010         VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
1011     }
1012     return r;
1013 }
1014 
1015 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
1016                                       uint64_t gpa, uint64_t *uaddr,
1017                                       uint64_t *len)
1018 {
1019     int i;
1020 
1021     for (i = 0; i < hdev->mem->nregions; i++) {
1022         struct vhost_memory_region *reg = hdev->mem->regions + i;
1023 
1024         if (gpa >= reg->guest_phys_addr &&
1025             reg->guest_phys_addr + reg->memory_size > gpa) {
1026             *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
1027             *len = reg->guest_phys_addr + reg->memory_size - gpa;
1028             return 0;
1029         }
1030     }
1031 
1032     return -EFAULT;
1033 }
1034 
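/*
 * Handle an IOTLB miss reported by the backend: translate the IOVA
 * through the guest IOMMU, convert the result to a backend userspace
 * address via the vhost memory table and push the mapping back to the
 * backend's IOTLB.
 */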
1035 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
1036 {
1037     IOMMUTLBEntry iotlb;
1038     uint64_t uaddr, len;
1039     int ret = -EFAULT;
1040 
1041     RCU_READ_LOCK_GUARD();
1042 
1043     trace_vhost_iotlb_miss(dev, 1);
1044 
1045     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
1046                                           iova, write,
1047                                           MEMTXATTRS_UNSPECIFIED);
1048     if (iotlb.target_as != NULL) {
1049         ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
1050                                          &uaddr, &len);
1051         if (ret) {
1052             trace_vhost_iotlb_miss(dev, 3);
1053             error_report("Failed to look up the translated address "
1054                          "%"PRIx64, iotlb.translated_addr);
1055             goto out;
1056         }
1057 
1058         len = MIN(iotlb.addr_mask + 1, len);
1059         iova = iova & ~iotlb.addr_mask;
1060 
1061         ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
1062                                                 len, iotlb.perm);
1063         if (ret) {
1064             trace_vhost_iotlb_miss(dev, 4);
1065             error_report("Failed to update device iotlb");
1066             goto out;
1067         }
1068     }
1069 
1070     trace_vhost_iotlb_miss(dev, 2);
1071 
1072 out:
1073     return ret;
1074 }
1075 
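/*
 * Hand one virtqueue over to the backend: program the ring size and base
 * index, map the rings and set their addresses, then wire up the kick
 * eventfd (the call eventfd is disabled when no guest vector is in use).
 */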
1076 static int vhost_virtqueue_start(struct vhost_dev *dev,
1077                                 struct VirtIODevice *vdev,
1078                                 struct vhost_virtqueue *vq,
1079                                 unsigned idx)
1080 {
1081     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1082     VirtioBusState *vbus = VIRTIO_BUS(qbus);
1083     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
1084     hwaddr s, l, a;
1085     int r;
1086     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1087     struct vhost_vring_file file = {
1088         .index = vhost_vq_index
1089     };
1090     struct vhost_vring_state state = {
1091         .index = vhost_vq_index
1092     };
1093     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
1094 
1095     a = virtio_queue_get_desc_addr(vdev, idx);
1096     if (a == 0) {
1097         /* Queue might not be ready for start */
1098         return 0;
1099     }
1100 
1101     vq->num = state.num = virtio_queue_get_num(vdev, idx);
1102     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
1103     if (r) {
1104         VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
1105         return r;
1106     }
1107 
1108     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
1109     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
1110     if (r) {
1111         VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
1112         return r;
1113     }
1114 
1115     if (vhost_needs_vring_endian(vdev)) {
1116         r = vhost_virtqueue_set_vring_endian_legacy(dev,
1117                                                     virtio_is_big_endian(vdev),
1118                                                     vhost_vq_index);
1119         if (r) {
1120             return r;
1121         }
1122     }
1123 
1124     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1125     vq->desc_phys = a;
1126     vq->desc = vhost_memory_map(dev, a, &l, false);
1127     if (!vq->desc || l != s) {
1128         r = -ENOMEM;
1129         goto fail_alloc_desc;
1130     }
1131     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1132     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1133     vq->avail = vhost_memory_map(dev, a, &l, false);
1134     if (!vq->avail || l != s) {
1135         r = -ENOMEM;
1136         goto fail_alloc_avail;
1137     }
1138     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1139     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1140     vq->used = vhost_memory_map(dev, a, &l, true);
1141     if (!vq->used || l != s) {
1142         r = -ENOMEM;
1143         goto fail_alloc_used;
1144     }
1145 
1146     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1147     if (r < 0) {
1148         goto fail_alloc;
1149     }
1150 
1151     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1152     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1153     if (r) {
1154         VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
1155         goto fail_kick;
1156     }
1157 
1158     /* Clear and discard previous events if any. */
1159     event_notifier_test_and_clear(&vq->masked_notifier);
1160 
1161     /* Init vring in unmasked state, unless guest_notifier_mask
1162      * will do it later.
1163      */
1164     if (!vdev->use_guest_notifier_mask) {
1165         /* TODO: check and handle errors. */
1166         vhost_virtqueue_mask(dev, vdev, idx, false);
1167     }
1168 
1169     if (k->query_guest_notifiers &&
1170         k->query_guest_notifiers(qbus->parent) &&
1171         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1172         file.fd = -1;
1173         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1174         if (r) {
1175             goto fail_vector;
1176         }
1177     }
1178 
1179     return 0;
1180 
1181 fail_vector:
1182 fail_kick:
1183 fail_alloc:
1184     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1185                        0, 0);
1186 fail_alloc_used:
1187     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1188                        0, 0);
1189 fail_alloc_avail:
1190     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1191                        0, 0);
1192 fail_alloc_desc:
1193     return r;
1194 }
1195 
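/*
 * Take one virtqueue back from the backend: fetch the last avail index so
 * virtio processing can resume in QEMU, restore legacy vring endianness
 * if needed and unmap the rings.
 */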
1196 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1197                                     struct VirtIODevice *vdev,
1198                                     struct vhost_virtqueue *vq,
1199                                     unsigned idx)
1200 {
1201     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1202     struct vhost_vring_state state = {
1203         .index = vhost_vq_index,
1204     };
1205     int r;
1206 
1207     if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
1208         /* Don't stop a virtqueue which might not have been started */
1209         return;
1210     }
1211 
1212     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1213     if (r < 0) {
1214         VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
1215         /* Connection to the backend is broken, so let's sync internal
1216          * last avail idx to the device used idx.
1217          */
1218         virtio_queue_restore_last_avail_idx(vdev, idx);
1219     } else {
1220         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1221     }
1222     virtio_queue_invalidate_signalled_used(vdev, idx);
1223     virtio_queue_update_used_idx(vdev, idx);
1224 
1225     /* In the cross-endian case, we need to reset the vring endianness to
1226      * native, as legacy devices expect it by default.
1227      */
1228     if (vhost_needs_vring_endian(vdev)) {
1229         vhost_virtqueue_set_vring_endian_legacy(dev,
1230                                                 !virtio_is_big_endian(vdev),
1231                                                 vhost_vq_index);
1232     }
1233 
1234     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1235                        1, virtio_queue_get_used_size(vdev, idx));
1236     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1237                        0, virtio_queue_get_avail_size(vdev, idx));
1238     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1239                        0, virtio_queue_get_desc_size(vdev, idx));
1240 }
1241 
1242 static void vhost_eventfd_add(MemoryListener *listener,
1243                               MemoryRegionSection *section,
1244                               bool match_data, uint64_t data, EventNotifier *e)
1245 {
1246 }
1247 
1248 static void vhost_eventfd_del(MemoryListener *listener,
1249                               MemoryRegionSection *section,
1250                               bool match_data, uint64_t data, EventNotifier *e)
1251 {
1252 }
1253 
1254 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1255                                                 int n, uint32_t timeout)
1256 {
1257     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1258     struct vhost_vring_state state = {
1259         .index = vhost_vq_index,
1260         .num = timeout,
1261     };
1262     int r;
1263 
1264     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1265         return -EINVAL;
1266     }
1267 
1268     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1269     if (r) {
1270         VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
1271         return r;
1272     }
1273 
1274     return 0;
1275 }
1276 
1277 static int vhost_virtqueue_init(struct vhost_dev *dev,
1278                                 struct vhost_virtqueue *vq, int n)
1279 {
1280     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1281     struct vhost_vring_file file = {
1282         .index = vhost_vq_index,
1283     };
1284     int r = event_notifier_init(&vq->masked_notifier, 0);
1285     if (r < 0) {
1286         return r;
1287     }
1288 
1289     file.fd = event_notifier_get_wfd(&vq->masked_notifier);
1290     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1291     if (r) {
1292         VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
1293         goto fail_call;
1294     }
1295 
1296     vq->dev = dev;
1297 
1298     return 0;
1299 fail_call:
1300     event_notifier_cleanup(&vq->masked_notifier);
1301     return r;
1302 }
1303 
1304 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1305 {
1306     event_notifier_cleanup(&vq->masked_notifier);
1307 }
1308 
1309 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1310                    VhostBackendType backend_type, uint32_t busyloop_timeout,
1311                    Error **errp)
1312 {
1313     uint64_t features;
1314     int i, r, n_initialized_vqs = 0;
1315 
1316     hdev->vdev = NULL;
1317     hdev->migration_blocker = NULL;
1318 
1319     r = vhost_set_backend_type(hdev, backend_type);
1320     assert(r >= 0);
1321 
1322     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp);
1323     if (r < 0) {
1324         goto fail;
1325     }
1326 
1327     r = hdev->vhost_ops->vhost_set_owner(hdev);
1328     if (r < 0) {
1329         error_setg_errno(errp, -r, "vhost_set_owner failed");
1330         goto fail;
1331     }
1332 
1333     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1334     if (r < 0) {
1335         error_setg_errno(errp, -r, "vhost_get_features failed");
1336         goto fail;
1337     }
1338 
1339     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1340         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1341         if (r < 0) {
1342             error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i);
1343             goto fail;
1344         }
1345     }
1346 
1347     if (busyloop_timeout) {
1348         for (i = 0; i < hdev->nvqs; ++i) {
1349             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1350                                                      busyloop_timeout);
1351             if (r < 0) {
1352                 error_setg_errno(errp, -r, "Failed to set busyloop timeout");
1353                 goto fail_busyloop;
1354             }
1355         }
1356     }
1357 
1358     hdev->features = features;
1359 
1360     hdev->memory_listener = (MemoryListener) {
1361         .name = "vhost",
1362         .begin = vhost_begin,
1363         .commit = vhost_commit,
1364         .region_add = vhost_region_addnop,
1365         .region_nop = vhost_region_addnop,
1366         .log_start = vhost_log_start,
1367         .log_stop = vhost_log_stop,
1368         .log_sync = vhost_log_sync,
1369         .log_global_start = vhost_log_global_start,
1370         .log_global_stop = vhost_log_global_stop,
1371         .eventfd_add = vhost_eventfd_add,
1372         .eventfd_del = vhost_eventfd_del,
1373         .priority = 10
1374     };
1375 
1376     hdev->iommu_listener = (MemoryListener) {
1377         .name = "vhost-iommu",
1378         .region_add = vhost_iommu_region_add,
1379         .region_del = vhost_iommu_region_del,
1380     };
1381 
1382     if (hdev->migration_blocker == NULL) {
1383         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1384             error_setg(&hdev->migration_blocker,
1385                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1386         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1387             error_setg(&hdev->migration_blocker,
1388                        "Migration disabled: failed to allocate shared memory");
1389         }
1390     }
1391 
1392     if (hdev->migration_blocker != NULL) {
1393         r = migrate_add_blocker(hdev->migration_blocker, errp);
1394         if (r < 0) {
1395             error_free(hdev->migration_blocker);
1396             goto fail_busyloop;
1397         }
1398     }
1399 
1400     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1401     hdev->n_mem_sections = 0;
1402     hdev->mem_sections = NULL;
1403     hdev->log = NULL;
1404     hdev->log_size = 0;
1405     hdev->log_enabled = false;
1406     hdev->started = false;
1407     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1408     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1409 
1410     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1411         error_setg(errp, "vhost backend memory slots limit is less"
1412                    " than current number of present memory slots");
1413         r = -EINVAL;
1414         goto fail_busyloop;
1415     }
1416 
1417     return 0;
1418 
1419 fail_busyloop:
1420     if (busyloop_timeout) {
1421         while (--i >= 0) {
1422             vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1423         }
1424     }
1425 fail:
1426     hdev->nvqs = n_initialized_vqs;
1427     vhost_dev_cleanup(hdev);
1428     return r;
1429 }
1430 
1431 void vhost_dev_cleanup(struct vhost_dev *hdev)
1432 {
1433     int i;
1434 
1435     for (i = 0; i < hdev->nvqs; ++i) {
1436         vhost_virtqueue_cleanup(hdev->vqs + i);
1437     }
1438     if (hdev->mem) {
1439         /* those are only safe after successful init */
1440         memory_listener_unregister(&hdev->memory_listener);
1441         QLIST_REMOVE(hdev, entry);
1442     }
1443     if (hdev->migration_blocker) {
1444         migrate_del_blocker(hdev->migration_blocker);
1445         error_free(hdev->migration_blocker);
1446     }
1447     g_free(hdev->mem);
1448     g_free(hdev->mem_sections);
1449     if (hdev->vhost_ops) {
1450         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1451     }
1452     assert(!hdev->log);
1453 
1454     memset(hdev, 0, sizeof(struct vhost_dev));
1455 }
1456 
1457 /* Stop processing guest IO notifications in qemu.
1458  * Start processing them in vhost in the kernel.
1459  */
1460 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1461 {
1462     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1463     int i, r, e;
1464 
1465     /* We will pass the notifiers to the kernel; make sure that QEMU
1466      * doesn't interfere.
1467      */
1468     r = virtio_device_grab_ioeventfd(vdev);
1469     if (r < 0) {
1470         error_report("binding does not support host notifiers");
1471         goto fail;
1472     }
1473 
1474     for (i = 0; i < hdev->nvqs; ++i) {
1475         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1476                                          true);
1477         if (r < 0) {
1478             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1479             goto fail_vq;
1480         }
1481     }
1482 
1483     return 0;
1484 fail_vq:
1485     while (--i >= 0) {
1486         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1487                                          false);
1488         if (e < 0) {
1489             error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1490         }
1491         assert(e >= 0);
1492         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1493     }
1494     virtio_device_release_ioeventfd(vdev);
1495 fail:
1496     return r;
1497 }
1498 
1499 /* Stop processing guest IO notifications in vhost.
1500  * Start processing them in qemu.
1501  * This might actually run the qemu handlers right away,
1502  * so virtio in qemu must be completely set up when this is called.
1503  */
1504 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1505 {
1506     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1507     int i, r;
1508 
1509     for (i = 0; i < hdev->nvqs; ++i) {
1510         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1511                                          false);
1512         if (r < 0) {
1513             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1514         }
1515         assert(r >= 0);
1516         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1517     }
1518     virtio_device_release_ioeventfd(vdev);
1519 }
1520 
1521 /* Test and clear event pending status.
1522  * Should be called after unmask to avoid losing events.
1523  */
1524 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1525 {
1526     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1527     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1528     return event_notifier_test_and_clear(&vq->masked_notifier);
1529 }
1530 
1531 /* Mask/unmask events from this vq. */
1532 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1533                          bool mask)
1534 {
1535     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1536     int r, index = n - hdev->vq_index;
1537     struct vhost_vring_file file;
1538 
1539     /* should only be called after backend is connected */
1540     assert(hdev->vhost_ops);
1541 
1542     if (mask) {
1543         assert(vdev->use_guest_notifier_mask);
1544         file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier);
1545     } else {
1546         file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq));
1547     }
1548 
1549     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1550     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1551     if (r < 0) {
1552         VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
1553     }
1554 }
1555 
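/*
 * Mask out of @features any bit listed in @feature_bits that the backend
 * did not offer.
 */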
1556 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1557                             uint64_t features)
1558 {
1559     const int *bit = feature_bits;
1560     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1561         uint64_t bit_mask = (1ULL << *bit);
1562         if (!(hdev->features & bit_mask)) {
1563             features &= ~bit_mask;
1564         }
1565         bit++;
1566     }
1567     return features;
1568 }
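
/* vhost_get_features() is meant to be fed a backend-specific table of
 * feature bits terminated by VHOST_INVALID_FEATURE_BIT; any listed bit the
 * backend did not report in hdev->features is cleared from the device's
 * offering.  A minimal sketch (hypothetical table, in the style of the
 * vhost-net feature tables):
 *
 *     static const int example_feature_bits[] = {
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VIRTIO_F_VERSION_1,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 *     features = vhost_get_features(hdev, example_feature_bits, features);
 */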
1569 
1570 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1571                         uint64_t features)
1572 {
1573     const int *bit = feature_bits;
1574     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1575         uint64_t bit_mask = (1ULL << *bit);
1576         if (features & bit_mask) {
1577             hdev->acked_features |= bit_mask;
1578         }
1579         bit++;
1580     }
1581 }
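
/* The bits accumulated in hdev->acked_features are what
 * vhost_dev_set_features() later pushes to the backend when the device is
 * started (with VHOST_F_LOG_ALL added on top while dirty logging is
 * enabled).
 */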
1582 
1583 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1584                          uint32_t config_len, Error **errp)
1585 {
1586     assert(hdev->vhost_ops);
1587 
1588     if (hdev->vhost_ops->vhost_get_config) {
1589         return hdev->vhost_ops->vhost_get_config(hdev, config, config_len,
1590                                                  errp);
1591     }
1592 
1593     error_setg(errp, "vhost_get_config not implemented");
1594     return -ENOSYS;
1595 }
1596 
1597 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1598                          uint32_t offset, uint32_t size, uint32_t flags)
1599 {
1600     assert(hdev->vhost_ops);
1601 
1602     if (hdev->vhost_ops->vhost_set_config) {
1603         return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1604                                                  size, flags);
1605     }
1606 
1607     return -ENOSYS;
1608 }
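
/* These two helpers let a device model read and update its config space
 * through the backend (vhost-user devices are the usual users).  A minimal
 * sketch (hypothetical virtio-blk style config; the flags value is
 * backend-defined and 0 is only a placeholder here):
 *
 *     struct virtio_blk_config blkcfg;
 *     Error *local_err = NULL;
 *
 *     if (vhost_dev_get_config(hdev, (uint8_t *)&blkcfg, sizeof(blkcfg),
 *                              &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 *     blkcfg.wce = 1;
 *     vhost_dev_set_config(hdev, (const uint8_t *)&blkcfg.wce,
 *                          offsetof(struct virtio_blk_config, wce),
 *                          sizeof(blkcfg.wce), 0);
 */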
1609 
1610 void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1611                                    const VhostDevConfigOps *ops)
1612 {
1613     hdev->config_ops = ops;
1614 }
1615 
1616 void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1617 {
1618     if (inflight && inflight->addr) {
1619         qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1620         inflight->addr = NULL;
1621         inflight->fd = -1;
1622     }
1623 }
1624 
1625 static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1626                                      uint64_t new_size)
1627 {
1628     Error *err = NULL;
1629     int fd = -1;
1630     void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1631                                   F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1632                                   &fd, &err);
1633 
1634     if (err) {
1635         error_report_err(err);
1636         return -ENOMEM;
1637     }
1638 
1639     vhost_dev_free_inflight(inflight);
1640     inflight->offset = 0;
1641     inflight->addr = addr;
1642     inflight->fd = fd;
1643     inflight->size = new_size;
1644 
1645     return 0;
1646 }
1647 
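/* The inflight region is migrated as a simple stream: a be64 size (0 when
 * there is no region), then a be16 queue_size, then the raw buffer contents,
 * read back symmetrically by vhost_dev_load_inflight() below.
 */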
1648 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1649 {
1650     if (inflight->addr) {
1651         qemu_put_be64(f, inflight->size);
1652         qemu_put_be16(f, inflight->queue_size);
1653         qemu_put_buffer(f, inflight->addr, inflight->size);
1654     } else {
1655         qemu_put_be64(f, 0);
1656     }
1657 }
1658 
1659 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1660 {
1661     uint64_t size;
1662 
1663     size = qemu_get_be64(f);
1664     if (!size) {
1665         return 0;
1666     }
1667 
1668     if (inflight->size != size) {
1669         int ret = vhost_dev_resize_inflight(inflight, size);
1670         if (ret < 0) {
1671             return ret;
1672         }
1673     }
1674     inflight->queue_size = qemu_get_be16(f);
1675 
1676     qemu_get_buffer(f, inflight->addr, size);
1677 
1678     return 0;
1679 }
1680 
1681 int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev)
1682 {
1683     int r;
1684 
1685     if (hdev->vhost_ops->vhost_get_inflight_fd == NULL ||
1686         hdev->vhost_ops->vhost_set_inflight_fd == NULL) {
1687         return 0;
1688     }
1689 
1690     hdev->vdev = vdev;
1691 
1692     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1693     if (r < 0) {
1694         VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed");
1695         return r;
1696     }
1697 
1698     return 0;
1699 }
1700 
1701 int vhost_dev_set_inflight(struct vhost_dev *dev,
1702                            struct vhost_inflight *inflight)
1703 {
1704     int r;
1705 
1706     if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1707         r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1708         if (r) {
1709             VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed");
1710             return r;
1711         }
1712     }
1713 
1714     return 0;
1715 }
1716 
1717 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1718                            struct vhost_inflight *inflight)
1719 {
1720     int r;
1721 
1722     if (dev->vhost_ops->vhost_get_inflight_fd) {
1723         r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1724         if (r) {
1725             VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed");
1726             return r;
1727         }
1728     }
1729 
1730     return 0;
1731 }
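
/* For backends that track inflight descriptors (vhost-user-blk style), the
 * expected bring-up order is roughly: vhost_dev_prepare_inflight(), then a
 * one-time vhost_dev_get_inflight() so the backend can size and share the
 * region, then vhost_dev_set_inflight() before every vhost_dev_start().
 * A minimal sketch (hypothetical device state 's', error handling elided):
 *
 *     vhost_dev_prepare_inflight(&s->dev, vdev);
 *     if (!s->inflight->addr) {
 *         vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight);
 *     }
 *     vhost_dev_set_inflight(&s->dev, s->inflight);
 *     vhost_dev_start(&s->dev, vdev);
 */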
1732 
1733 /* Host notifiers must be enabled at this point. */
1734 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1735 {
1736     int i, r;
1737 
1738     /* should only be called after backend is connected */
1739     assert(hdev->vhost_ops);
1740 
1741     hdev->started = true;
1742     hdev->vdev = vdev;
1743 
1744     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1745     if (r < 0) {
1746         goto fail_features;
1747     }
1748 
1749     if (vhost_dev_has_iommu(hdev)) {
1750         memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1751     }
1752 
1753     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1754     if (r < 0) {
1755         VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
1756         goto fail_mem;
1757     }
1758     for (i = 0; i < hdev->nvqs; ++i) {
1759         r = vhost_virtqueue_start(hdev,
1760                                   vdev,
1761                                   hdev->vqs + i,
1762                                   hdev->vq_index + i);
1763         if (r < 0) {
1764             goto fail_vq;
1765         }
1766     }
1767 
1768     if (hdev->log_enabled) {
1769         uint64_t log_base;
1770 
1771         hdev->log_size = vhost_get_log_size(hdev);
1772         hdev->log = vhost_log_get(hdev->log_size,
1773                                   vhost_dev_log_is_shared(hdev));
1774         log_base = (uintptr_t)hdev->log->log;
1775         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1776                                                 hdev->log_size ? log_base : 0,
1777                                                 hdev->log);
1778         if (r < 0) {
1779             VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
1780             goto fail_log;
1781         }
1782     }
1783     if (hdev->vhost_ops->vhost_dev_start) {
1784         r = hdev->vhost_ops->vhost_dev_start(hdev, true);
1785         if (r) {
1786             goto fail_log;
1787         }
1788     }
1789     if (vhost_dev_has_iommu(hdev) &&
1790         hdev->vhost_ops->vhost_set_iotlb_callback) {
1791         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1792 
1793         /* Update used ring information for IOTLB to work correctly;
1794          * the vhost-kernel code requires this. */
1795         for (i = 0; i < hdev->nvqs; ++i) {
1796             struct vhost_virtqueue *vq = hdev->vqs + i;
1797             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1798         }
1799     }
1800     return 0;
1801 fail_log:
1802     vhost_log_put(hdev, false);
1803 fail_vq:
1804     while (--i >= 0) {
1805         vhost_virtqueue_stop(hdev,
1806                              vdev,
1807                              hdev->vqs + i,
1808                              hdev->vq_index + i);
1809     }
1810 
1811 fail_mem:
1812 fail_features:
1813 
1814     hdev->started = false;
1815     return r;
1816 }
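
/* A typical caller wires up host notifiers (vhost_dev_enable_notifiers())
 * and the transport's guest notifiers before calling vhost_dev_start(), and
 * tears everything down in the reverse order around vhost_dev_stop().
 * A minimal sketch (hypothetical device state and transport handles,
 * error handling elided):
 *
 *     vhost_dev_enable_notifiers(&s->dev, vdev);
 *     k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true);
 *     vhost_dev_start(&s->dev, vdev);
 *     ...
 *     vhost_dev_stop(&s->dev, vdev);
 *     k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
 *     vhost_dev_disable_notifiers(&s->dev, vdev);
 */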
1817 
1818 /* Host notifiers must be enabled at this point. */
1819 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1820 {
1821     int i;
1822 
1823     /* should only be called after backend is connected */
1824     assert(hdev->vhost_ops);
1825 
1826     if (hdev->vhost_ops->vhost_dev_start) {
1827         hdev->vhost_ops->vhost_dev_start(hdev, false);
1828     }
1829     for (i = 0; i < hdev->nvqs; ++i) {
1830         vhost_virtqueue_stop(hdev,
1831                              vdev,
1832                              hdev->vqs + i,
1833                              hdev->vq_index + i);
1834     }
1835 
1836     if (vhost_dev_has_iommu(hdev)) {
1837         if (hdev->vhost_ops->vhost_set_iotlb_callback) {
1838             hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1839         }
1840         memory_listener_unregister(&hdev->iommu_listener);
1841     }
1842     vhost_log_put(hdev, true);
1843     hdev->started = false;
1844     hdev->vdev = NULL;
1845 }
1846 
1847 int vhost_net_set_backend(struct vhost_dev *hdev,
1848                           struct vhost_vring_file *file)
1849 {
1850     if (hdev->vhost_ops->vhost_net_set_backend) {
1851         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1852     }
1853 
1854     return -ENOSYS;
1855 }
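
/* Only net backends implement vhost_net_set_backend.  For the vhost kernel
 * backend this wraps the VHOST_NET_SET_BACKEND ioctl: file->fd is the tap fd
 * to attach to the ring selected by file->index, and an fd of -1 detaches it.
 */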
1856