xref: /qemu/hw/virtio/vhost.c (revision 2ce68e4c)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "hw/virtio/vhost.h"
17 #include "hw/hw.h"
18 #include "qemu/atomic.h"
19 #include "qemu/range.h"
20 #include "qemu/error-report.h"
21 #include <linux/vhost.h>
22 #include "exec/address-spaces.h"
23 #include "hw/virtio/virtio-bus.h"
24 #include "hw/virtio/virtio-access.h"
25 #include "migration/migration.h"
26 
27 static struct vhost_log *vhost_log;
28 
29 static unsigned int used_memslots;
30 static QLIST_HEAD(, vhost_dev) vhost_devices =
31     QLIST_HEAD_INITIALIZER(vhost_devices);
32 
33 bool vhost_has_free_slot(void)
34 {
35     unsigned int slots_limit = ~0U;
36     struct vhost_dev *hdev;
37 
38     QLIST_FOREACH(hdev, &vhost_devices, entry) {
39         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
40         slots_limit = MIN(slots_limit, r);
41     }
42     return slots_limit > used_memslots;
43 }
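
/*
 * vhost_has_free_slot() reduces the per-backend memslot limits with MIN()
 * and compares the result against the global used_memslots count. A
 * minimal caller-side sketch (the hotplug path shown here is hypothetical,
 * not a function in this file):
 *
 *     if (!vhost_has_free_slot()) {
 *         error_setg(errp, "a used vhost backend has no free memory slots");
 *         return;
 *     }
 *     memory_region_add_subregion(system_memory, addr, mr);
 */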
44 
45 static void vhost_dev_sync_region(struct vhost_dev *dev,
46                                   MemoryRegionSection *section,
47                                   uint64_t mfirst, uint64_t mlast,
48                                   uint64_t rfirst, uint64_t rlast)
49 {
50     vhost_log_chunk_t *log = dev->log->log;
51 
52     uint64_t start = MAX(mfirst, rfirst);
53     uint64_t end = MIN(mlast, rlast);
54     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
55     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
56     uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;
57 
58     if (end < start) {
59         return;
60     }
61     assert(end / VHOST_LOG_CHUNK < dev->log_size);
62     assert(start / VHOST_LOG_CHUNK < dev->log_size);
63 
64     for (; from < to; ++from) {
65         vhost_log_chunk_t log;
66         /* We first check with a non-atomic load: much cheaper,
67          * and we expect non-dirty to be the common case. */
68         if (!*from) {
69             addr += VHOST_LOG_CHUNK;
70             continue;
71         }
72         /* Data must be read atomically. We don't really need barrier semantics
73          * but it's easier to use atomic_* than to roll our own. */
74         log = atomic_xchg(from, 0);
75         while (log) {
76             int bit = ctzl(log);
77             hwaddr page_addr;
78             hwaddr section_offset;
79             hwaddr mr_offset;
80             page_addr = addr + bit * VHOST_LOG_PAGE;
81             section_offset = page_addr - section->offset_within_address_space;
82             mr_offset = section_offset + section->offset_within_region;
83             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
84             log &= ~(0x1ull << bit);
85         }
86         addr += VHOST_LOG_CHUNK;
87     }
88 }
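
/*
 * Log geometry, assuming the usual definitions in hw/virtio/vhost.h
 * (VHOST_LOG_PAGE = 0x1000 and a 64-bit vhost_log_chunk_t): each log word
 * covers 64 pages, so VHOST_LOG_CHUNK = 0x40000 bytes of guest memory.
 * Worked example: in the chunk whose base is guest address 0x40000, a set
 * bit 3 marks page 0x40000 + 3 * 0x1000 = 0x43000 dirty; that address is
 * then rebased from the address space into the MemoryRegion before
 * memory_region_set_dirty() is called.
 */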
89 
90 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
91                                    MemoryRegionSection *section,
92                                    hwaddr first,
93                                    hwaddr last)
94 {
95     int i;
96     hwaddr start_addr;
97     hwaddr end_addr;
98 
99     if (!dev->log_enabled || !dev->started) {
100         return 0;
101     }
102     start_addr = section->offset_within_address_space;
103     end_addr = range_get_last(start_addr, int128_get64(section->size));
104     start_addr = MAX(first, start_addr);
105     end_addr = MIN(last, end_addr);
106 
107     for (i = 0; i < dev->mem->nregions; ++i) {
108         struct vhost_memory_region *reg = dev->mem->regions + i;
109         vhost_dev_sync_region(dev, section, start_addr, end_addr,
110                               reg->guest_phys_addr,
111                               range_get_last(reg->guest_phys_addr,
112                                              reg->memory_size));
113     }
114     for (i = 0; i < dev->nvqs; ++i) {
115         struct vhost_virtqueue *vq = dev->vqs + i;
116         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
117                               range_get_last(vq->used_phys, vq->used_size));
118     }
119     return 0;
120 }
121 
122 static void vhost_log_sync(MemoryListener *listener,
123                           MemoryRegionSection *section)
124 {
125     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
126                                          memory_listener);
127     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
128 }
129 
130 static void vhost_log_sync_range(struct vhost_dev *dev,
131                                  hwaddr first, hwaddr last)
132 {
133     int i;
134     /* FIXME: this is N^2 in number of sections */
135     for (i = 0; i < dev->n_mem_sections; ++i) {
136         MemoryRegionSection *section = &dev->mem_sections[i];
137         vhost_sync_dirty_bitmap(dev, section, first, last);
138     }
139 }
140 
141 /* Assign/unassign. Keep an unsorted array of non-overlapping
142  * memory regions in dev->mem. */
143 static void vhost_dev_unassign_memory(struct vhost_dev *dev,
144                                       uint64_t start_addr,
145                                       uint64_t size)
146 {
147     int from, to, n = dev->mem->nregions;
148     /* Track overlapping/split regions for sanity checking. */
149     int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
150 
151     for (from = 0, to = 0; from < n; ++from, ++to) {
152         struct vhost_memory_region *reg = dev->mem->regions + to;
153         uint64_t reglast;
154         uint64_t memlast;
155         uint64_t change;
156 
157         /* clone old region */
158         if (to != from) {
159             memcpy(reg, dev->mem->regions + from, sizeof *reg);
160         }
161 
162         /* No overlap is simple */
163         if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
164                             start_addr, size)) {
165             continue;
166         }
167 
168         /* A split only happens when the supplied region lies in the
169          * middle of an existing one, so it cannot overlap any other
170          * existing region. */
171         assert(!split);
172 
173         reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
174         memlast = range_get_last(start_addr, size);
175 
176         /* Remove whole region */
177         if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
178             --dev->mem->nregions;
179             --to;
180             ++overlap_middle;
181             continue;
182         }
183 
184         /* Shrink region */
185         if (memlast >= reglast) {
186             reg->memory_size = start_addr - reg->guest_phys_addr;
187             assert(reg->memory_size);
188             assert(!overlap_end);
189             ++overlap_end;
190             continue;
191         }
192 
193         /* Shift region */
194         if (start_addr <= reg->guest_phys_addr) {
195             change = memlast + 1 - reg->guest_phys_addr;
196             reg->memory_size -= change;
197             reg->guest_phys_addr += change;
198             reg->userspace_addr += change;
199             assert(reg->memory_size);
200             assert(!overlap_start);
201             ++overlap_start;
202             continue;
203         }
204 
205         /* This only happens when the supplied region lies in the
206          * middle of an existing one, so it cannot overlap any other
207          * existing region. */
208         assert(!overlap_start);
209         assert(!overlap_end);
210         assert(!overlap_middle);
211         /* Split region: shrink first part, shift second part. */
212         memcpy(dev->mem->regions + n, reg, sizeof *reg);
213         reg->memory_size = start_addr - reg->guest_phys_addr;
214         assert(reg->memory_size);
215         change = memlast + 1 - reg->guest_phys_addr;
216         reg = dev->mem->regions + n;
217         reg->memory_size -= change;
218         assert(reg->memory_size);
219         reg->guest_phys_addr += change;
220         reg->userspace_addr += change;
221         /* Never add more than 1 region */
222         assert(dev->mem->nregions == n);
223         ++dev->mem->nregions;
224         ++split;
225     }
226 }
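
/*
 * Illustration of the four unassign cases against an existing region
 * covering [0x1000, 0x8fff]:
 *
 *     unassign [0x0000, 0x9fff]: region removed entirely
 *     unassign [0x5000, 0x9fff]: region shrinks to [0x1000, 0x4fff]
 *     unassign [0x0000, 0x4fff]: region shifts to  [0x5000, 0x8fff]
 *     unassign [0x3000, 0x5fff]: region splits into [0x1000, 0x2fff]
 *                                and [0x6000, 0x8fff]
 *
 * In the shift and split cases userspace_addr moves by the same amount
 * as guest_phys_addr, so the GPA->HVA mapping is preserved.
 */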
227 
228 /* Called after unassign, so no regions overlap the given range. */
229 static void vhost_dev_assign_memory(struct vhost_dev *dev,
230                                     uint64_t start_addr,
231                                     uint64_t size,
232                                     uint64_t uaddr)
233 {
234     int from, to;
235     struct vhost_memory_region *merged = NULL;
236     for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
237         struct vhost_memory_region *reg = dev->mem->regions + to;
238         uint64_t prlast, urlast;
239         uint64_t pmlast, umlast;
240         uint64_t s, e, u;
241 
242         /* clone old region */
243         if (to != from) {
244             memcpy(reg, dev->mem->regions + from, sizeof *reg);
245         }
246         prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
247         pmlast = range_get_last(start_addr, size);
248         urlast = range_get_last(reg->userspace_addr, reg->memory_size);
249         umlast = range_get_last(uaddr, size);
250 
251         /* check for overlapping regions: should never happen. */
252         assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
253         /* Not an adjacent or overlapping region - do not merge. */
254         if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
255             (pmlast + 1 != reg->guest_phys_addr ||
256              umlast + 1 != reg->userspace_addr)) {
257             continue;
258         }
259 
260         if (merged) {
261             --to;
262             assert(to >= 0);
263         } else {
264             merged = reg;
265         }
266         u = MIN(uaddr, reg->userspace_addr);
267         s = MIN(start_addr, reg->guest_phys_addr);
268         e = MAX(pmlast, prlast);
269         uaddr = merged->userspace_addr = u;
270         start_addr = merged->guest_phys_addr = s;
271         size = merged->memory_size = e - s + 1;
272         assert(merged->memory_size);
273     }
274 
275     if (!merged) {
276         struct vhost_memory_region *reg = dev->mem->regions + to;
277         memset(reg, 0, sizeof *reg);
278         reg->memory_size = size;
279         assert(reg->memory_size);
280         reg->guest_phys_addr = start_addr;
281         reg->userspace_addr = uaddr;
282         ++to;
283     }
284     assert(to <= dev->mem->nregions + 1);
285     dev->mem->nregions = to;
286 }
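
/*
 * Merging only happens when regions are adjacent (or overlap) in both
 * address spaces at once. Illustration: a region
 * [gpa 0x0000..0x3fff, uaddr 0x7f0000000000] followed by an assign of
 * [gpa 0x4000..0x7fff, uaddr 0x7f0000004000] collapses into one 32 KiB
 * region; the same assign with uaddr 0x7f0010000000 stays separate,
 * because a single vhost_memory_region can only describe one linear
 * GPA->HVA mapping.
 */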
287 
288 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
289 {
290     uint64_t log_size = 0;
291     int i;
292     for (i = 0; i < dev->mem->nregions; ++i) {
293         struct vhost_memory_region *reg = dev->mem->regions + i;
294         uint64_t last = range_get_last(reg->guest_phys_addr,
295                                        reg->memory_size);
296         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
297     }
298     for (i = 0; i < dev->nvqs; ++i) {
299         struct vhost_virtqueue *vq = dev->vqs + i;
300         uint64_t last = vq->used_phys + vq->used_size - 1;
301         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
302     }
303     return log_size;
304 }
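
/*
 * The returned size is in chunk-sized words, not bytes: the word index of
 * the last byte that may be logged, plus one. Worked example with the
 * usual constants (VHOST_LOG_CHUNK = 0x40000): a guest whose highest RAM
 * region ends at 1 GiB needs (0x40000000 - 1) / 0x40000 + 1 = 0x1000
 * words, i.e. 32 KiB of log with 8-byte words. Used rings are counted
 * too, because the kernel logs used-ring updates by guest-physical
 * address (see log_guest_addr below).
 */
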
305 static struct vhost_log *vhost_log_alloc(uint64_t size)
306 {
307     struct vhost_log *log = g_malloc0(sizeof *log + size * sizeof(*(log->log)));
308 
309     log->size = size;
310     log->refcnt = 1;
311 
312     return log;
313 }
314 
315 static struct vhost_log *vhost_log_get(uint64_t size)
316 {
317     if (!vhost_log || vhost_log->size != size) {
318         vhost_log = vhost_log_alloc(size);
319     } else {
320         ++vhost_log->refcnt;
321     }
322 
323     return vhost_log;
324 }
325 
326 static void vhost_log_put(struct vhost_dev *dev, bool sync)
327 {
328     struct vhost_log *log = dev->log;
329 
330     if (!log) {
331         return;
332     }
333 
334     --log->refcnt;
335     if (log->refcnt == 0) {
336         /* Sync only the range covered by the old log */
337         if (dev->log_size && sync) {
338             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
339         }
340         if (vhost_log == log) {
341             vhost_log = NULL;
342         }
343         g_free(log);
344     }
345 }
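
/*
 * All devices that need the same log size share the one cached vhost_log
 * via its refcount. Note that vhost_log_get() overwrites the cached
 * pointer without a put when the size changes, so a device still holding
 * the old log keeps it alive through its own reference. The intended
 * pairing, as used by vhost_dev_log_resize() below:
 *
 *     struct vhost_log *log = vhost_log_get(size);   // ref held
 *     ... point the backend at log->log ...
 *     vhost_log_put(dev, true);  // release dev->log, sync what it covered
 *     dev->log = log;
 */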
346 
347 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
348 {
349     struct vhost_log *log = vhost_log_get(size);
350     uint64_t log_base = (uintptr_t)log->log;
351     int r;
352 
353     r = dev->vhost_ops->vhost_call(dev, VHOST_SET_LOG_BASE, &log_base);
354     assert(r >= 0);
355     vhost_log_put(dev, true);
356     dev->log = log;
357     dev->log_size = size;
358 }
359 
360 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
361                                       uint64_t start_addr,
362                                       uint64_t size)
363 {
364     int i;
365     int r = 0;
366 
367     for (i = 0; !r && i < dev->nvqs; ++i) {
368         struct vhost_virtqueue *vq = dev->vqs + i;
369         hwaddr l;
370         void *p;
371 
372         if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
373             continue;
374         }
375         l = vq->ring_size;
376         p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
377         if (!p || l != vq->ring_size) {
378             fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
379             r = -ENOMEM;
380         }
381         if (p != vq->ring) {
382             fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
383             r = -EBUSY;
384         }
385         cpu_physical_memory_unmap(p, l, 0, 0);
386     }
387     return r;
388 }
389 
390 static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
391                                                       uint64_t start_addr,
392                                                       uint64_t size)
393 {
394     int i, n = dev->mem->nregions;
395     for (i = 0; i < n; ++i) {
396         struct vhost_memory_region *reg = dev->mem->regions + i;
397         if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
398                            start_addr, size)) {
399             return reg;
400         }
401     }
402     return NULL;
403 }
404 
405 static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
406                                  uint64_t start_addr,
407                                  uint64_t size,
408                                  uint64_t uaddr)
409 {
410     struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
411     uint64_t reglast;
412     uint64_t memlast;
413 
414     if (!reg) {
415         return true;
416     }
417 
418     reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
419     memlast = range_get_last(start_addr, size);
420 
421     /* Need to extend region? */
422     if (start_addr < reg->guest_phys_addr || memlast > reglast) {
423         return true;
424     }
425     /* userspace_addr changed? */
426     return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
427 }
428 
429 static void vhost_set_memory(MemoryListener *listener,
430                              MemoryRegionSection *section,
431                              bool add)
432 {
433     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
434                                          memory_listener);
435     hwaddr start_addr = section->offset_within_address_space;
436     ram_addr_t size = int128_get64(section->size);
437     bool log_dirty =
438         memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
439     int s = offsetof(struct vhost_memory, regions) +
440         (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
441     void *ram;
442 
443     dev->mem = g_realloc(dev->mem, s);
444 
445     if (log_dirty) {
446         add = false;
447     }
448 
449     assert(size);
450 
451     /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
452     ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
453     if (add) {
454         if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
455             /* Region exists with same address. Nothing to do. */
456             return;
457         }
458     } else {
459         if (!vhost_dev_find_reg(dev, start_addr, size)) {
460             /* Removing region that we don't access. Nothing to do. */
461             return;
462         }
463     }
464 
465     vhost_dev_unassign_memory(dev, start_addr, size);
466     if (add) {
467         /* Add given mapping, merging adjacent regions if any */
468         vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
469     } else {
470         /* Remove old mapping for this memory, if any. */
471         vhost_dev_unassign_memory(dev, start_addr, size);
472     }
473     dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
474     dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
475     dev->memory_changed = true;
476     used_memslots = dev->mem->nregions;
477 }
478 
479 static bool vhost_section(MemoryRegionSection *section)
480 {
481     return memory_region_is_ram(section->mr);
482 }
483 
484 static void vhost_begin(MemoryListener *listener)
485 {
486     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
487                                          memory_listener);
488     dev->mem_changed_end_addr = 0;
489     dev->mem_changed_start_addr = -1;
490 }
491 
492 static void vhost_commit(MemoryListener *listener)
493 {
494     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
495                                          memory_listener);
496     hwaddr start_addr = 0;
497     ram_addr_t size = 0;
498     uint64_t log_size;
499     int r;
500 
501     if (!dev->memory_changed) {
502         return;
503     }
504     if (!dev->started) {
505         return;
506     }
507     if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
508         return;
509     }
510 
511     /* dev->started was already checked above. */
512     start_addr = dev->mem_changed_start_addr;
513     size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
514 
515     r = vhost_verify_ring_mappings(dev, start_addr, size);
516     assert(r >= 0);
518 
519     if (!dev->log_enabled) {
520         r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
521         assert(r >= 0);
522         dev->memory_changed = false;
523         return;
524     }
525     log_size = vhost_get_log_size(dev);
526     /* We allocate an extra 4K bytes of log space,
527      * to reduce the number of reallocations. */
528 #define VHOST_LOG_BUFFER (0x1000 / sizeof(vhost_log_chunk_t))
529     /* To log more, must increase log size before table update. */
530     if (dev->log_size < log_size) {
531         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
532     }
533     r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
534     assert(r >= 0);
535     /* To log less, can only decrease log size after table update. */
536     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
537         vhost_dev_log_resize(dev, log_size);
538     }
539     dev->memory_changed = false;
540 }
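
/*
 * The resize ordering above is what keeps the bitmap complete across a
 * memory table update. Sketch with log sizes in chunk-sized words:
 *
 *     growing, 0x1000 -> 0x2000: resize to 0x2000 + VHOST_LOG_BUFFER
 *         first, then SET_MEM_TABLE; the kernel never logs beyond the
 *         buffer it was given.
 *     shrinking, 0x2000 -> 0x1000: SET_MEM_TABLE first, then resize
 *         down; until the update the kernel may still dirty the tail.
 *
 * VHOST_LOG_BUFFER adds a page of slack so that repeated small growths
 * do not each trigger a reallocation.
 */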
541 
542 static void vhost_region_add(MemoryListener *listener,
543                              MemoryRegionSection *section)
544 {
545     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
546                                          memory_listener);
547 
548     if (!vhost_section(section)) {
549         return;
550     }
551 
552     ++dev->n_mem_sections;
553     dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
554                                 dev->n_mem_sections);
555     dev->mem_sections[dev->n_mem_sections - 1] = *section;
556     memory_region_ref(section->mr);
557     vhost_set_memory(listener, section, true);
558 }
559 
560 static void vhost_region_del(MemoryListener *listener,
561                              MemoryRegionSection *section)
562 {
563     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
564                                          memory_listener);
565     int i;
566 
567     if (!vhost_section(section)) {
568         return;
569     }
570 
571     vhost_set_memory(listener, section, false);
572     memory_region_unref(section->mr);
573     for (i = 0; i < dev->n_mem_sections; ++i) {
574         if (dev->mem_sections[i].offset_within_address_space
575             == section->offset_within_address_space) {
576             --dev->n_mem_sections;
577             memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
578                     (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
579             break;
580         }
581     }
582 }
583 
584 static void vhost_region_nop(MemoryListener *listener,
585                              MemoryRegionSection *section)
586 {
587 }
588 
589 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
590                                     struct vhost_virtqueue *vq,
591                                     unsigned idx, bool enable_log)
592 {
593     struct vhost_vring_addr addr = {
594         .index = idx,
595         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
596         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
597         .used_user_addr = (uint64_t)(unsigned long)vq->used,
598         .log_guest_addr = vq->used_phys,
599         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
600     };
601     int r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ADDR, &addr);
602     if (r < 0) {
603         return -errno;
604     }
605     return 0;
606 }
607 
608 static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
609 {
610     uint64_t features = dev->acked_features;
611     int r;
612     if (enable_log) {
613         features |= 0x1ULL << VHOST_F_LOG_ALL;
614     }
615     r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
616     return r < 0 ? -errno : 0;
617 }
618 
619 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
620 {
621     int r, t, i;
622     r = vhost_dev_set_features(dev, enable_log);
623     if (r < 0) {
624         goto err_features;
625     }
626     for (i = 0; i < dev->nvqs; ++i) {
627         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
628                                      enable_log);
629         if (r < 0) {
630             goto err_vq;
631         }
632     }
633     return 0;
634 err_vq:
635     for (; i >= 0; --i) {
636         t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
637                                      dev->log_enabled);
638         assert(t >= 0);
639     }
640     t = vhost_dev_set_features(dev, dev->log_enabled);
641     assert(t >= 0);
642 err_features:
643     return r;
644 }
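
/*
 * The error path above is the usual unwind idiom. Generic sketch, where
 * apply(), new_state and old_state are placeholders rather than names
 * from this file:
 *
 *     for (i = 0; i < n; ++i) {
 *         if (apply(i, new_state) < 0) {
 *             goto err;
 *         }
 *     }
 *     return 0;
 * err:
 *     for (; i >= 0; --i) {
 *         assert(apply(i, old_state) >= 0);  // was in effect, must succeed
 *     }
 *
 * Note the rollback loop starts at the failing index i itself, since the
 * failed call may have partially taken effect.
 */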
645 
646 static int vhost_migration_log(MemoryListener *listener, int enable)
647 {
648     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
649                                          memory_listener);
650     int r;
651     if (!!enable == dev->log_enabled) {
652         return 0;
653     }
654     if (!dev->started) {
655         dev->log_enabled = enable;
656         return 0;
657     }
658     if (!enable) {
659         r = vhost_dev_set_log(dev, false);
660         if (r < 0) {
661             return r;
662         }
663         vhost_log_put(dev, false);
664         dev->log = NULL;
665         dev->log_size = 0;
666     } else {
667         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
668         r = vhost_dev_set_log(dev, true);
669         if (r < 0) {
670             return r;
671         }
672     }
673     dev->log_enabled = enable;
674     return 0;
675 }
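
/*
 * Ordering matters in both directions: when logging is switched off, the
 * backend is told to stop (vhost_dev_set_log(dev, false)) before the log
 * memory is released; when it is switched on, the log is allocated and
 * registered (vhost_dev_log_resize) before VHOST_F_LOG_ALL is enabled,
 * so the kernel never writes into a log buffer that is not there yet.
 */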
676 
677 static void vhost_log_global_start(MemoryListener *listener)
678 {
679     int r;
680 
681     r = vhost_migration_log(listener, true);
682     if (r < 0) {
683         abort();
684     }
685 }
686 
687 static void vhost_log_global_stop(MemoryListener *listener)
688 {
689     int r;
690 
691     r = vhost_migration_log(listener, false);
692     if (r < 0) {
693         abort();
694     }
695 }
696 
697 static void vhost_log_start(MemoryListener *listener,
698                             MemoryRegionSection *section,
699                             int old, int new)
700 {
701     /* FIXME: implement */
702 }
703 
704 static void vhost_log_stop(MemoryListener *listener,
705                            MemoryRegionSection *section,
706                            int old, int new)
707 {
708     /* FIXME: implement */
709 }
710 
711 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
712                                                    bool is_big_endian,
713                                                    int vhost_vq_index)
714 {
715     struct vhost_vring_state s = {
716         .index = vhost_vq_index,
717         .num = is_big_endian
718     };
719 
720     if (!dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ENDIAN, &s)) {
721         return 0;
722     }
723 
724     if (errno == ENOTTY) {
725         error_report("vhost does not support cross-endian");
726         return -ENOSYS;
727     }
728 
729     return -errno;
730 }
731 
732 static int vhost_virtqueue_start(struct vhost_dev *dev,
733                                 struct VirtIODevice *vdev,
734                                 struct vhost_virtqueue *vq,
735                                 unsigned idx)
736 {
737     hwaddr s, l, a;
738     int r;
739     int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, idx);
740     struct vhost_vring_file file = {
741         .index = vhost_vq_index
742     };
743     struct vhost_vring_state state = {
744         .index = vhost_vq_index
745     };
746     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
747 
748 
749     vq->num = state.num = virtio_queue_get_num(vdev, idx);
750     r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
751     if (r) {
752         return -errno;
753     }
754 
755     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
756     r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
757     if (r) {
758         return -errno;
759     }
760 
761     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
762         virtio_legacy_is_cross_endian(vdev)) {
763         r = vhost_virtqueue_set_vring_endian_legacy(dev,
764                                                     virtio_is_big_endian(vdev),
765                                                     vhost_vq_index);
766         if (r) {
767             return -errno;
768         }
769     }
770 
771     s = l = virtio_queue_get_desc_size(vdev, idx);
772     a = virtio_queue_get_desc_addr(vdev, idx);
773     vq->desc = cpu_physical_memory_map(a, &l, 0);
774     if (!vq->desc || l != s) {
775         r = -ENOMEM;
776         goto fail_alloc_desc;
777     }
778     s = l = virtio_queue_get_avail_size(vdev, idx);
779     a = virtio_queue_get_avail_addr(vdev, idx);
780     vq->avail = cpu_physical_memory_map(a, &l, 0);
781     if (!vq->avail || l != s) {
782         r = -ENOMEM;
783         goto fail_alloc_avail;
784     }
785     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
786     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
787     vq->used = cpu_physical_memory_map(a, &l, 1);
788     if (!vq->used || l != s) {
789         r = -ENOMEM;
790         goto fail_alloc_used;
791     }
792 
793     vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
794     vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
795     vq->ring = cpu_physical_memory_map(a, &l, 1);
796     if (!vq->ring || l != s) {
797         r = -ENOMEM;
798         goto fail_alloc_ring;
799     }
800 
801     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
802     if (r < 0) {
803         r = -errno;
804         goto fail_alloc;
805     }
806 
807     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
808     r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
809     if (r) {
810         r = -errno;
811         goto fail_kick;
812     }
813 
814     /* Clear and discard previous events if any. */
815     event_notifier_test_and_clear(&vq->masked_notifier);
816 
817     return 0;
818 
819 fail_kick:
820 fail_alloc:
821     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
822                               0, 0);
823 fail_alloc_ring:
824     cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
825                               0, 0);
826 fail_alloc_used:
827     cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
828                               0, 0);
829 fail_alloc_avail:
830     cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
831                               0, 0);
832 fail_alloc_desc:
833     return r;
834 }
835 
836 static void vhost_virtqueue_stop(struct vhost_dev *dev,
837                                     struct VirtIODevice *vdev,
838                                     struct vhost_virtqueue *vq,
839                                     unsigned idx)
840 {
841     int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, idx);
842     struct vhost_vring_state state = {
843         .index = vhost_vq_index,
844     };
845     int r;
846 
847     r = dev->vhost_ops->vhost_call(dev, VHOST_GET_VRING_BASE, &state);
848     if (r < 0) {
849         fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
850         fflush(stderr);
851     }
852     virtio_queue_set_last_avail_idx(vdev, idx, state.num);
853     virtio_queue_invalidate_signalled_used(vdev, idx);
854 
855     /* In the cross-endian case, we need to reset the vring endianness
856      * back to native, which is what legacy devices expect by default.
857      */
858     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
859         virtio_legacy_is_cross_endian(vdev)) {
860         r = vhost_virtqueue_set_vring_endian_legacy(dev,
861                                                     !virtio_is_big_endian(vdev),
862                                                     vhost_vq_index);
863         if (r < 0) {
864             error_report("failed to reset vring endianness");
865         }
866     }
867 
869     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
870                               0, virtio_queue_get_ring_size(vdev, idx));
871     cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
872                               1, virtio_queue_get_used_size(vdev, idx));
873     cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
874                               0, virtio_queue_get_avail_size(vdev, idx));
875     cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
876                               0, virtio_queue_get_desc_size(vdev, idx));
877 }
878 
879 static void vhost_eventfd_add(MemoryListener *listener,
880                               MemoryRegionSection *section,
881                               bool match_data, uint64_t data, EventNotifier *e)
882 {
883 }
884 
885 static void vhost_eventfd_del(MemoryListener *listener,
886                               MemoryRegionSection *section,
887                               bool match_data, uint64_t data, EventNotifier *e)
888 {
889 }
890 
891 static int vhost_virtqueue_init(struct vhost_dev *dev,
892                                 struct vhost_virtqueue *vq, int n)
893 {
894     int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, n);
895     struct vhost_vring_file file = {
896         .index = vhost_vq_index,
897     };
898     int r = event_notifier_init(&vq->masked_notifier, 0);
899     if (r < 0) {
900         return r;
901     }
902 
903     file.fd = event_notifier_get_fd(&vq->masked_notifier);
904     r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
905     if (r) {
906         r = -errno;
907         goto fail_call;
908     }
909     return 0;
910 fail_call:
911     event_notifier_cleanup(&vq->masked_notifier);
912     return r;
913 }
914 
915 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
916 {
917     event_notifier_cleanup(&vq->masked_notifier);
918 }
919 
920 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
921                    VhostBackendType backend_type)
922 {
923     uint64_t features;
924     int i, r;
925 
926     if (vhost_set_backend_type(hdev, backend_type) < 0) {
927         close((uintptr_t)opaque);
928         return -1;
929     }
930 
931     if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
932         close((uintptr_t)opaque);
933         return -errno;
934     }
935 
936     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
937 
938     r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
939     if (r < 0) {
940         goto fail;
941     }
942 
943     r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
944     if (r < 0) {
945         goto fail;
946     }
947 
948     for (i = 0; i < hdev->nvqs; ++i) {
949         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
950         if (r < 0) {
951             goto fail_vq;
952         }
953     }
954     hdev->features = features;
955 
956     hdev->memory_listener = (MemoryListener) {
957         .begin = vhost_begin,
958         .commit = vhost_commit,
959         .region_add = vhost_region_add,
960         .region_del = vhost_region_del,
961         .region_nop = vhost_region_nop,
962         .log_start = vhost_log_start,
963         .log_stop = vhost_log_stop,
964         .log_sync = vhost_log_sync,
965         .log_global_start = vhost_log_global_start,
966         .log_global_stop = vhost_log_global_stop,
967         .eventfd_add = vhost_eventfd_add,
968         .eventfd_del = vhost_eventfd_del,
969         .priority = 10
970     };
971     hdev->migration_blocker = NULL;
972     if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
973         error_setg(&hdev->migration_blocker,
974                    "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
975         migrate_add_blocker(hdev->migration_blocker);
976     }
977     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
978     hdev->n_mem_sections = 0;
979     hdev->mem_sections = NULL;
980     hdev->log = NULL;
981     hdev->log_size = 0;
982     hdev->log_enabled = false;
983     hdev->started = false;
984     hdev->memory_changed = false;
985     memory_listener_register(&hdev->memory_listener, &address_space_memory);
986     return 0;
987 fail_vq:
988     while (--i >= 0) {
989         vhost_virtqueue_cleanup(hdev->vqs + i);
990     }
991 fail:
992     r = -errno;
993     hdev->vhost_ops->vhost_backend_cleanup(hdev);
994     QLIST_REMOVE(hdev, entry);
995     return r;
996 }
997 
998 void vhost_dev_cleanup(struct vhost_dev *hdev)
999 {
1000     int i;
1001     for (i = 0; i < hdev->nvqs; ++i) {
1002         vhost_virtqueue_cleanup(hdev->vqs + i);
1003     }
1004     memory_listener_unregister(&hdev->memory_listener);
1005     if (hdev->migration_blocker) {
1006         migrate_del_blocker(hdev->migration_blocker);
1007         error_free(hdev->migration_blocker);
1008     }
1009     g_free(hdev->mem);
1010     g_free(hdev->mem_sections);
1011     hdev->vhost_ops->vhost_backend_cleanup(hdev);
1012     QLIST_REMOVE(hdev, entry);
1013 }
1014 
1015 /* Stop processing guest IO notifications in qemu.
1016  * Start processing them in the vhost backend.
1017  */
1018 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1019 {
1020     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1021     VirtioBusState *vbus = VIRTIO_BUS(qbus);
1022     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
1023     int i, r, e;
1024     if (!k->set_host_notifier) {
1025         fprintf(stderr, "binding does not support host notifiers\n");
1026         r = -ENOSYS;
1027         goto fail;
1028     }
1029 
1030     for (i = 0; i < hdev->nvqs; ++i) {
1031         r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
1032         if (r < 0) {
1033             fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
1034             goto fail_vq;
1035         }
1036     }
1037 
1038     return 0;
1039 fail_vq:
1040     while (--i >= 0) {
1041         e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
1042         if (e < 0) {
1043             fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
1044             fflush(stderr);
1045         }
1046         assert(e >= 0);
1047     }
1048 fail:
1049     return r;
1050 }
1051 
1052 /* Stop processing guest IO notifications in vhost.
1053  * Start processing them in qemu.
1054  * This might actually run the qemu handlers right away,
1055  * so virtio in qemu must be completely setup when this is called.
1056  * so virtio in qemu must be completely set up when this is called.
1057 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1058 {
1059     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1060     VirtioBusState *vbus = VIRTIO_BUS(qbus);
1061     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
1062     int i, r;
1063 
1064     for (i = 0; i < hdev->nvqs; ++i) {
1065         r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
1066         if (r < 0) {
1067             fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
1068             fflush(stderr);
1069         }
1070         assert(r >= 0);
1071     }
1072 }
1073 
1074 /* Test and clear event pending status.
1075  * Should be called after unmask to avoid losing events.
1076  */
1077 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1078 {
1079     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1080     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1081     return event_notifier_test_and_clear(&vq->masked_notifier);
1082 }
1083 
1084 /* Mask/unmask events from this vq. */
1085 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1086                          bool mask)
1087 {
1088     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1089     int r, index = n - hdev->vq_index;
1090     struct vhost_vring_file file;
1091 
1092     if (mask) {
1093         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1094     } else {
1095         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1096     }
1097 
1098     file.index = hdev->vhost_ops->vhost_backend_get_vq_index(hdev, n);
1099     r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_VRING_CALL, &file);
1100     assert(r >= 0);
1101 }
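
/*
 * Masking works by swapping which eventfd the backend signals: the
 * per-queue masked_notifier while masked, the guest notifier otherwise.
 * Events that arrive while masked are parked in masked_notifier and can
 * be replayed on unmask. Sketch of a transport's unmask path (the
 * interrupt-raising step is a placeholder for whatever the transport
 * actually uses):
 *
 *     vhost_virtqueue_mask(hdev, vdev, n, false);
 *     if (vhost_virtqueue_pending(hdev, n)) {
 *         raise_guest_interrupt(vdev, n);   // placeholder
 *     }
 */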
1102 
1103 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1104                             uint64_t features)
1105 {
1106     const int *bit = feature_bits;
1107     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1108         uint64_t bit_mask = (1ULL << *bit);
1109         if (!(hdev->features & bit_mask)) {
1110             features &= ~bit_mask;
1111         }
1112         bit++;
1113     }
1114     return features;
1115 }
1116 
1117 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1118                         uint64_t features)
1119 {
1120     const int *bit = feature_bits;
1121     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1122         uint64_t bit_mask = (1ULL << *bit);
1123         if (features & bit_mask) {
1124             hdev->acked_features |= bit_mask;
1125         }
1126         bit++;
1127     }
1128 }
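
/*
 * Backends drive both helpers with a feature-bit list terminated by
 * VHOST_INVALID_FEATURE_BIT. Minimal sketch (this particular list is
 * illustrative; real backends such as vhost_net define their own):
 *
 *     static const int example_feature_bits[] = {
 *         VIRTIO_F_NOTIFY_ON_EMPTY,
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 *     host_features = vhost_get_features(hdev, example_feature_bits,
 *                                        host_features);
 *     ... after the guest acks its feature set ...
 *     vhost_ack_features(hdev, example_feature_bits, guest_features);
 */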
1129 
1130 /* Host notifiers must be enabled at this point. */
1131 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1132 {
1133     int i, r;
1134 
1135     hdev->started = true;
1136 
1137     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1138     if (r < 0) {
1139         goto fail_features;
1140     }
1141     r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
1142     if (r < 0) {
1143         r = -errno;
1144         goto fail_mem;
1145     }
1146     for (i = 0; i < hdev->nvqs; ++i) {
1147         r = vhost_virtqueue_start(hdev,
1148                                   vdev,
1149                                   hdev->vqs + i,
1150                                   hdev->vq_index + i);
1151         if (r < 0) {
1152             goto fail_vq;
1153         }
1154     }
1155 
1156     if (hdev->log_enabled) {
1157         uint64_t log_base;
1158 
1159         hdev->log_size = vhost_get_log_size(hdev);
1160         hdev->log = vhost_log_get(hdev->log_size);
1161         log_base = (uintptr_t)hdev->log->log;
1162         r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE,
1163                                         hdev->log_size ? &log_base : NULL);
1164         if (r < 0) {
1165             r = -errno;
1166             goto fail_log;
1167         }
1168     }
1169 
1170     return 0;
1171 fail_log:
1172     vhost_log_put(hdev, false);
1173 fail_vq:
1174     while (--i >= 0) {
1175         vhost_virtqueue_stop(hdev,
1176                              vdev,
1177                              hdev->vqs + i,
1178                              hdev->vq_index + i);
1179     }
1180     i = hdev->nvqs;
1181 fail_mem:
1182 fail_features:
1183 
1184     hdev->started = false;
1185     return r;
1186 }
1187 
1188 /* Host notifiers must be enabled at this point. */
1189 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1190 {
1191     int i;
1192 
1193     for (i = 0; i < hdev->nvqs; ++i) {
1194         vhost_virtqueue_stop(hdev,
1195                              vdev,
1196                              hdev->vqs + i,
1197                              hdev->vq_index + i);
1198     }
1199 
1200     vhost_log_put(hdev, true);
1201     hdev->started = false;
1202     hdev->log = NULL;
1203     hdev->log_size = 0;
1204 }
1205 
1206