xref: /qemu/hw/virtio/virtio.c (revision 42508261)
1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qapi/qapi-commands-virtio.h"
17 #include "trace.h"
18 #include "qemu/defer-call.h"
19 #include "qemu/error-report.h"
20 #include "qemu/log.h"
21 #include "qemu/main-loop.h"
22 #include "qemu/module.h"
23 #include "exec/tswap.h"
24 #include "qom/object_interfaces.h"
25 #include "hw/core/cpu.h"
26 #include "hw/virtio/virtio.h"
27 #include "hw/virtio/vhost.h"
28 #include "migration/qemu-file-types.h"
29 #include "qemu/atomic.h"
30 #include "hw/virtio/virtio-bus.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/virtio/virtio-access.h"
33 #include "sysemu/dma.h"
34 #include "sysemu/runstate.h"
35 #include "virtio-qmp.h"
36 
37 #include "standard-headers/linux/virtio_ids.h"
38 #include "standard-headers/linux/vhost_types.h"
39 #include "standard-headers/linux/virtio_blk.h"
40 #include "standard-headers/linux/virtio_console.h"
41 #include "standard-headers/linux/virtio_gpu.h"
42 #include "standard-headers/linux/virtio_net.h"
43 #include "standard-headers/linux/virtio_scsi.h"
44 #include "standard-headers/linux/virtio_i2c.h"
45 #include "standard-headers/linux/virtio_balloon.h"
46 #include "standard-headers/linux/virtio_iommu.h"
47 #include "standard-headers/linux/virtio_mem.h"
48 #include "standard-headers/linux/virtio_vsock.h"
49 
50 /*
51  * Maximum size of virtio device config space
52  */
53 #define VHOST_USER_MAX_CONFIG_SIZE 256
54 
55 /*
56  * The alignment to use between consumer and producer parts of vring.
57  * x86 pagesize again. This is the default, used by transports like PCI
58  * which don't provide a means for the guest to tell the host the alignment.
59  */
60 #define VIRTIO_PCI_VRING_ALIGN         4096
61 
62 typedef struct VRingDesc
63 {
64     uint64_t addr;
65     uint32_t len;
66     uint16_t flags;
67     uint16_t next;
68 } VRingDesc;
69 
70 typedef struct VRingPackedDesc {
71     uint64_t addr;
72     uint32_t len;
73     uint16_t id;
74     uint16_t flags;
75 } VRingPackedDesc;
76 
77 typedef struct VRingAvail
78 {
79     uint16_t flags;
80     uint16_t idx;
81     uint16_t ring[];
82 } VRingAvail;
83 
84 typedef struct VRingUsedElem
85 {
86     uint32_t id;
87     uint32_t len;
88 } VRingUsedElem;
89 
90 typedef struct VRingUsed
91 {
92     uint16_t flags;
93     uint16_t idx;
94     VRingUsedElem ring[];
95 } VRingUsed;
96 
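/*
 * RCU-managed caches over the guest memory that backs a virtqueue's
 * descriptor, available and used rings.
 */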
97 typedef struct VRingMemoryRegionCaches {
98     struct rcu_head rcu;
99     MemoryRegionCache desc;
100     MemoryRegionCache avail;
101     MemoryRegionCache used;
102 } VRingMemoryRegionCaches;
103 
104 typedef struct VRing
105 {
106     unsigned int num;
107     unsigned int num_default;
108     unsigned int align;
109     hwaddr desc;
110     hwaddr avail;
111     hwaddr used;
112     VRingMemoryRegionCaches *caches;
113 } VRing;
114 
115 typedef struct VRingPackedDescEvent {
116     uint16_t off_wrap;
117     uint16_t flags;
118 } VRingPackedDescEvent;
119 
120 struct VirtQueue
121 {
122     VRing vring;
123     VirtQueueElement *used_elems;
124 
125     /* Next head to pop */
126     uint16_t last_avail_idx;
127     bool last_avail_wrap_counter;
128 
129     /* Last avail_idx read from VQ. */
130     uint16_t shadow_avail_idx;
131     bool shadow_avail_wrap_counter;
132 
133     uint16_t used_idx;
134     bool used_wrap_counter;
135 
136     /* Last used index value we have signalled on */
137     uint16_t signalled_used;
138 
139     /* Whether signalled_used is valid */
140     bool signalled_used_valid;
141 
142     /* Notification enabled? */
143     bool notification;
144 
145     uint16_t queue_index;
146 
147     unsigned int inuse;
148 
149     uint16_t vector;
150     VirtIOHandleOutput handle_output;
151     VirtIODevice *vdev;
152     EventNotifier guest_notifier;
153     EventNotifier host_notifier;
154     bool host_notifier_enabled;
155     QLIST_ENTRY(VirtQueue) node;
156 };
157 
158 const char *virtio_device_names[] = {
159     [VIRTIO_ID_NET] = "virtio-net",
160     [VIRTIO_ID_BLOCK] = "virtio-blk",
161     [VIRTIO_ID_CONSOLE] = "virtio-serial",
162     [VIRTIO_ID_RNG] = "virtio-rng",
163     [VIRTIO_ID_BALLOON] = "virtio-balloon",
164     [VIRTIO_ID_IOMEM] = "virtio-iomem",
165     [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
166     [VIRTIO_ID_SCSI] = "virtio-scsi",
167     [VIRTIO_ID_9P] = "virtio-9p",
168     [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
169     [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
170     [VIRTIO_ID_CAIF] = "virtio-caif",
171     [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
172     [VIRTIO_ID_GPU] = "virtio-gpu",
173     [VIRTIO_ID_CLOCK] = "virtio-clk",
174     [VIRTIO_ID_INPUT] = "virtio-input",
175     [VIRTIO_ID_VSOCK] = "vhost-vsock",
176     [VIRTIO_ID_CRYPTO] = "virtio-crypto",
177     [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
178     [VIRTIO_ID_PSTORE] = "virtio-pstore",
179     [VIRTIO_ID_IOMMU] = "virtio-iommu",
180     [VIRTIO_ID_MEM] = "virtio-mem",
181     [VIRTIO_ID_SOUND] = "virtio-sound",
182     [VIRTIO_ID_FS] = "virtio-user-fs",
183     [VIRTIO_ID_PMEM] = "virtio-pmem",
184     [VIRTIO_ID_RPMB] = "virtio-rpmb",
185     [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
186     [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
187     [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
188     [VIRTIO_ID_SCMI] = "virtio-scmi",
189     [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
190     [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
191     [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
192     [VIRTIO_ID_CAN] = "virtio-can",
193     [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
194     [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
195     [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
196     [VIRTIO_ID_BT] = "virtio-bluetooth",
197     [VIRTIO_ID_GPIO] = "virtio-gpio"
198 };
199 
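/* Map a device ID from virtio_ids.h to its canonical QEMU device name. */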
200 static const char *virtio_id_to_name(uint16_t device_id)
201 {
202     assert(device_id < G_N_ELEMENTS(virtio_device_names));
203     const char *name = virtio_device_names[device_id];
204     assert(name != NULL);
205     return name;
206 }
207 
208 /* Called within call_rcu().  */
209 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
210 {
211     assert(caches != NULL);
212     address_space_cache_destroy(&caches->desc);
213     address_space_cache_destroy(&caches->avail);
214     address_space_cache_destroy(&caches->used);
215     g_free(caches);
216 }
217 
218 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
219 {
220     VRingMemoryRegionCaches *caches;
221 
222     caches = qatomic_read(&vq->vring.caches);
223     qatomic_rcu_set(&vq->vring.caches, NULL);
224     if (caches) {
225         call_rcu(caches, virtio_free_region_cache, rcu);
226     }
227 }
228 
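/*
 * (Re)build the MemoryRegionCaches for queue @n and publish them with RCU.
 * Any previous caches are freed after a grace period; on failure the queue
 * is left with no caches at all.
 */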
229 void virtio_init_region_cache(VirtIODevice *vdev, int n)
230 {
231     VirtQueue *vq = &vdev->vq[n];
232     VRingMemoryRegionCaches *old = vq->vring.caches;
233     VRingMemoryRegionCaches *new = NULL;
234     hwaddr addr, size;
235     int64_t len;
236     bool packed;
237 
238 
239     addr = vq->vring.desc;
240     if (!addr) {
241         goto out_no_cache;
242     }
243     new = g_new0(VRingMemoryRegionCaches, 1);
244     size = virtio_queue_get_desc_size(vdev, n);
245     packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
246                                    true : false;
247     len = address_space_cache_init(&new->desc, vdev->dma_as,
248                                    addr, size, packed);
249     if (len < size) {
250         virtio_error(vdev, "Cannot map desc");
251         goto err_desc;
252     }
253 
254     size = virtio_queue_get_used_size(vdev, n);
255     len = address_space_cache_init(&new->used, vdev->dma_as,
256                                    vq->vring.used, size, true);
257     if (len < size) {
258         virtio_error(vdev, "Cannot map used");
259         goto err_used;
260     }
261 
262     size = virtio_queue_get_avail_size(vdev, n);
263     len = address_space_cache_init(&new->avail, vdev->dma_as,
264                                    vq->vring.avail, size, false);
265     if (len < size) {
266         virtio_error(vdev, "Cannot map avail");
267         goto err_avail;
268     }
269 
270     qatomic_rcu_set(&vq->vring.caches, new);
271     if (old) {
272         call_rcu(old, virtio_free_region_cache, rcu);
273     }
274     return;
275 
276 err_avail:
277     address_space_cache_destroy(&new->avail);
278 err_used:
279     address_space_cache_destroy(&new->used);
280 err_desc:
281     address_space_cache_destroy(&new->desc);
282 out_no_cache:
283     g_free(new);
284     virtio_virtqueue_reset_region_cache(vq);
285 }
286 
287 /* virt queue functions */
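/*
 * Recompute the avail/used ring addresses from the descriptor table base,
 * following the split-ring layout:
 *   avail = desc + num * sizeof(VRingDesc)
 *   used  = align_up(avail + offsetof(VRingAvail, ring[num]), vring.align)
 */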
288 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
289 {
290     VRing *vring = &vdev->vq[n].vring;
291 
292     if (!vring->num || !vring->desc || !vring->align) {
293         /* not yet set up -> nothing to do */
294         return;
295     }
296     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
297     vring->used = vring_align(vring->avail +
298                               offsetof(VRingAvail, ring[vring->num]),
299                               vring->align);
300     virtio_init_region_cache(vdev, n);
301 }
302 
303 /* Called within rcu_read_lock().  */
304 static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
305                                   MemoryRegionCache *cache, int i)
306 {
307     address_space_read_cached(cache, i * sizeof(VRingDesc),
308                               desc, sizeof(VRingDesc));
309     virtio_tswap64s(vdev, &desc->addr);
310     virtio_tswap32s(vdev, &desc->len);
311     virtio_tswap16s(vdev, &desc->flags);
312     virtio_tswap16s(vdev, &desc->next);
313 }
314 
315 static void vring_packed_event_read(VirtIODevice *vdev,
316                                     MemoryRegionCache *cache,
317                                     VRingPackedDescEvent *e)
318 {
319     hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
320     hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);
321 
322     e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
323     /* Make sure flags are read before off_wrap */
324     smp_rmb();
325     e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
326     virtio_tswap16s(vdev, &e->flags);
327 }
328 
329 static void vring_packed_off_wrap_write(VirtIODevice *vdev,
330                                         MemoryRegionCache *cache,
331                                         uint16_t off_wrap)
332 {
333     hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);
334 
335     virtio_stw_phys_cached(vdev, cache, off, off_wrap);
336     address_space_cache_invalidate(cache, off, sizeof(off_wrap));
337 }
338 
339 static void vring_packed_flags_write(VirtIODevice *vdev,
340                                      MemoryRegionCache *cache, uint16_t flags)
341 {
342     hwaddr off = offsetof(VRingPackedDescEvent, flags);
343 
344     virtio_stw_phys_cached(vdev, cache, off, flags);
345     address_space_cache_invalidate(cache, off, sizeof(flags));
346 }
347 
348 /* Called within rcu_read_lock().  */
349 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
350 {
351     return qatomic_rcu_read(&vq->vring.caches);
352 }
353 
354 /* Called within rcu_read_lock().  */
355 static inline uint16_t vring_avail_flags(VirtQueue *vq)
356 {
357     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
358     hwaddr pa = offsetof(VRingAvail, flags);
359 
360     if (!caches) {
361         return 0;
362     }
363 
364     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
365 }
366 
367 /* Called within rcu_read_lock().  */
368 static inline uint16_t vring_avail_idx(VirtQueue *vq)
369 {
370     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
371     hwaddr pa = offsetof(VRingAvail, idx);
372 
373     if (!caches) {
374         return 0;
375     }
376 
377     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
378     return vq->shadow_avail_idx;
379 }
380 
381 /* Called within rcu_read_lock().  */
382 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
383 {
384     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
385     hwaddr pa = offsetof(VRingAvail, ring[i]);
386 
387     if (!caches) {
388         return 0;
389     }
390 
391     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
392 }
393 
394 /* Called within rcu_read_lock().  */
395 static inline uint16_t vring_get_used_event(VirtQueue *vq)
396 {
397     return vring_avail_ring(vq, vq->vring.num);
398 }
399 
400 /* Called within rcu_read_lock().  */
401 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
402                                     int i)
403 {
404     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
405     hwaddr pa = offsetof(VRingUsed, ring[i]);
406 
407     if (!caches) {
408         return;
409     }
410 
411     virtio_tswap32s(vq->vdev, &uelem->id);
412     virtio_tswap32s(vq->vdev, &uelem->len);
413     address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
414     address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
415 }
416 
417 /* Called within rcu_read_lock(). */
418 static inline uint16_t vring_used_flags(VirtQueue *vq)
419 {
420     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
421     hwaddr pa = offsetof(VRingUsed, flags);
422 
423     if (!caches) {
424         return 0;
425     }
426 
427     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
428 }
429 
430 /* Called within rcu_read_lock().  */
431 static uint16_t vring_used_idx(VirtQueue *vq)
432 {
433     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
434     hwaddr pa = offsetof(VRingUsed, idx);
435 
436     if (!caches) {
437         return 0;
438     }
439 
440     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
441 }
442 
443 /* Called within rcu_read_lock().  */
444 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
445 {
446     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
447     hwaddr pa = offsetof(VRingUsed, idx);
448 
449     if (caches) {
450         virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
451         address_space_cache_invalidate(&caches->used, pa, sizeof(val));
452     }
453 
454     vq->used_idx = val;
455 }
456 
457 /* Called within rcu_read_lock().  */
458 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
459 {
460     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
461     VirtIODevice *vdev = vq->vdev;
462     hwaddr pa = offsetof(VRingUsed, flags);
463     uint16_t flags;
464 
465     if (!caches) {
466         return;
467     }
468 
469     flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
470     virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
471     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
472 }
473 
474 /* Called within rcu_read_lock().  */
475 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
476 {
477     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
478     VirtIODevice *vdev = vq->vdev;
479     hwaddr pa = offsetof(VRingUsed, flags);
480     uint16_t flags;
481 
482     if (!caches) {
483         return;
484     }
485 
486     flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
487     virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
488     address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
489 }
490 
491 /* Called within rcu_read_lock().  */
492 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
493 {
494     VRingMemoryRegionCaches *caches;
495     hwaddr pa;
496     if (!vq->notification) {
497         return;
498     }
499 
500     caches = vring_get_region_caches(vq);
501     if (!caches) {
502         return;
503     }
504 
505     pa = offsetof(VRingUsed, ring[vq->vring.num]);
506     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
507     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
508 }
509 
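/*
 * Split ring: with EVENT_IDX the device requests a kick by publishing the
 * avail index it wants to be notified at; otherwise it toggles the
 * VRING_USED_F_NO_NOTIFY flag in the used ring.
 */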
510 static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
511 {
512     RCU_READ_LOCK_GUARD();
513 
514     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
515         vring_set_avail_event(vq, vring_avail_idx(vq));
516     } else if (enable) {
517         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
518     } else {
519         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
520     }
521     if (enable) {
522         /* Expose avail event/used flags before caller checks the avail idx. */
523         smp_mb();
524     }
525 }
526 
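/*
 * Packed ring: kick suppression is controlled through the device event
 * suppression structure, either ENABLE/DISABLE or, with EVENT_IDX, DESC
 * plus an off_wrap index.
 */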
527 static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
528 {
529     uint16_t off_wrap;
530     VRingPackedDescEvent e;
531     VRingMemoryRegionCaches *caches;
532 
533     RCU_READ_LOCK_GUARD();
534     caches = vring_get_region_caches(vq);
535     if (!caches) {
536         return;
537     }
538 
539     vring_packed_event_read(vq->vdev, &caches->used, &e);
540 
541     if (!enable) {
542         e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
543     } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
544         off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
545         vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
546         /* Make sure off_wrap is written before flags */
547         smp_wmb();
548         e.flags = VRING_PACKED_EVENT_FLAG_DESC;
549     } else {
550         e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
551     }
552 
553     vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
554     if (enable) {
555         /* Expose avail event/used flags before caller checks the avail idx. */
556         smp_mb();
557     }
558 }
559 
560 bool virtio_queue_get_notification(VirtQueue *vq)
561 {
562     return vq->notification;
563 }
564 
565 void virtio_queue_set_notification(VirtQueue *vq, int enable)
566 {
567     vq->notification = enable;
568 
569     if (!vq->vring.desc) {
570         return;
571     }
572 
573     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
574         virtio_queue_packed_set_notification(vq, enable);
575     } else {
576         virtio_queue_split_set_notification(vq, enable);
577     }
578 }
579 
580 int virtio_queue_ready(VirtQueue *vq)
581 {
582     return vq->vring.avail != 0;
583 }
584 
585 static void vring_packed_desc_read_flags(VirtIODevice *vdev,
586                                          uint16_t *flags,
587                                          MemoryRegionCache *cache,
588                                          int i)
589 {
590     hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
591 
592     *flags = virtio_lduw_phys_cached(vdev, cache, off);
593 }
594 
595 static void vring_packed_desc_read(VirtIODevice *vdev,
596                                    VRingPackedDesc *desc,
597                                    MemoryRegionCache *cache,
598                                    int i, bool strict_order)
599 {
600     hwaddr off = i * sizeof(VRingPackedDesc);
601 
602     vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
603 
604     if (strict_order) {
605         /* Make sure flags are read before the rest of the fields. */
606         smp_rmb();
607     }
608 
609     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
610                               &desc->addr, sizeof(desc->addr));
611     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
612                               &desc->id, sizeof(desc->id));
613     address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
614                               &desc->len, sizeof(desc->len));
615     virtio_tswap64s(vdev, &desc->addr);
616     virtio_tswap16s(vdev, &desc->id);
617     virtio_tswap32s(vdev, &desc->len);
618 }
619 
620 static void vring_packed_desc_write_data(VirtIODevice *vdev,
621                                          VRingPackedDesc *desc,
622                                          MemoryRegionCache *cache,
623                                          int i)
624 {
625     hwaddr off_id = i * sizeof(VRingPackedDesc) +
626                     offsetof(VRingPackedDesc, id);
627     hwaddr off_len = i * sizeof(VRingPackedDesc) +
628                     offsetof(VRingPackedDesc, len);
629 
630     virtio_tswap32s(vdev, &desc->len);
631     virtio_tswap16s(vdev, &desc->id);
632     address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
633     address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
634     address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
635     address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
636 }
637 
638 static void vring_packed_desc_write_flags(VirtIODevice *vdev,
639                                           VRingPackedDesc *desc,
640                                           MemoryRegionCache *cache,
641                                           int i)
642 {
643     hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
644 
645     virtio_stw_phys_cached(vdev, cache, off, desc->flags);
646     address_space_cache_invalidate(cache, off, sizeof(desc->flags));
647 }
648 
649 static void vring_packed_desc_write(VirtIODevice *vdev,
650                                     VRingPackedDesc *desc,
651                                     MemoryRegionCache *cache,
652                                     int i, bool strict_order)
653 {
654     vring_packed_desc_write_data(vdev, desc, cache, i);
655     if (strict_order) {
656         /* Make sure data is written before flags. */
657         smp_wmb();
658     }
659     vring_packed_desc_write_flags(vdev, desc, cache, i);
660 }
661 
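/*
 * A packed descriptor is available when its AVAIL bit matches the wrap
 * counter and differs from its USED bit.
 */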
662 static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
663 {
664     bool avail, used;
665 
666     avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
667     used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
668     return (avail != used) && (avail == wrap_counter);
669 }
670 
671 /* Fetch avail_idx from VQ memory only when we really need to know if
672  * guest has added some buffers.
673  * Called within rcu_read_lock().  */
674 static int virtio_queue_empty_rcu(VirtQueue *vq)
675 {
676     if (virtio_device_disabled(vq->vdev)) {
677         return 1;
678     }
679 
680     if (unlikely(!vq->vring.avail)) {
681         return 1;
682     }
683 
684     if (vq->shadow_avail_idx != vq->last_avail_idx) {
685         return 0;
686     }
687 
688     return vring_avail_idx(vq) == vq->last_avail_idx;
689 }
690 
691 static int virtio_queue_split_empty(VirtQueue *vq)
692 {
693     bool empty;
694 
695     if (virtio_device_disabled(vq->vdev)) {
696         return 1;
697     }
698 
699     if (unlikely(!vq->vring.avail)) {
700         return 1;
701     }
702 
703     if (vq->shadow_avail_idx != vq->last_avail_idx) {
704         return 0;
705     }
706 
707     RCU_READ_LOCK_GUARD();
708     empty = vring_avail_idx(vq) == vq->last_avail_idx;
709     return empty;
710 }
711 
712 /* Called within rcu_read_lock().  */
713 static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
714 {
715     struct VRingPackedDesc desc;
716     VRingMemoryRegionCaches *cache;
717 
718     if (unlikely(!vq->vring.desc)) {
719         return 1;
720     }
721 
722     cache = vring_get_region_caches(vq);
723     if (!cache) {
724         return 1;
725     }
726 
727     vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
728                                  vq->last_avail_idx);
729 
730     return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
731 }
732 
733 static int virtio_queue_packed_empty(VirtQueue *vq)
734 {
735     RCU_READ_LOCK_GUARD();
736     return virtio_queue_packed_empty_rcu(vq);
737 }
738 
739 int virtio_queue_empty(VirtQueue *vq)
740 {
741     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
742         return virtio_queue_packed_empty(vq);
743     } else {
744         return virtio_queue_split_empty(vq);
745     }
746 }
747 
748 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
749                                unsigned int len)
750 {
751     AddressSpace *dma_as = vq->vdev->dma_as;
752     unsigned int offset;
753     int i;
754 
755     offset = 0;
756     for (i = 0; i < elem->in_num; i++) {
757         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
758 
759         dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
760                          elem->in_sg[i].iov_len,
761                          DMA_DIRECTION_FROM_DEVICE, size);
762 
763         offset += size;
764     }
765 
766     for (i = 0; i < elem->out_num; i++)
767         dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
768                          elem->out_sg[i].iov_len,
769                          DMA_DIRECTION_TO_DEVICE,
770                          elem->out_sg[i].iov_len);
771 }
772 
773 /* virtqueue_detach_element:
774  * @vq: The #VirtQueue
775  * @elem: The #VirtQueueElement
776  * @len: number of bytes written
777  *
778  * Detach the element from the virtqueue.  This function is suitable for device
779  * reset or other situations where a #VirtQueueElement is simply freed and will
780  * not be pushed or discarded.
781  */
782 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
783                               unsigned int len)
784 {
785     vq->inuse -= elem->ndescs;
786     virtqueue_unmap_sg(vq, elem, len);
787 }
788 
789 static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
790 {
791     vq->last_avail_idx -= num;
792 }
793 
794 static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
795 {
796     if (vq->last_avail_idx < num) {
797         vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
798         vq->last_avail_wrap_counter ^= 1;
799     } else {
800         vq->last_avail_idx -= num;
801     }
802 }
803 
804 /* virtqueue_unpop:
805  * @vq: The #VirtQueue
806  * @elem: The #VirtQueueElement
807  * @len: number of bytes written
808  *
809  * Pretend the most recent element wasn't popped from the virtqueue.  The next
810  * call to virtqueue_pop() will refetch the element.
811  */
812 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
813                      unsigned int len)
814 {
815 
816     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
817         virtqueue_packed_rewind(vq, 1);
818     } else {
819         virtqueue_split_rewind(vq, 1);
820     }
821 
822     virtqueue_detach_element(vq, elem, len);
823 }
824 
825 /* virtqueue_rewind:
826  * @vq: The #VirtQueue
827  * @num: Number of elements to push back
828  *
829  * Pretend that elements weren't popped from the virtqueue.  The next
830  * virtqueue_pop() will refetch the oldest element.
831  *
832  * Use virtqueue_unpop() instead if you have a VirtQueueElement.
833  *
834  * Returns: true on success, false if @num is greater than the number of in use
835  * elements.
836  */
837 bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
838 {
839     if (num > vq->inuse) {
840         return false;
841     }
842 
843     vq->inuse -= num;
844     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
845         virtqueue_packed_rewind(vq, num);
846     } else {
847         virtqueue_split_rewind(vq, num);
848     }
849     return true;
850 }
851 
852 static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
853                     unsigned int len, unsigned int idx)
854 {
855     VRingUsedElem uelem;
856 
857     if (unlikely(!vq->vring.used)) {
858         return;
859     }
860 
861     idx = (idx + vq->used_idx) % vq->vring.num;
862 
863     uelem.id = elem->index;
864     uelem.len = len;
865     vring_used_write(vq, &uelem, idx);
866 }
867 
868 static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
869                                   unsigned int len, unsigned int idx)
870 {
871     vq->used_elems[idx].index = elem->index;
872     vq->used_elems[idx].len = len;
873     vq->used_elems[idx].ndescs = elem->ndescs;
874 }
875 
876 static void virtqueue_packed_fill_desc(VirtQueue *vq,
877                                        const VirtQueueElement *elem,
878                                        unsigned int idx,
879                                        bool strict_order)
880 {
881     uint16_t head;
882     VRingMemoryRegionCaches *caches;
883     VRingPackedDesc desc = {
884         .id = elem->index,
885         .len = elem->len,
886     };
887     bool wrap_counter = vq->used_wrap_counter;
888 
889     if (unlikely(!vq->vring.desc)) {
890         return;
891     }
892 
893     head = vq->used_idx + idx;
894     if (head >= vq->vring.num) {
895         head -= vq->vring.num;
896         wrap_counter ^= 1;
897     }
898     if (wrap_counter) {
899         desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
900         desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
901     } else {
902         desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
903         desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
904     }
905 
906     caches = vring_get_region_caches(vq);
907     if (!caches) {
908         return;
909     }
910 
911     vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
912 }
913 
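/*
 * virtqueue_fill: unmap @elem and record its completion (@len bytes written)
 * at offset @idx past the current used index.  The result only becomes
 * visible to the guest after virtqueue_flush().
 */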
914 /* Called within rcu_read_lock().  */
915 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
916                     unsigned int len, unsigned int idx)
917 {
918     trace_virtqueue_fill(vq, elem, len, idx);
919 
920     virtqueue_unmap_sg(vq, elem, len);
921 
922     if (virtio_device_disabled(vq->vdev)) {
923         return;
924     }
925 
926     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
927         virtqueue_packed_fill(vq, elem, len, idx);
928     } else {
929         virtqueue_split_fill(vq, elem, len, idx);
930     }
931 }
932 
933 /* Called within rcu_read_lock().  */
934 static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
935 {
936     uint16_t old, new;
937 
938     if (unlikely(!vq->vring.used)) {
939         return;
940     }
941 
942     /* Make sure buffer is written before we update index. */
943     smp_wmb();
944     trace_virtqueue_flush(vq, count);
945     old = vq->used_idx;
946     new = old + count;
947     vring_used_idx_set(vq, new);
948     vq->inuse -= count;
949     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
950         vq->signalled_used_valid = false;
951 }
952 
953 static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
954 {
955     unsigned int i, ndescs = 0;
956 
957     if (unlikely(!vq->vring.desc)) {
958         return;
959     }
960 
961     /*
962      * For an indirect element, 'ndescs' is 1.
963      * For all other elements, 'ndescs' is the number of descriptors
964      * chained by NEXT (as set in virtqueue_packed_pop).
965      * So when an 'elem' is written into the descriptor ring, its 'idx'
966      * is the value of 'vq->used_idx' plus the 'ndescs' of the elements
967      * flushed before it.
968      */
969     ndescs += vq->used_elems[0].ndescs;
970     for (i = 1; i < count; i++) {
971         virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
972         ndescs += vq->used_elems[i].ndescs;
973     }
974     virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
975 
976     vq->inuse -= ndescs;
977     vq->used_idx += ndescs;
978     if (vq->used_idx >= vq->vring.num) {
979         vq->used_idx -= vq->vring.num;
980         vq->used_wrap_counter ^= 1;
981         vq->signalled_used_valid = false;
982     }
983 }
984 
985 void virtqueue_flush(VirtQueue *vq, unsigned int count)
986 {
987     if (virtio_device_disabled(vq->vdev)) {
988         vq->inuse -= count;
989         return;
990     }
991 
992     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
993         virtqueue_packed_flush(vq, count);
994     } else {
995         virtqueue_split_flush(vq, count);
996     }
997 }
998 
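/* virtqueue_push: fill and flush a single element under the RCU read lock. */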
999 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
1000                     unsigned int len)
1001 {
1002     RCU_READ_LOCK_GUARD();
1003     virtqueue_fill(vq, elem, len, 0);
1004     virtqueue_flush(vq, 1);
1005 }
1006 
1007 /* Called within rcu_read_lock().  */
1008 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
1009 {
1010     uint16_t avail_idx, num_heads;
1011 
1012     /* Use shadow index whenever possible. */
1013     avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
1014                                               : vring_avail_idx(vq);
1015     num_heads = avail_idx - idx;
1016 
1017     /* Check it isn't doing very strange things with descriptor numbers. */
1018     if (num_heads > vq->vring.num) {
1019         virtio_error(vq->vdev, "Guest moved used index from %u to %u",
1020                      idx, vq->shadow_avail_idx);
1021         return -EINVAL;
1022     }
1023     /*
1024      * On success, callers read a descriptor at vq->last_avail_idx.
1025      * Make sure descriptor read does not bypass avail index read.
1026      *
1027      * This is necessary even if we are using a shadow index, since
1028      * the shadow index could have been initialized by calling
1029      * vring_avail_idx() outside of this function, i.e., by a guest
1030      * memory read not accompanied by a barrier.
1031      */
1032     if (num_heads) {
1033         smp_rmb();
1034     }
1035 
1036     return num_heads;
1037 }
1038 
1039 /* Called within rcu_read_lock().  */
1040 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
1041                                unsigned int *head)
1042 {
1043     /* Grab the next descriptor number they're advertising, and increment
1044      * the index we've seen. */
1045     *head = vring_avail_ring(vq, idx % vq->vring.num);
1046 
1047     /* If their number is silly, that's a fatal mistake. */
1048     if (*head >= vq->vring.num) {
1049         virtio_error(vq->vdev, "Guest says index %u is available", *head);
1050         return false;
1051     }
1052 
1053     return true;
1054 }
1055 
1056 enum {
1057     VIRTQUEUE_READ_DESC_ERROR = -1,
1058     VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
1059     VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
1060 };
1061 
1062 /* Reads the 'desc->next' descriptor into '*desc'. */
1063 static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
1064                                           MemoryRegionCache *desc_cache,
1065                                           unsigned int max)
1066 {
1067     /* If this descriptor says it doesn't chain, we're done. */
1068     if (!(desc->flags & VRING_DESC_F_NEXT)) {
1069         return VIRTQUEUE_READ_DESC_DONE;
1070     }
1071 
1072     /* Check they're not leading us off the end of the descriptors. */
1073     if (desc->next >= max) {
1074         virtio_error(vdev, "Desc next is %u", desc->next);
1075         return VIRTQUEUE_READ_DESC_ERROR;
1076     }
1077 
1078     vring_split_desc_read(vdev, desc, desc_cache, desc->next);
1079     return VIRTQUEUE_READ_DESC_MORE;
1080 }
1081 
1082 /* Called within rcu_read_lock().  */
1083 static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
1084                             unsigned int *in_bytes, unsigned int *out_bytes,
1085                             unsigned max_in_bytes, unsigned max_out_bytes,
1086                             VRingMemoryRegionCaches *caches)
1087 {
1088     VirtIODevice *vdev = vq->vdev;
1089     unsigned int idx;
1090     unsigned int total_bufs, in_total, out_total;
1091     MemoryRegionCache indirect_desc_cache;
1092     int64_t len = 0;
1093     int rc;
1094 
1095     address_space_cache_init_empty(&indirect_desc_cache);
1096 
1097     idx = vq->last_avail_idx;
1098     total_bufs = in_total = out_total = 0;
1099 
1100     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
1101         MemoryRegionCache *desc_cache = &caches->desc;
1102         unsigned int num_bufs;
1103         VRingDesc desc;
1104         unsigned int i;
1105         unsigned int max = vq->vring.num;
1106 
1107         num_bufs = total_bufs;
1108 
1109         if (!virtqueue_get_head(vq, idx++, &i)) {
1110             goto err;
1111         }
1112 
1113         vring_split_desc_read(vdev, &desc, desc_cache, i);
1114 
1115         if (desc.flags & VRING_DESC_F_INDIRECT) {
1116             if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1117                 virtio_error(vdev, "Invalid size for indirect buffer table");
1118                 goto err;
1119             }
1120 
1121             /* If we've got too many, that implies a descriptor loop. */
1122             if (num_bufs >= max) {
1123                 virtio_error(vdev, "Looped descriptor");
1124                 goto err;
1125             }
1126 
1127             /* loop over the indirect descriptor table */
1128             len = address_space_cache_init(&indirect_desc_cache,
1129                                            vdev->dma_as,
1130                                            desc.addr, desc.len, false);
1131             desc_cache = &indirect_desc_cache;
1132             if (len < desc.len) {
1133                 virtio_error(vdev, "Cannot map indirect buffer");
1134                 goto err;
1135             }
1136 
1137             max = desc.len / sizeof(VRingDesc);
1138             num_bufs = i = 0;
1139             vring_split_desc_read(vdev, &desc, desc_cache, i);
1140         }
1141 
1142         do {
1143             /* If we've got too many, that implies a descriptor loop. */
1144             if (++num_bufs > max) {
1145                 virtio_error(vdev, "Looped descriptor");
1146                 goto err;
1147             }
1148 
1149             if (desc.flags & VRING_DESC_F_WRITE) {
1150                 in_total += desc.len;
1151             } else {
1152                 out_total += desc.len;
1153             }
1154             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1155                 goto done;
1156             }
1157 
1158             rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
1159         } while (rc == VIRTQUEUE_READ_DESC_MORE);
1160 
1161         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1162             goto err;
1163         }
1164 
1165         if (desc_cache == &indirect_desc_cache) {
1166             address_space_cache_destroy(&indirect_desc_cache);
1167             total_bufs++;
1168         } else {
1169             total_bufs = num_bufs;
1170         }
1171     }
1172 
1173     if (rc < 0) {
1174         goto err;
1175     }
1176 
1177 done:
1178     address_space_cache_destroy(&indirect_desc_cache);
1179     if (in_bytes) {
1180         *in_bytes = in_total;
1181     }
1182     if (out_bytes) {
1183         *out_bytes = out_total;
1184     }
1185     return;
1186 
1187 err:
1188     in_total = out_total = 0;
1189     goto done;
1190 }
1191 
1192 static int virtqueue_packed_read_next_desc(VirtQueue *vq,
1193                                            VRingPackedDesc *desc,
1194                                            MemoryRegionCache
1195                                            *desc_cache,
1196                                            unsigned int max,
1197                                            unsigned int *next,
1198                                            bool indirect)
1199 {
1200     /* If this descriptor says it doesn't chain, we're done. */
1201     if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
1202         return VIRTQUEUE_READ_DESC_DONE;
1203     }
1204 
1205     ++*next;
1206     if (*next == max) {
1207         if (indirect) {
1208             return VIRTQUEUE_READ_DESC_DONE;
1209         } else {
1210             (*next) -= vq->vring.num;
1211         }
1212     }
1213 
1214     vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
1215     return VIRTQUEUE_READ_DESC_MORE;
1216 }
1217 
1218 /* Called within rcu_read_lock().  */
1219 static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
1220                                              unsigned int *in_bytes,
1221                                              unsigned int *out_bytes,
1222                                              unsigned max_in_bytes,
1223                                              unsigned max_out_bytes,
1224                                              VRingMemoryRegionCaches *caches)
1225 {
1226     VirtIODevice *vdev = vq->vdev;
1227     unsigned int idx;
1228     unsigned int total_bufs, in_total, out_total;
1229     MemoryRegionCache indirect_desc_cache;
1230     MemoryRegionCache *desc_cache;
1231     int64_t len = 0;
1232     VRingPackedDesc desc;
1233     bool wrap_counter;
1234 
1235     address_space_cache_init_empty(&indirect_desc_cache);
1236 
1237     idx = vq->last_avail_idx;
1238     wrap_counter = vq->last_avail_wrap_counter;
1239     total_bufs = in_total = out_total = 0;
1240 
1241     for (;;) {
1242         unsigned int num_bufs = total_bufs;
1243         unsigned int i = idx;
1244         int rc;
1245         unsigned int max = vq->vring.num;
1246 
1247         desc_cache = &caches->desc;
1248 
1249         vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
1250         if (!is_desc_avail(desc.flags, wrap_counter)) {
1251             break;
1252         }
1253 
1254         if (desc.flags & VRING_DESC_F_INDIRECT) {
1255             if (desc.len % sizeof(VRingPackedDesc)) {
1256                 virtio_error(vdev, "Invalid size for indirect buffer table");
1257                 goto err;
1258             }
1259 
1260             /* If we've got too many, that implies a descriptor loop. */
1261             if (num_bufs >= max) {
1262                 virtio_error(vdev, "Looped descriptor");
1263                 goto err;
1264             }
1265 
1266             /* loop over the indirect descriptor table */
1267             len = address_space_cache_init(&indirect_desc_cache,
1268                                            vdev->dma_as,
1269                                            desc.addr, desc.len, false);
1270             desc_cache = &indirect_desc_cache;
1271             if (len < desc.len) {
1272                 virtio_error(vdev, "Cannot map indirect buffer");
1273                 goto err;
1274             }
1275 
1276             max = desc.len / sizeof(VRingPackedDesc);
1277             num_bufs = i = 0;
1278             vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1279         }
1280 
1281         do {
1282             /* If we've got too many, that implies a descriptor loop. */
1283             if (++num_bufs > max) {
1284                 virtio_error(vdev, "Looped descriptor");
1285                 goto err;
1286             }
1287 
1288             if (desc.flags & VRING_DESC_F_WRITE) {
1289                 in_total += desc.len;
1290             } else {
1291                 out_total += desc.len;
1292             }
1293             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1294                 goto done;
1295             }
1296 
1297             rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
1298                                                  &i, desc_cache ==
1299                                                  &indirect_desc_cache);
1300         } while (rc == VIRTQUEUE_READ_DESC_MORE);
1301 
1302         if (desc_cache == &indirect_desc_cache) {
1303             address_space_cache_destroy(&indirect_desc_cache);
1304             total_bufs++;
1305             idx++;
1306         } else {
1307             idx += num_bufs - total_bufs;
1308             total_bufs = num_bufs;
1309         }
1310 
1311         if (idx >= vq->vring.num) {
1312             idx -= vq->vring.num;
1313             wrap_counter ^= 1;
1314         }
1315     }
1316 
1317     /* Record the index and wrap counter for a kick we want */
1318     vq->shadow_avail_idx = idx;
1319     vq->shadow_avail_wrap_counter = wrap_counter;
1320 done:
1321     address_space_cache_destroy(&indirect_desc_cache);
1322     if (in_bytes) {
1323         *in_bytes = in_total;
1324     }
1325     if (out_bytes) {
1326         *out_bytes = out_total;
1327     }
1328     return;
1329 
1330 err:
1331     in_total = out_total = 0;
1332     goto done;
1333 }
1334 
1335 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
1336                                unsigned int *out_bytes,
1337                                unsigned max_in_bytes, unsigned max_out_bytes)
1338 {
1339     uint16_t desc_size;
1340     VRingMemoryRegionCaches *caches;
1341 
1342     RCU_READ_LOCK_GUARD();
1343 
1344     if (unlikely(!vq->vring.desc)) {
1345         goto err;
1346     }
1347 
1348     caches = vring_get_region_caches(vq);
1349     if (!caches) {
1350         goto err;
1351     }
1352 
1353     desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
1354                                 sizeof(VRingPackedDesc) : sizeof(VRingDesc);
1355     if (caches->desc.len < vq->vring.num * desc_size) {
1356         virtio_error(vq->vdev, "Cannot map descriptor ring");
1357         goto err;
1358     }
1359 
1360     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1361         virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
1362                                          max_in_bytes, max_out_bytes,
1363                                          caches);
1364     } else {
1365         virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
1366                                         max_in_bytes, max_out_bytes,
1367                                         caches);
1368     }
1369 
1370     return;
1371 err:
1372     if (in_bytes) {
1373         *in_bytes = 0;
1374     }
1375     if (out_bytes) {
1376         *out_bytes = 0;
1377     }
1378 }
1379 
1380 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
1381                           unsigned int out_bytes)
1382 {
1383     unsigned int in_total, out_total;
1384 
1385     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
1386     return in_bytes <= in_total && out_bytes <= out_total;
1387 }
1388 
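/*
 * Map the guest range [@pa, @pa + @sz) into @iov/@addr starting at *@p_num_sg,
 * splitting it across entries when dma_memory_map() returns a shorter mapping.
 * Returns false (after reporting a virtio error) on zero-sized buffers,
 * scatter-gather overflow or mapping failure.
 */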
1389 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
1390                                hwaddr *addr, struct iovec *iov,
1391                                unsigned int max_num_sg, bool is_write,
1392                                hwaddr pa, size_t sz)
1393 {
1394     bool ok = false;
1395     unsigned num_sg = *p_num_sg;
1396     assert(num_sg <= max_num_sg);
1397 
1398     if (!sz) {
1399         virtio_error(vdev, "virtio: zero sized buffers are not allowed");
1400         goto out;
1401     }
1402 
1403     while (sz) {
1404         hwaddr len = sz;
1405 
1406         if (num_sg == max_num_sg) {
1407             virtio_error(vdev, "virtio: too many write descriptors in "
1408                                "indirect table");
1409             goto out;
1410         }
1411 
1412         iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
1413                                               is_write ?
1414                                               DMA_DIRECTION_FROM_DEVICE :
1415                                               DMA_DIRECTION_TO_DEVICE,
1416                                               MEMTXATTRS_UNSPECIFIED);
1417         if (!iov[num_sg].iov_base) {
1418             virtio_error(vdev, "virtio: bogus descriptor or out of resources");
1419             goto out;
1420         }
1421 
1422         iov[num_sg].iov_len = len;
1423         addr[num_sg] = pa;
1424 
1425         sz -= len;
1426         pa += len;
1427         num_sg++;
1428     }
1429     ok = true;
1430 
1431 out:
1432     *p_num_sg = num_sg;
1433     return ok;
1434 }
1435 
1436 /* Only used by error code paths before we have a VirtQueueElement (therefore
1437  * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
1438  * yet.
1439  */
1440 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
1441                                     struct iovec *iov)
1442 {
1443     unsigned int i;
1444 
1445     for (i = 0; i < out_num + in_num; i++) {
1446         int is_write = i >= out_num;
1447 
1448         cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
1449         iov++;
1450     }
1451 }
1452 
1453 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
1454                                 hwaddr *addr, unsigned int num_sg,
1455                                 bool is_write)
1456 {
1457     unsigned int i;
1458     hwaddr len;
1459 
1460     for (i = 0; i < num_sg; i++) {
1461         len = sg[i].iov_len;
1462         sg[i].iov_base = dma_memory_map(vdev->dma_as,
1463                                         addr[i], &len, is_write ?
1464                                         DMA_DIRECTION_FROM_DEVICE :
1465                                         DMA_DIRECTION_TO_DEVICE,
1466                                         MEMTXATTRS_UNSPECIFIED);
1467         if (!sg[i].iov_base) {
1468             error_report("virtio: error trying to map MMIO memory");
1469             exit(1);
1470         }
1471         if (len != sg[i].iov_len) {
1472             error_report("virtio: unexpected memory split");
1473             exit(1);
1474         }
1475     }
1476 }
1477 
1478 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
1479 {
1480     virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
1481     virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
1482                                                                         false);
1483 }
1484 
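/*
 * Allocate a VirtQueueElement in a single block: the caller's structure of
 * size @sz (at least sizeof(VirtQueueElement)) is followed by the in_addr[],
 * out_addr[], in_sg[] and out_sg[] arrays, suitably aligned.
 */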
1485 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
1486 {
1487     VirtQueueElement *elem;
1488     size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
1489     size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
1490     size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
1491     size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
1492     size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
1493     size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
1494 
1495     assert(sz >= sizeof(VirtQueueElement));
1496     elem = g_malloc(out_sg_end);
1497     trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
1498     elem->out_num = out_num;
1499     elem->in_num = in_num;
1500     elem->in_addr = (void *)elem + in_addr_ofs;
1501     elem->out_addr = (void *)elem + out_addr_ofs;
1502     elem->in_sg = (void *)elem + in_sg_ofs;
1503     elem->out_sg = (void *)elem + out_sg_ofs;
1504     return elem;
1505 }
1506 
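/*
 * Pop the next available descriptor chain from a split ring: read the chain
 * (following an indirect table if present), map each buffer and return a
 * newly allocated VirtQueueElement, or NULL if the ring is empty or an
 * error occurred.
 */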
1507 static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
1508 {
1509     unsigned int i, head, max;
1510     VRingMemoryRegionCaches *caches;
1511     MemoryRegionCache indirect_desc_cache;
1512     MemoryRegionCache *desc_cache;
1513     int64_t len;
1514     VirtIODevice *vdev = vq->vdev;
1515     VirtQueueElement *elem = NULL;
1516     unsigned out_num, in_num, elem_entries;
1517     hwaddr addr[VIRTQUEUE_MAX_SIZE];
1518     struct iovec iov[VIRTQUEUE_MAX_SIZE];
1519     VRingDesc desc;
1520     int rc;
1521 
1522     address_space_cache_init_empty(&indirect_desc_cache);
1523 
1524     RCU_READ_LOCK_GUARD();
1525     if (virtio_queue_empty_rcu(vq)) {
1526         goto done;
1527     }
1528     /* Needed after virtio_queue_empty(), see comment in
1529      * virtqueue_num_heads(). */
1530     smp_rmb();
1531 
1532     /* When we start there are neither input nor output buffers. */
1533     out_num = in_num = elem_entries = 0;
1534 
1535     max = vq->vring.num;
1536 
1537     if (vq->inuse >= vq->vring.num) {
1538         virtio_error(vdev, "Virtqueue size exceeded");
1539         goto done;
1540     }
1541 
1542     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
1543         goto done;
1544     }
1545 
1546     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1547         vring_set_avail_event(vq, vq->last_avail_idx);
1548     }
1549 
1550     i = head;
1551 
1552     caches = vring_get_region_caches(vq);
1553     if (!caches) {
1554         virtio_error(vdev, "Region caches not initialized");
1555         goto done;
1556     }
1557 
1558     if (caches->desc.len < max * sizeof(VRingDesc)) {
1559         virtio_error(vdev, "Cannot map descriptor ring");
1560         goto done;
1561     }
1562 
1563     desc_cache = &caches->desc;
1564     vring_split_desc_read(vdev, &desc, desc_cache, i);
1565     if (desc.flags & VRING_DESC_F_INDIRECT) {
1566         if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1567             virtio_error(vdev, "Invalid size for indirect buffer table");
1568             goto done;
1569         }
1570 
1571         /* loop over the indirect descriptor table */
1572         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1573                                        desc.addr, desc.len, false);
1574         desc_cache = &indirect_desc_cache;
1575         if (len < desc.len) {
1576             virtio_error(vdev, "Cannot map indirect buffer");
1577             goto done;
1578         }
1579 
1580         max = desc.len / sizeof(VRingDesc);
1581         i = 0;
1582         vring_split_desc_read(vdev, &desc, desc_cache, i);
1583     }
1584 
1585     /* Collect all the descriptors */
1586     do {
1587         bool map_ok;
1588 
1589         if (desc.flags & VRING_DESC_F_WRITE) {
1590             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1591                                         iov + out_num,
1592                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1593                                         desc.addr, desc.len);
1594         } else {
1595             if (in_num) {
1596                 virtio_error(vdev, "Incorrect order for descriptors");
1597                 goto err_undo_map;
1598             }
1599             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1600                                         VIRTQUEUE_MAX_SIZE, false,
1601                                         desc.addr, desc.len);
1602         }
1603         if (!map_ok) {
1604             goto err_undo_map;
1605         }
1606 
1607         /* If we've got too many, that implies a descriptor loop. */
1608         if (++elem_entries > max) {
1609             virtio_error(vdev, "Looped descriptor");
1610             goto err_undo_map;
1611         }
1612 
1613         rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
1614     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1615 
1616     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1617         goto err_undo_map;
1618     }
1619 
1620     /* Now copy what we have collected and mapped */
1621     elem = virtqueue_alloc_element(sz, out_num, in_num);
1622     elem->index = head;
1623     elem->ndescs = 1;
1624     for (i = 0; i < out_num; i++) {
1625         elem->out_addr[i] = addr[i];
1626         elem->out_sg[i] = iov[i];
1627     }
1628     for (i = 0; i < in_num; i++) {
1629         elem->in_addr[i] = addr[out_num + i];
1630         elem->in_sg[i] = iov[out_num + i];
1631     }
1632 
1633     vq->inuse++;
1634 
1635     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1636 done:
1637     address_space_cache_destroy(&indirect_desc_cache);
1638 
1639     return elem;
1640 
1641 err_undo_map:
1642     virtqueue_undo_map_desc(out_num, in_num, iov);
1643     goto done;
1644 }
1645 
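/* Packed (VIRTIO 1.1) ring counterpart of virtqueue_split_pop(). */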
1646 static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
1647 {
1648     unsigned int i, max;
1649     VRingMemoryRegionCaches *caches;
1650     MemoryRegionCache indirect_desc_cache;
1651     MemoryRegionCache *desc_cache;
1652     int64_t len;
1653     VirtIODevice *vdev = vq->vdev;
1654     VirtQueueElement *elem = NULL;
1655     unsigned out_num, in_num, elem_entries;
1656     hwaddr addr[VIRTQUEUE_MAX_SIZE];
1657     struct iovec iov[VIRTQUEUE_MAX_SIZE];
1658     VRingPackedDesc desc;
1659     uint16_t id;
1660     int rc;
1661 
1662     address_space_cache_init_empty(&indirect_desc_cache);
1663 
1664     RCU_READ_LOCK_GUARD();
1665     if (virtio_queue_packed_empty_rcu(vq)) {
1666         goto done;
1667     }
1668 
1669     /* When we start there are neither input nor output buffers. */
1670     out_num = in_num = elem_entries = 0;
1671 
1672     max = vq->vring.num;
1673 
1674     if (vq->inuse >= vq->vring.num) {
1675         virtio_error(vdev, "Virtqueue size exceeded");
1676         goto done;
1677     }
1678 
1679     i = vq->last_avail_idx;
1680 
1681     caches = vring_get_region_caches(vq);
1682     if (!caches) {
1683         virtio_error(vdev, "Region caches not initialized");
1684         goto done;
1685     }
1686 
1687     if (caches->desc.len < max * sizeof(VRingDesc)) {
1688         virtio_error(vdev, "Cannot map descriptor ring");
1689         goto done;
1690     }
1691 
1692     desc_cache = &caches->desc;
1693     vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
1694     id = desc.id;
1695     if (desc.flags & VRING_DESC_F_INDIRECT) {
1696         if (desc.len % sizeof(VRingPackedDesc)) {
1697             virtio_error(vdev, "Invalid size for indirect buffer table");
1698             goto done;
1699         }
1700 
1701         /* loop over the indirect descriptor table */
1702         len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1703                                        desc.addr, desc.len, false);
1704         desc_cache = &indirect_desc_cache;
1705         if (len < desc.len) {
1706             virtio_error(vdev, "Cannot map indirect buffer");
1707             goto done;
1708         }
1709 
1710         max = desc.len / sizeof(VRingPackedDesc);
1711         i = 0;
1712         vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1713     }
1714 
1715     /* Collect all the descriptors */
1716     do {
1717         bool map_ok;
1718 
1719         if (desc.flags & VRING_DESC_F_WRITE) {
1720             map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1721                                         iov + out_num,
1722                                         VIRTQUEUE_MAX_SIZE - out_num, true,
1723                                         desc.addr, desc.len);
1724         } else {
1725             if (in_num) {
1726                 virtio_error(vdev, "Incorrect order for descriptors");
1727                 goto err_undo_map;
1728             }
1729             map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1730                                         VIRTQUEUE_MAX_SIZE, false,
1731                                         desc.addr, desc.len);
1732         }
1733         if (!map_ok) {
1734             goto err_undo_map;
1735         }
1736 
1737         /* If we've got too many, that implies a descriptor loop. */
1738         if (++elem_entries > max) {
1739             virtio_error(vdev, "Looped descriptor");
1740             goto err_undo_map;
1741         }
1742 
1743         rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
1744                                              desc_cache ==
1745                                              &indirect_desc_cache);
1746     } while (rc == VIRTQUEUE_READ_DESC_MORE);
1747 
1748     /* Now copy what we have collected and mapped */
1749     elem = virtqueue_alloc_element(sz, out_num, in_num);
1750     for (i = 0; i < out_num; i++) {
1751         elem->out_addr[i] = addr[i];
1752         elem->out_sg[i] = iov[i];
1753     }
1754     for (i = 0; i < in_num; i++) {
1755         elem->in_addr[i] = addr[out_num + i];
1756         elem->in_sg[i] = iov[out_num + i];
1757     }
1758 
1759     elem->index = id;
1760     elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1761     vq->last_avail_idx += elem->ndescs;
1762     vq->inuse += elem->ndescs;
1763 
1764     if (vq->last_avail_idx >= vq->vring.num) {
1765         vq->last_avail_idx -= vq->vring.num;
1766         vq->last_avail_wrap_counter ^= 1;
1767     }
1768 
1769     vq->shadow_avail_idx = vq->last_avail_idx;
1770     vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1771 
1772     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1773 done:
1774     address_space_cache_destroy(&indirect_desc_cache);
1775 
1776     return elem;
1777 
1778 err_undo_map:
1779     virtqueue_undo_map_desc(out_num, in_num, iov);
1780     goto done;
1781 }
1782 
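/*
 * Pop the next available element from @vq.  @sz is the size of the structure
 * to allocate and must be at least sizeof(VirtQueueElement).  Returns NULL if
 * the device is disabled or no buffer is available.
 */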
1783 void *virtqueue_pop(VirtQueue *vq, size_t sz)
1784 {
1785     if (virtio_device_disabled(vq->vdev)) {
1786         return NULL;
1787     }
1788 
1789     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1790         return virtqueue_packed_pop(vq, sz);
1791     } else {
1792         return virtqueue_split_pop(vq, sz);
1793     }
1794 }
1795 
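/* Drop all outstanding buffers of a packed virtqueue without mapping them. */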
1796 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
1797 {
1798     VRingMemoryRegionCaches *caches;
1799     MemoryRegionCache *desc_cache;
1800     unsigned int dropped = 0;
1801     VirtQueueElement elem = {};
1802     VirtIODevice *vdev = vq->vdev;
1803     VRingPackedDesc desc;
1804 
1805     RCU_READ_LOCK_GUARD();
1806 
1807     caches = vring_get_region_caches(vq);
1808     if (!caches) {
1809         return 0;
1810     }
1811 
1812     desc_cache = &caches->desc;
1813 
1814     virtio_queue_set_notification(vq, 0);
1815 
1816     while (vq->inuse < vq->vring.num) {
1817         unsigned int idx = vq->last_avail_idx;
1818         /*
1819          * works similarly to virtqueue_pop but does not map buffers
1820          * and does not allocate any memory.
1821          */
1822         vring_packed_desc_read(vdev, &desc, desc_cache,
1823                                vq->last_avail_idx, true);
1824         if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
1825             break;
1826         }
1827         elem.index = desc.id;
1828         elem.ndescs = 1;
1829         while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
1830                                                vq->vring.num, &idx, false)) {
1831             ++elem.ndescs;
1832         }
1833         /*
1834          * immediately push the element, nothing to unmap
1835          * as both in_num and out_num are set to 0.
1836          */
1837         virtqueue_push(vq, &elem, 0);
1838         dropped++;
1839         vq->last_avail_idx += elem.ndescs;
1840         if (vq->last_avail_idx >= vq->vring.num) {
1841             vq->last_avail_idx -= vq->vring.num;
1842             vq->last_avail_wrap_counter ^= 1;
1843         }
1844     }
1845 
1846     return dropped;
1847 }
1848 
1849 static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
1850 {
1851     unsigned int dropped = 0;
1852     VirtQueueElement elem = {};
1853     VirtIODevice *vdev = vq->vdev;
1854     bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1855 
1856     while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
1857         /* works similarly to virtqueue_pop but does not map buffers
1858          * and does not allocate any memory */
1859         smp_rmb();
1860         if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1861             break;
1862         }
1863         vq->inuse++;
1864         vq->last_avail_idx++;
1865         if (fEventIdx) {
1866             vring_set_avail_event(vq, vq->last_avail_idx);
1867         }
1868         /* immediately push the element, nothing to unmap
1869          * as both in_num and out_num are set to 0 */
1870         virtqueue_push(vq, &elem, 0);
1871         dropped++;
1872     }
1873 
1874     return dropped;
1875 }
1876 
1877 /* virtqueue_drop_all:
1878  * @vq: The #VirtQueue
1879  * Drops all queued buffers and indicates them to the guest
1880  * as if they are done. Useful when buffers can not be
1881  * processed but must be returned to the guest.
1882  */
1883 unsigned int virtqueue_drop_all(VirtQueue *vq)
1884 {
1885     struct VirtIODevice *vdev = vq->vdev;
1886 
1887     if (virtio_device_disabled(vq->vdev)) {
1888         return 0;
1889     }
1890 
1891     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1892         return virtqueue_packed_drop_all(vq);
1893     } else {
1894         return virtqueue_split_drop_all(vq);
1895     }
1896 }
1897 
1898 /* Reading and writing a structure directly to QEMUFile is *awful*, but
1899  * it is what QEMU has always done by mistake.  We can change it sooner
1900  * or later by bumping the version number of the affected vm states.
1901  * In the meanwhile, since the in-memory layout of VirtQueueElement
1902  * has changed, we need to marshal to and from the layout that was
1903  * used before the change.
1904  */
1905 typedef struct VirtQueueElementOld {
1906     unsigned int index;
1907     unsigned int out_num;
1908     unsigned int in_num;
1909     hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1910     hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1911     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1912     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1913 } VirtQueueElementOld;
1914 
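/*
 * Load a VirtQueueElement from the migration stream using the old wire
 * format above, then remap its buffers with virtqueue_map().
 */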
1915 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1916 {
1917     VirtQueueElement *elem;
1918     VirtQueueElementOld data;
1919     int i;
1920 
1921     qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1922 
1923     /* TODO: teach all callers that this can fail, and return failure instead
1924      * of asserting here.
1925      * This is just one thing (there are probably more) that must be
1926      * fixed before we can allow NDEBUG compilation.
1927      */
1928     assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1929     assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1930 
1931     elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1932     elem->index = data.index;
1933 
1934     for (i = 0; i < elem->in_num; i++) {
1935         elem->in_addr[i] = data.in_addr[i];
1936     }
1937 
1938     for (i = 0; i < elem->out_num; i++) {
1939         elem->out_addr[i] = data.out_addr[i];
1940     }
1941 
1942     for (i = 0; i < elem->in_num; i++) {
1943         /* Base is overwritten by virtqueue_map.  */
1944         elem->in_sg[i].iov_base = 0;
1945         elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1946     }
1947 
1948     for (i = 0; i < elem->out_num; i++) {
1949         /* Base is overwritten by virtqueue_map.  */
1950         elem->out_sg[i].iov_base = 0;
1951         elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1952     }
1953 
1954     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1955         qemu_get_be32s(f, &elem->ndescs);
1956     }
1957 
1958     virtqueue_map(vdev, elem);
1959     return elem;
1960 }
1961 
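/*
 * Save a VirtQueueElement in the old wire format.  The iov_base pointers are
 * not saved; they are recreated by virtqueue_map() on the destination.
 */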
1962 void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
1963                                 VirtQueueElement *elem)
1964 {
1965     VirtQueueElementOld data;
1966     int i;
1967 
1968     memset(&data, 0, sizeof(data));
1969     data.index = elem->index;
1970     data.in_num = elem->in_num;
1971     data.out_num = elem->out_num;
1972 
1973     for (i = 0; i < elem->in_num; i++) {
1974         data.in_addr[i] = elem->in_addr[i];
1975     }
1976 
1977     for (i = 0; i < elem->out_num; i++) {
1978         data.out_addr[i] = elem->out_addr[i];
1979     }
1980 
1981     for (i = 0; i < elem->in_num; i++) {
1982         /* Base is overwritten by virtqueue_map when loading.  Do not
1983          * save it, as it would leak the QEMU address space layout.  */
1984         data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1985     }
1986 
1987     for (i = 0; i < elem->out_num; i++) {
1988         /* Do not save iov_base as above.  */
1989         data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1990     }
1991 
1992     if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1993         qemu_put_be32s(f, &elem->ndescs);
1994     }
1995 
1996     qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1997 }
1998 
1999 /* virtio device */
2000 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
2001 {
2002     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2003     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2004 
2005     if (virtio_device_disabled(vdev)) {
2006         return;
2007     }
2008 
2009     if (k->notify) {
2010         k->notify(qbus->parent, vector);
2011     }
2012 }
2013 
2014 void virtio_update_irq(VirtIODevice *vdev)
2015 {
2016     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2017 }
2018 
2019 static int virtio_validate_features(VirtIODevice *vdev)
2020 {
2021     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2022 
2023     if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
2024         !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
2025         return -EFAULT;
2026     }
2027 
2028     if (k->validate_features) {
2029         return k->validate_features(vdev);
2030     } else {
2031         return 0;
2032     }
2033 }
2034 
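/*
 * Update the device status byte.  For VIRTIO 1.0 devices, setting
 * FEATURES_OK triggers feature validation and may fail; a change of
 * DRIVER_OK also updates the device's started state.
 */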
2035 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
2036 {
2037     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2038     trace_virtio_set_status(vdev, val);
2039 
2040     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2041         if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
2042             val & VIRTIO_CONFIG_S_FEATURES_OK) {
2043             int ret = virtio_validate_features(vdev);
2044 
2045             if (ret) {
2046                 return ret;
2047             }
2048         }
2049     }
2050 
2051     if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
2052         (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
2053         virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
2054     }
2055 
2056     if (k->set_status) {
2057         k->set_status(vdev, val);
2058     }
2059     vdev->status = val;
2060 
2061     return 0;
2062 }
2063 
2064 static enum virtio_device_endian virtio_default_endian(void)
2065 {
2066     if (target_words_bigendian()) {
2067         return VIRTIO_DEVICE_ENDIAN_BIG;
2068     } else {
2069         return VIRTIO_DEVICE_ENDIAN_LITTLE;
2070     }
2071 }
2072 
2073 static enum virtio_device_endian virtio_current_cpu_endian(void)
2074 {
2075     if (cpu_virtio_is_big_endian(current_cpu)) {
2076         return VIRTIO_DEVICE_ENDIAN_BIG;
2077     } else {
2078         return VIRTIO_DEVICE_ENDIAN_LITTLE;
2079     }
2080 }
2081 
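/* Reset a single virtqueue to its post-reset default state. */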
2082 static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i)
2083 {
2084     vdev->vq[i].vring.desc = 0;
2085     vdev->vq[i].vring.avail = 0;
2086     vdev->vq[i].vring.used = 0;
2087     vdev->vq[i].last_avail_idx = 0;
2088     vdev->vq[i].shadow_avail_idx = 0;
2089     vdev->vq[i].used_idx = 0;
2090     vdev->vq[i].last_avail_wrap_counter = true;
2091     vdev->vq[i].shadow_avail_wrap_counter = true;
2092     vdev->vq[i].used_wrap_counter = true;
2093     virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2094     vdev->vq[i].signalled_used = 0;
2095     vdev->vq[i].signalled_used_valid = false;
2096     vdev->vq[i].notification = true;
2097     vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2098     vdev->vq[i].inuse = 0;
2099     virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2100 }
2101 
2102 void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
2103 {
2104     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2105 
2106     if (k->queue_reset) {
2107         k->queue_reset(vdev, queue_index);
2108     }
2109 
2110     __virtio_queue_reset(vdev, queue_index);
2111 }
2112 
2113 void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
2114 {
2115     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2116 
2117     /*
2118      * TODO: Seabios is currently out of spec and triggering this error.
2119      * So this needs to be fixed in Seabios, then this can
2120      * be re-enabled for new machine types only, and also after
2121      * being converted to LOG_GUEST_ERROR.
2122      *
2123     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2124         error_report("queue_enable is only supported in devices of virtio "
2125                      "1.0 or later.");
2126     }
2127     */
2128 
2129     if (k->queue_enable) {
2130         k->queue_enable(vdev, queue_index);
2131     }
2132 }
2133 
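/* Full device reset: clears status, features and all virtqueue state. */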
2134 void virtio_reset(void *opaque)
2135 {
2136     VirtIODevice *vdev = opaque;
2137     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2138     int i;
2139 
2140     virtio_set_status(vdev, 0);
2141     if (current_cpu) {
2142         /* Guest initiated reset */
2143         vdev->device_endian = virtio_current_cpu_endian();
2144     } else {
2145         /* System reset */
2146         vdev->device_endian = virtio_default_endian();
2147     }
2148 
2149     if (vdev->vhost_started && k->get_vhost) {
2150         vhost_reset_device(k->get_vhost(vdev));
2151     }
2152 
2153     if (k->reset) {
2154         k->reset(vdev);
2155     }
2156 
2157     vdev->start_on_kick = false;
2158     vdev->started = false;
2159     vdev->broken = false;
2160     vdev->guest_features = 0;
2161     vdev->queue_sel = 0;
2162     vdev->status = 0;
2163     vdev->disabled = false;
2164     qatomic_set(&vdev->isr, 0);
2165     vdev->config_vector = VIRTIO_NO_VECTOR;
2166     virtio_notify_vector(vdev, vdev->config_vector);
2167 
2168     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2169         __virtio_queue_reset(vdev, i);
2170     }
2171 }
2172 
2173 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2174 {
2175     if (!vdev->vq[n].vring.num) {
2176         return;
2177     }
2178     vdev->vq[n].vring.desc = addr;
2179     virtio_queue_update_rings(vdev, n);
2180 }
2181 
2182 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2183 {
2184     return vdev->vq[n].vring.desc;
2185 }
2186 
2187 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2188                             hwaddr avail, hwaddr used)
2189 {
2190     if (!vdev->vq[n].vring.num) {
2191         return;
2192     }
2193     vdev->vq[n].vring.desc = desc;
2194     vdev->vq[n].vring.avail = avail;
2195     vdev->vq[n].vring.used = used;
2196     virtio_init_region_cache(vdev, n);
2197 }
2198 
2199 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2200 {
2201     /* Don't allow guest to flip queue between existent and
2202      * nonexistent states, or to set it to an invalid size.
2203      */
2204     if (!!num != !!vdev->vq[n].vring.num ||
2205         num > VIRTQUEUE_MAX_SIZE ||
2206         num < 0) {
2207         return;
2208     }
2209     vdev->vq[n].vring.num = num;
2210 }
2211 
2212 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2213 {
2214     return QLIST_FIRST(&vdev->vector_queues[vector]);
2215 }
2216 
2217 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2218 {
2219     return QLIST_NEXT(vq, node);
2220 }
2221 
2222 int virtio_queue_get_num(VirtIODevice *vdev, int n)
2223 {
2224     return vdev->vq[n].vring.num;
2225 }
2226 
2227 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2228 {
2229     return vdev->vq[n].vring.num_default;
2230 }
2231 
2232 int virtio_get_num_queues(VirtIODevice *vdev)
2233 {
2234     int i;
2235 
2236     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2237         if (!virtio_queue_get_num(vdev, i)) {
2238             break;
2239         }
2240     }
2241 
2242     return i;
2243 }
2244 
2245 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2246 {
2247     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2248     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2249 
2250     /* virtio-1 compliant devices cannot change the alignment */
2251     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2252         error_report("tried to modify queue alignment for virtio-1 device");
2253         return;
2254     }
2255     /* Check that the transport told us it was going to do this
2256      * (so a buggy transport will immediately assert rather than
2257      * silently failing to migrate this state)
2258      */
2259     assert(k->has_variable_vring_alignment);
2260 
2261     if (align) {
2262         vdev->vq[n].vring.align = align;
2263         virtio_queue_update_rings(vdev, n);
2264     }
2265 }
2266 
2267 static void virtio_queue_notify_vq(VirtQueue *vq)
2268 {
2269     if (vq->vring.desc && vq->handle_output) {
2270         VirtIODevice *vdev = vq->vdev;
2271 
2272         if (unlikely(vdev->broken)) {
2273             return;
2274         }
2275 
2276         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2277         vq->handle_output(vdev, vq);
2278 
2279         if (unlikely(vdev->start_on_kick)) {
2280             virtio_set_started(vdev, true);
2281         }
2282     }
2283 }
2284 
2285 void virtio_queue_notify(VirtIODevice *vdev, int n)
2286 {
2287     VirtQueue *vq = &vdev->vq[n];
2288 
2289     if (unlikely(!vq->vring.desc || vdev->broken)) {
2290         return;
2291     }
2292 
2293     trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2294     if (vq->host_notifier_enabled) {
2295         event_notifier_set(&vq->host_notifier);
2296     } else if (vq->handle_output) {
2297         vq->handle_output(vdev, vq);
2298 
2299         if (unlikely(vdev->start_on_kick)) {
2300             virtio_set_started(vdev, true);
2301         }
2302     }
2303 }
2304 
2305 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2306 {
2307     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2308         VIRTIO_NO_VECTOR;
2309 }
2310 
2311 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2312 {
2313     VirtQueue *vq = &vdev->vq[n];
2314 
2315     if (n < VIRTIO_QUEUE_MAX) {
2316         if (vdev->vector_queues &&
2317             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2318             QLIST_REMOVE(vq, node);
2319         }
2320         vdev->vq[n].vector = vector;
2321         if (vdev->vector_queues &&
2322             vector != VIRTIO_NO_VECTOR) {
2323             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2324         }
2325     }
2326 }
2327 
2328 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2329                             VirtIOHandleOutput handle_output)
2330 {
2331     int i;
2332 
2333     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2334         if (vdev->vq[i].vring.num == 0)
2335             break;
2336     }
2337 
2338     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2339         abort();
2340 
2341     vdev->vq[i].vring.num = queue_size;
2342     vdev->vq[i].vring.num_default = queue_size;
2343     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2344     vdev->vq[i].handle_output = handle_output;
2345     vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
2346 
2347     return &vdev->vq[i];
2348 }
2349 
2350 void virtio_delete_queue(VirtQueue *vq)
2351 {
2352     vq->vring.num = 0;
2353     vq->vring.num_default = 0;
2354     vq->handle_output = NULL;
2355     g_free(vq->used_elems);
2356     vq->used_elems = NULL;
2357     virtio_virtqueue_reset_region_cache(vq);
2358 }
2359 
2360 void virtio_del_queue(VirtIODevice *vdev, int n)
2361 {
2362     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2363         abort();
2364     }
2365 
2366     virtio_delete_queue(&vdev->vq[n]);
2367 }
2368 
2369 static void virtio_set_isr(VirtIODevice *vdev, int value)
2370 {
2371     uint8_t old = qatomic_read(&vdev->isr);
2372 
2373     /* Do not write ISR if it does not change, so that its cacheline remains
2374      * shared in the common case where the guest does not read it.
2375      */
2376     if ((old & value) != value) {
2377         qatomic_or(&vdev->isr, value);
2378     }
2379 }
2380 
2381 /* Called within rcu_read_lock(). */
2382 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2383 {
2384     uint16_t old, new;
2385     bool v;
2386     /* We need to expose used array entries before checking used event. */
2387     smp_mb();
2388     /* Always notify when queue is empty (if the feature was acknowledged) */
2389     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2390         !vq->inuse && virtio_queue_empty(vq)) {
2391         return true;
2392     }
2393 
2394     if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2395         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2396     }
2397 
2398     v = vq->signalled_used_valid;
2399     vq->signalled_used_valid = true;
2400     old = vq->signalled_used;
2401     new = vq->signalled_used = vq->used_idx;
2402     return !v || vring_need_event(vring_get_used_event(vq), new, old);
2403 }
2404 
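/*
 * Packed-ring event suppression: @off_wrap carries the event offset in its
 * low 15 bits and the expected wrap counter in bit 15.  If that wrap counter
 * differs from ours, the offset refers to the previous pass over the ring,
 * so shift it down by the ring size before the vring_need_event() check.
 */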
2405 static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2406                                     uint16_t off_wrap, uint16_t new,
2407                                     uint16_t old)
2408 {
2409     int off = off_wrap & ~(1 << 15);
2410 
2411     if (wrap != off_wrap >> 15) {
2412         off -= vq->vring.num;
2413     }
2414 
2415     return vring_need_event(off, new, old);
2416 }
2417 
2418 /* Called within rcu_read_lock(). */
2419 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2420 {
2421     VRingPackedDescEvent e;
2422     uint16_t old, new;
2423     bool v;
2424     VRingMemoryRegionCaches *caches;
2425 
2426     caches = vring_get_region_caches(vq);
2427     if (!caches) {
2428         return false;
2429     }
2430 
2431     vring_packed_event_read(vdev, &caches->avail, &e);
2432 
2433     old = vq->signalled_used;
2434     new = vq->signalled_used = vq->used_idx;
2435     v = vq->signalled_used_valid;
2436     vq->signalled_used_valid = true;
2437 
2438     if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2439         return false;
2440     } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2441         return true;
2442     }
2443 
2444     return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2445                                          e.off_wrap, new, old);
2446 }
2447 
2448 /* Called within rcu_read_lock().  */
2449 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2450 {
2451     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2452         return virtio_packed_should_notify(vdev, vq);
2453     } else {
2454         return virtio_split_should_notify(vdev, vq);
2455     }
2456 }
2457 
2458 /* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
virtio_notify_irqfd_deferred_fn(void * opaque)2459 static void virtio_notify_irqfd_deferred_fn(void *opaque)
2460 {
2461     EventNotifier *notifier = opaque;
2462     VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier);
2463 
2464     trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
2465     event_notifier_set(notifier);
2466 }
2467 
2468 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2469 {
2470     WITH_RCU_READ_LOCK_GUARD() {
2471         if (!virtio_should_notify(vdev, vq)) {
2472             return;
2473         }
2474     }
2475 
2476     trace_virtio_notify_irqfd(vdev, vq);
2477 
2478     /*
2479      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2480      * windows drivers included in virtio-win 1.8.0 (circa 2015) are
2481      * incorrectly polling this bit during crashdump and hibernation
2482      * in MSI mode, causing a hang if this bit is never updated.
2483      * Recent releases of Windows do not really shut down, but rather
2484      * log out and hibernate to make the next startup faster.  Hence,
2485      * this manifested as a more serious hang during shutdown with
2486      *
2487      * Next driver release from 2016 fixed this problem, so working around it
2488      * is not a must, but it's easy to do so let's do it here.
2489      *
2490      * Note: it's safe to update ISR from any thread as it was switched
2491      * to an atomic operation.
2492      */
2493     virtio_set_isr(vq->vdev, 0x1);
2494     defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
2495 }
2496 
2497 static void virtio_irq(VirtQueue *vq)
2498 {
2499     virtio_set_isr(vq->vdev, 0x1);
2500     virtio_notify_vector(vq->vdev, vq->vector);
2501 }
2502 
2503 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2504 {
2505     WITH_RCU_READ_LOCK_GUARD() {
2506         if (!virtio_should_notify(vdev, vq)) {
2507             return;
2508         }
2509     }
2510 
2511     trace_virtio_notify(vdev, vq);
2512     virtio_irq(vq);
2513 }
2514 
2515 void virtio_notify_config(VirtIODevice *vdev)
2516 {
2517     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2518         return;
2519 
2520     virtio_set_isr(vdev, 0x3);
2521     vdev->generation++;
2522     virtio_notify_vector(vdev, vdev->config_vector);
2523 }
2524 
2525 static bool virtio_device_endian_needed(void *opaque)
2526 {
2527     VirtIODevice *vdev = opaque;
2528 
2529     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2530     if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2531         return vdev->device_endian != virtio_default_endian();
2532     }
2533     /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2534     return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2535 }
2536 
2537 static bool virtio_64bit_features_needed(void *opaque)
2538 {
2539     VirtIODevice *vdev = opaque;
2540 
2541     return (vdev->host_features >> 32) != 0;
2542 }
2543 
2544 static bool virtio_virtqueue_needed(void *opaque)
2545 {
2546     VirtIODevice *vdev = opaque;
2547 
2548     return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2549 }
2550 
2551 static bool virtio_packed_virtqueue_needed(void *opaque)
2552 {
2553     VirtIODevice *vdev = opaque;
2554 
2555     return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2556 }
2557 
2558 static bool virtio_ringsize_needed(void *opaque)
2559 {
2560     VirtIODevice *vdev = opaque;
2561     int i;
2562 
2563     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2564         if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2565             return true;
2566         }
2567     }
2568     return false;
2569 }
2570 
2571 static bool virtio_extra_state_needed(void *opaque)
2572 {
2573     VirtIODevice *vdev = opaque;
2574     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2575     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2576 
2577     return k->has_extra_state &&
2578         k->has_extra_state(qbus->parent);
2579 }
2580 
2581 static bool virtio_broken_needed(void *opaque)
2582 {
2583     VirtIODevice *vdev = opaque;
2584 
2585     return vdev->broken;
2586 }
2587 
2588 static bool virtio_started_needed(void *opaque)
2589 {
2590     VirtIODevice *vdev = opaque;
2591 
2592     return vdev->started;
2593 }
2594 
2595 static bool virtio_disabled_needed(void *opaque)
2596 {
2597     VirtIODevice *vdev = opaque;
2598 
2599     return vdev->disabled;
2600 }
2601 
2602 static const VMStateDescription vmstate_virtqueue = {
2603     .name = "virtqueue_state",
2604     .version_id = 1,
2605     .minimum_version_id = 1,
2606     .fields = (const VMStateField[]) {
2607         VMSTATE_UINT64(vring.avail, struct VirtQueue),
2608         VMSTATE_UINT64(vring.used, struct VirtQueue),
2609         VMSTATE_END_OF_LIST()
2610     }
2611 };
2612 
2613 static const VMStateDescription vmstate_packed_virtqueue = {
2614     .name = "packed_virtqueue_state",
2615     .version_id = 1,
2616     .minimum_version_id = 1,
2617     .fields = (const VMStateField[]) {
2618         VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2619         VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2620         VMSTATE_UINT16(used_idx, struct VirtQueue),
2621         VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2622         VMSTATE_UINT32(inuse, struct VirtQueue),
2623         VMSTATE_END_OF_LIST()
2624     }
2625 };
2626 
2627 static const VMStateDescription vmstate_virtio_virtqueues = {
2628     .name = "virtio/virtqueues",
2629     .version_id = 1,
2630     .minimum_version_id = 1,
2631     .needed = &virtio_virtqueue_needed,
2632     .fields = (const VMStateField[]) {
2633         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2634                       VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
2635         VMSTATE_END_OF_LIST()
2636     }
2637 };
2638 
2639 static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2640     .name = "virtio/packed_virtqueues",
2641     .version_id = 1,
2642     .minimum_version_id = 1,
2643     .needed = &virtio_packed_virtqueue_needed,
2644     .fields = (const VMStateField[]) {
2645         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2646                       VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2647         VMSTATE_END_OF_LIST()
2648     }
2649 };
2650 
2651 static const VMStateDescription vmstate_ringsize = {
2652     .name = "ringsize_state",
2653     .version_id = 1,
2654     .minimum_version_id = 1,
2655     .fields = (const VMStateField[]) {
2656         VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2657         VMSTATE_END_OF_LIST()
2658     }
2659 };
2660 
2661 static const VMStateDescription vmstate_virtio_ringsize = {
2662     .name = "virtio/ringsize",
2663     .version_id = 1,
2664     .minimum_version_id = 1,
2665     .needed = &virtio_ringsize_needed,
2666     .fields = (const VMStateField[]) {
2667         VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2668                       VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
2669         VMSTATE_END_OF_LIST()
2670     }
2671 };
2672 
2673 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
2674                            const VMStateField *field)
2675 {
2676     VirtIODevice *vdev = pv;
2677     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2678     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2679 
2680     if (!k->load_extra_state) {
2681         return -1;
2682     } else {
2683         return k->load_extra_state(qbus->parent, f);
2684     }
2685 }
2686 
2687 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
2688                            const VMStateField *field, JSONWriter *vmdesc)
2689 {
2690     VirtIODevice *vdev = pv;
2691     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2692     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2693 
2694     k->save_extra_state(qbus->parent, f);
2695     return 0;
2696 }
2697 
2698 static const VMStateInfo vmstate_info_extra_state = {
2699     .name = "virtqueue_extra_state",
2700     .get = get_extra_state,
2701     .put = put_extra_state,
2702 };
2703 
2704 static const VMStateDescription vmstate_virtio_extra_state = {
2705     .name = "virtio/extra_state",
2706     .version_id = 1,
2707     .minimum_version_id = 1,
2708     .needed = &virtio_extra_state_needed,
2709     .fields = (const VMStateField[]) {
2710         {
2711             .name         = "extra_state",
2712             .version_id   = 0,
2713             .field_exists = NULL,
2714             .size         = 0,
2715             .info         = &vmstate_info_extra_state,
2716             .flags        = VMS_SINGLE,
2717             .offset       = 0,
2718         },
2719         VMSTATE_END_OF_LIST()
2720     }
2721 };
2722 
2723 static const VMStateDescription vmstate_virtio_device_endian = {
2724     .name = "virtio/device_endian",
2725     .version_id = 1,
2726     .minimum_version_id = 1,
2727     .needed = &virtio_device_endian_needed,
2728     .fields = (const VMStateField[]) {
2729         VMSTATE_UINT8(device_endian, VirtIODevice),
2730         VMSTATE_END_OF_LIST()
2731     }
2732 };
2733 
2734 static const VMStateDescription vmstate_virtio_64bit_features = {
2735     .name = "virtio/64bit_features",
2736     .version_id = 1,
2737     .minimum_version_id = 1,
2738     .needed = &virtio_64bit_features_needed,
2739     .fields = (const VMStateField[]) {
2740         VMSTATE_UINT64(guest_features, VirtIODevice),
2741         VMSTATE_END_OF_LIST()
2742     }
2743 };
2744 
2745 static const VMStateDescription vmstate_virtio_broken = {
2746     .name = "virtio/broken",
2747     .version_id = 1,
2748     .minimum_version_id = 1,
2749     .needed = &virtio_broken_needed,
2750     .fields = (const VMStateField[]) {
2751         VMSTATE_BOOL(broken, VirtIODevice),
2752         VMSTATE_END_OF_LIST()
2753     }
2754 };
2755 
2756 static const VMStateDescription vmstate_virtio_started = {
2757     .name = "virtio/started",
2758     .version_id = 1,
2759     .minimum_version_id = 1,
2760     .needed = &virtio_started_needed,
2761     .fields = (const VMStateField[]) {
2762         VMSTATE_BOOL(started, VirtIODevice),
2763         VMSTATE_END_OF_LIST()
2764     }
2765 };
2766 
2767 static const VMStateDescription vmstate_virtio_disabled = {
2768     .name = "virtio/disabled",
2769     .version_id = 1,
2770     .minimum_version_id = 1,
2771     .needed = &virtio_disabled_needed,
2772     .fields = (const VMStateField[]) {
2773         VMSTATE_BOOL(disabled, VirtIODevice),
2774         VMSTATE_END_OF_LIST()
2775     }
2776 };
2777 
2778 static const VMStateDescription vmstate_virtio = {
2779     .name = "virtio",
2780     .version_id = 1,
2781     .minimum_version_id = 1,
2782     .fields = (const VMStateField[]) {
2783         VMSTATE_END_OF_LIST()
2784     },
2785     .subsections = (const VMStateDescription * const []) {
2786         &vmstate_virtio_device_endian,
2787         &vmstate_virtio_64bit_features,
2788         &vmstate_virtio_virtqueues,
2789         &vmstate_virtio_ringsize,
2790         &vmstate_virtio_broken,
2791         &vmstate_virtio_extra_state,
2792         &vmstate_virtio_started,
2793         &vmstate_virtio_packed_virtqueues,
2794         &vmstate_virtio_disabled,
2795         NULL
2796     }
2797 };
2798 
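/*
 * Save the common virtio device state (status, features, config space and
 * per-queue state), followed by device-specific state and the vmstate_virtio
 * subsections.
 */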
2799 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
2800 {
2801     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2802     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2803     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2804     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
2805     int i;
2806 
2807     if (k->save_config) {
2808         k->save_config(qbus->parent, f);
2809     }
2810 
2811     qemu_put_8s(f, &vdev->status);
2812     qemu_put_8s(f, &vdev->isr);
2813     qemu_put_be16s(f, &vdev->queue_sel);
2814     qemu_put_be32s(f, &guest_features_lo);
2815     qemu_put_be32(f, vdev->config_len);
2816     qemu_put_buffer(f, vdev->config, vdev->config_len);
2817 
2818     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2819         if (vdev->vq[i].vring.num == 0)
2820             break;
2821     }
2822 
2823     qemu_put_be32(f, i);
2824 
2825     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2826         if (vdev->vq[i].vring.num == 0)
2827             break;
2828 
2829         qemu_put_be32(f, vdev->vq[i].vring.num);
2830         if (k->has_variable_vring_alignment) {
2831             qemu_put_be32(f, vdev->vq[i].vring.align);
2832         }
2833         /*
2834          * Save desc now, the rest of the ring addresses are saved in
2835          * subsections for VIRTIO-1 devices.
2836          */
2837         qemu_put_be64(f, vdev->vq[i].vring.desc);
2838         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
2839         if (k->save_queue) {
2840             k->save_queue(qbus->parent, i, f);
2841         }
2842     }
2843 
2844     if (vdc->save != NULL) {
2845         vdc->save(vdev, f);
2846     }
2847 
2848     if (vdc->vmsd) {
2849         int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
2850         if (ret) {
2851             return ret;
2852         }
2853     }
2854 
2855     /* Subsections */
2856     return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
2857 }
2858 
2859 /* A wrapper for use as a VMState .put function */
2860 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
2861                               const VMStateField *field, JSONWriter *vmdesc)
2862 {
2863     return virtio_save(VIRTIO_DEVICE(opaque), f);
2864 }
2865 
2866 /* A wrapper for use as a VMState .get function */
2867 static int coroutine_mixed_fn
2868 virtio_device_get(QEMUFile *f, void *opaque, size_t size,
2869                   const VMStateField *field)
2870 {
2871     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
2872     DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
2873 
2874     return virtio_load(vdev, f, dc->vmsd->version_id);
2875 }
2876 
2877 const VMStateInfo  virtio_vmstate_info = {
2878     .name = "virtio",
2879     .get = virtio_device_get,
2880     .put = virtio_device_put,
2881 };
2882 
2883 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
2884 {
2885     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2886     bool bad = (val & ~(vdev->host_features)) != 0;
2887 
2888     val &= vdev->host_features;
2889     if (k->set_features) {
2890         k->set_features(vdev, val);
2891     }
2892     vdev->guest_features = val;
2893     return bad ? -1 : 0;
2894 }
2895 
2896 typedef struct VirtioSetFeaturesNocheckData {
2897     Coroutine *co;
2898     VirtIODevice *vdev;
2899     uint64_t val;
2900     int ret;
2901 } VirtioSetFeaturesNocheckData;
2902 
2903 static void virtio_set_features_nocheck_bh(void *opaque)
2904 {
2905     VirtioSetFeaturesNocheckData *data = opaque;
2906 
2907     data->ret = virtio_set_features_nocheck(data->vdev, data->val);
2908     aio_co_wake(data->co);
2909 }
2910 
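/*
 * Like virtio_set_features_nocheck(), but safe to call from coroutine
 * context: the work is deferred to a bottom half and the coroutine yields
 * until it has completed.
 */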
2911 static int coroutine_mixed_fn
2912 virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val)
2913 {
2914     if (qemu_in_coroutine()) {
2915         VirtioSetFeaturesNocheckData data = {
2916             .co = qemu_coroutine_self(),
2917             .vdev = vdev,
2918             .val = val,
2919         };
2920         aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
2921                                 virtio_set_features_nocheck_bh, &data);
2922         qemu_coroutine_yield();
2923         return data.ret;
2924     } else {
2925         return virtio_set_features_nocheck(vdev, val);
2926     }
2927 }
2928 
2929 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2930 {
2931     int ret;
2932     /*
2933      * The driver must not attempt to set features after feature negotiation
2934      * has finished.
2935      */
2936     if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2937         return -EINVAL;
2938     }
2939 
2940     if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
2941         qemu_log_mask(LOG_GUEST_ERROR,
2942                       "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
2943                       __func__, vdev->name);
2944     }
2945 
2946     ret = virtio_set_features_nocheck(vdev, val);
2947     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2948         /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
2949         int i;
2950         for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2951             if (vdev->vq[i].vring.num != 0) {
2952                 virtio_init_region_cache(vdev, i);
2953             }
2954         }
2955     }
2956     if (!ret) {
2957         if (!virtio_device_started(vdev, vdev->status) &&
2958             !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2959             vdev->start_on_kick = true;
2960         }
2961     }
2962     return ret;
2963 }
2964 
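/*
 * Compute the config space size implied by the enabled host features:
 * start from the minimum size and extend it to cover every
 * feature-dependent field that is enabled.
 */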
2965 size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
2966                               uint64_t host_features)
2967 {
2968     size_t config_size = params->min_size;
2969     const VirtIOFeature *feature_sizes = params->feature_sizes;
2970     size_t i;
2971 
2972     for (i = 0; feature_sizes[i].flags != 0; i++) {
2973         if (host_features & feature_sizes[i].flags) {
2974             config_size = MAX(feature_sizes[i].end, config_size);
2975         }
2976     }
2977 
2978     assert(config_size <= params->max_size);
2979     return config_size;
2980 }
2981 
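/*
 * Load the common virtio device state saved by virtio_save(), including the
 * vmstate_virtio subsections, then rebuild the ring region caches and
 * sanity-check the restored ring indices.
 */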
2982 int coroutine_mixed_fn
2983 virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
2984 {
2985     int i, ret;
2986     int32_t config_len;
2987     uint32_t num;
2988     uint32_t features;
2989     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2990     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2991     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2992 
2993     /*
2994      * We poison the endianness to ensure it does not get used before
2995      * subsections have been loaded.
2996      */
2997     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
2998 
2999     if (k->load_config) {
3000         ret = k->load_config(qbus->parent, f);
3001         if (ret)
3002             return ret;
3003     }
3004 
3005     qemu_get_8s(f, &vdev->status);
3006     qemu_get_8s(f, &vdev->isr);
3007     qemu_get_be16s(f, &vdev->queue_sel);
3008     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3009         return -1;
3010     }
3011     qemu_get_be32s(f, &features);
3012 
3013     /*
3014      * Temporarily set guest_features low bits - needed by
3015      * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
3016      * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3017      *
3018      * Note: devices should always test host features in future - don't create
3019      * new dependencies like this.
3020      */
3021     vdev->guest_features = features;
3022 
3023     config_len = qemu_get_be32(f);
3024 
3025     /*
3026      * There are cases where the incoming config can be bigger or smaller
3027      * than what we have; so load what we have space for, and skip
3028      * any excess that's in the stream.
3029      */
3030     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3031 
3032     while (config_len > vdev->config_len) {
3033         qemu_get_byte(f);
3034         config_len--;
3035     }
3036 
3037     num = qemu_get_be32(f);
3038 
3039     if (num > VIRTIO_QUEUE_MAX) {
3040         error_report("Invalid number of virtqueues: 0x%x", num);
3041         return -1;
3042     }
3043 
3044     for (i = 0; i < num; i++) {
3045         vdev->vq[i].vring.num = qemu_get_be32(f);
3046         if (k->has_variable_vring_alignment) {
3047             vdev->vq[i].vring.align = qemu_get_be32(f);
3048         }
3049         vdev->vq[i].vring.desc = qemu_get_be64(f);
3050         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3051         vdev->vq[i].signalled_used_valid = false;
3052         vdev->vq[i].notification = true;
3053 
3054         if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3055             error_report("VQ %d address 0x0 "
3056                          "inconsistent with Host index 0x%x",
3057                          i, vdev->vq[i].last_avail_idx);
3058             return -1;
3059         }
3060         if (k->load_queue) {
3061             ret = k->load_queue(qbus->parent, i, f);
3062             if (ret)
3063                 return ret;
3064         }
3065     }
3066 
3067     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3068 
3069     if (vdc->load != NULL) {
3070         ret = vdc->load(vdev, f, version_id);
3071         if (ret) {
3072             return ret;
3073         }
3074     }
3075 
3076     if (vdc->vmsd) {
3077         ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3078         if (ret) {
3079             return ret;
3080         }
3081     }
3082 
3083     /* Subsections */
3084     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3085     if (ret) {
3086         return ret;
3087     }
3088 
3089     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3090         vdev->device_endian = virtio_default_endian();
3091     }
3092 
3093     if (virtio_64bit_features_needed(vdev)) {
3094         /*
3095          * Subsection load filled vdev->guest_features.  Run them
3096          * through virtio_set_features to sanity-check them against
3097          * host_features.
3098          */
3099         uint64_t features64 = vdev->guest_features;
3100         if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) {
3101             error_report("Features 0x%" PRIx64 " unsupported. "
3102                          "Allowed features: 0x%" PRIx64,
3103                          features64, vdev->host_features);
3104             return -1;
3105         }
3106     } else {
3107         if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) {
3108             error_report("Features 0x%x unsupported. "
3109                          "Allowed features: 0x%" PRIx64,
3110                          features, vdev->host_features);
3111             return -1;
3112         }
3113     }
3114 
3115     if (!virtio_device_started(vdev, vdev->status) &&
3116         !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3117         vdev->start_on_kick = true;
3118     }
3119 
3120     RCU_READ_LOCK_GUARD();
3121     for (i = 0; i < num; i++) {
3122         if (vdev->vq[i].vring.desc) {
3123             uint16_t nheads;
3124 
3125             /*
3126              * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3127              * only the region cache needs to be set up.  Legacy devices need
3128              * to calculate used and avail ring addresses based on the desc
3129              * address.
3130              */
3131             if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3132                 virtio_init_region_cache(vdev, i);
3133             } else {
3134                 virtio_queue_update_rings(vdev, i);
3135             }
3136 
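            /*
             * Packed rings have no avail/used index in guest memory to check
             * against; seed the shadow state from the migrated values and
             * skip the split-ring consistency checks below.
             */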
3137             if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3138                 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3139                 vdev->vq[i].shadow_avail_wrap_counter =
3140                                         vdev->vq[i].last_avail_wrap_counter;
3141                 continue;
3142             }
3143 
3144             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3145             /* Check it isn't doing strange things with descriptor numbers. */
3146             if (nheads > vdev->vq[i].vring.num) {
3147                 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3148                              "inconsistent with Host index 0x%x: delta 0x%x",
3149                              i, vdev->vq[i].vring.num,
3150                              vring_avail_idx(&vdev->vq[i]),
3151                              vdev->vq[i].last_avail_idx, nheads);
3152                 vdev->vq[i].used_idx = 0;
3153                 vdev->vq[i].shadow_avail_idx = 0;
3154                 vdev->vq[i].inuse = 0;
3155                 continue;
3156             }
3157             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3158             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3159 
3160             /*
3161              * Some devices migrate VirtQueueElements that have been popped
3162              * from the avail ring but not yet returned to the used ring.
3163              * Since the maximum ring size is less than UINT16_MAX, the
3164              * subtraction below wraps safely modulo UINT16_MAX + 1.
3165              */
3166             vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3167                                 vdev->vq[i].used_idx);
3168             if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3169                 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3170                              "used_idx 0x%x",
3171                              i, vdev->vq[i].vring.num,
3172                              vdev->vq[i].last_avail_idx,
3173                              vdev->vq[i].used_idx);
3174                 return -1;
3175             }
3176         }
3177     }
3178 
3179     if (vdc->post_load) {
3180         ret = vdc->post_load(vdev);
3181         if (ret) {
3182             return ret;
3183         }
3184     }
3185 
3186     return 0;
3187 }
3188 
3189 void virtio_cleanup(VirtIODevice *vdev)
3190 {
3191     qemu_del_vm_change_state_handler(vdev->vmstate);
3192 }
3193 
3194 static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3195 {
3196     VirtIODevice *vdev = opaque;
3197     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3198     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3199     bool backend_run = running && virtio_device_started(vdev, vdev->status);
3200     vdev->vm_running = running;
3201 
3202     if (backend_run) {
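    /*
     * When the VM resumes, propagate the status to the device before telling
     * the transport; when it stops, tell the transport first and set the
     * status afterwards.
     */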
3203         virtio_set_status(vdev, vdev->status);
3204     }
3205 
3206     if (k->vmstate_change) {
3207         k->vmstate_change(qbus->parent, backend_run);
3208     }
3209 
3210     if (!backend_run) {
3211         virtio_set_status(vdev, vdev->status);
3212     }
3213 }
3214 
3215 void virtio_instance_init_common(Object *proxy_obj, void *data,
3216                                  size_t vdev_size, const char *vdev_name)
3217 {
3218     DeviceState *vdev = data;
3219 
3220     object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3221                                        vdev_size, vdev_name, &error_abort,
3222                                        NULL);
3223     qdev_alias_all_properties(vdev, proxy_obj);
3224 }
3225 
3226 void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
3227 {
3228     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3229     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3230     int i;
3231     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3232 
3233     if (nvectors) {
3234         vdev->vector_queues =
3235             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3236     }
3237 
3238     vdev->start_on_kick = false;
3239     vdev->started = false;
3240     vdev->vhost_started = false;
3241     vdev->device_id = device_id;
3242     vdev->status = 0;
3243     qatomic_set(&vdev->isr, 0);
3244     vdev->queue_sel = 0;
3245     vdev->config_vector = VIRTIO_NO_VECTOR;
3246     vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
3247     vdev->vm_running = runstate_is_running();
3248     vdev->broken = false;
3249     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3250         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3251         vdev->vq[i].vdev = vdev;
3252         vdev->vq[i].queue_index = i;
3253         vdev->vq[i].host_notifier_enabled = false;
3254     }
3255 
3256     vdev->name = virtio_id_to_name(device_id);
3257     vdev->config_len = config_size;
3258     if (vdev->config_len) {
3259         vdev->config = g_malloc0(config_size);
3260     } else {
3261         vdev->config = NULL;
3262     }
3263     vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3264             virtio_vmstate_change, vdev);
3265     vdev->device_endian = virtio_default_endian();
3266     vdev->use_guest_notifier_mask = true;
3267 }
3268 
3269 /*
3270  * Only devices that existed before the virtio standard was defined support
3271  * legacy mode; this includes devices not specified in the standard. All
3272  * newer devices conform only to the virtio standard.
3273  */
3274 bool virtio_legacy_allowed(VirtIODevice *vdev)
3275 {
3276     switch (vdev->device_id) {
3277     case VIRTIO_ID_NET:
3278     case VIRTIO_ID_BLOCK:
3279     case VIRTIO_ID_CONSOLE:
3280     case VIRTIO_ID_RNG:
3281     case VIRTIO_ID_BALLOON:
3282     case VIRTIO_ID_RPMSG:
3283     case VIRTIO_ID_SCSI:
3284     case VIRTIO_ID_9P:
3285     case VIRTIO_ID_RPROC_SERIAL:
3286     case VIRTIO_ID_CAIF:
3287         return true;
3288     default:
3289         return false;
3290     }
3291 }
3292 
3293 bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3294 {
3295     return vdev->disable_legacy_check;
3296 }
3297 
3298 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3299 {
3300     return vdev->vq[n].vring.desc;
3301 }
3302 
3303 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3304 {
3305     return virtio_queue_get_desc_addr(vdev, n) != 0;
3306 }
3307 
3308 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3309 {
3310     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3311     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3312 
3313     if (k->queue_enabled) {
3314         return k->queue_enabled(qbus->parent, n);
3315     }
3316     return virtio_queue_enabled_legacy(vdev, n);
3317 }
3318 
3319 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3320 {
3321     return vdev->vq[n].vring.avail;
3322 }
3323 
3324 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3325 {
3326     return vdev->vq[n].vring.used;
3327 }
3328 
3329 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3330 {
3331     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3332 }
3333 
3334 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3335 {
3336     int s;
3337 
3338     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3339         return sizeof(struct VRingPackedDescEvent);
3340     }
3341 
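    /* Trailing used_event field, present only with VIRTIO_RING_F_EVENT_IDX */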
3342     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3343     return offsetof(VRingAvail, ring) +
3344         sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3345 }
3346 
3347 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3348 {
3349     int s;
3350 
3351     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3352         return sizeof(struct VRingPackedDescEvent);
3353     }
3354 
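    /* Trailing avail_event field, present only with VIRTIO_RING_F_EVENT_IDX */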
3355     s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3356     return offsetof(VRingUsed, ring) +
3357         sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
3358 }
3359 
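/*
 * For packed rings the last-avail state is reported as one 32-bit value:
 * bits 0-14 hold last_avail_idx with its wrap counter in bit 15, and bits
 * 16-30 hold used_idx with the used wrap counter in bit 31.
 */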
3360 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3361                                                            int n)
3362 {
3363     unsigned int avail, used;
3364 
3365     avail = vdev->vq[n].last_avail_idx;
3366     avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3367 
3368     used = vdev->vq[n].used_idx;
3369     used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3370 
3371     return avail | used << 16;
3372 }
3373 
3374 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3375                                                       int n)
3376 {
3377     return vdev->vq[n].last_avail_idx;
3378 }
3379 
3380 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3381 {
3382     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3383         return virtio_queue_packed_get_last_avail_idx(vdev, n);
3384     } else {
3385         return virtio_queue_split_get_last_avail_idx(vdev, n);
3386     }
3387 }
3388 
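/*
 * Counterpart of virtio_queue_packed_get_last_avail_idx(): unpack the 32-bit
 * value back into the avail/used indices and their wrap counters.
 */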
3389 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3390                                                    int n, unsigned int idx)
3391 {
3392     struct VirtQueue *vq = &vdev->vq[n];
3393 
3394     vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3395     vq->last_avail_wrap_counter =
3396         vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3397     idx >>= 16;
3398     vq->used_idx = idx & 0x7fff;
3399     vq->used_wrap_counter = !!(idx & 0x8000);
3400 }
3401 
3402 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3403                                                   int n, unsigned int idx)
3404 {
3405     vdev->vq[n].last_avail_idx = idx;
3406     vdev->vq[n].shadow_avail_idx = idx;
3407 }
3408 
3409 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3410                                      unsigned int idx)
3411 {
3412     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3413         virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3414     } else {
3415         virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3416     }
3417 }
3418 
3419 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3420                                                        int n)
3421 {
3422     /* We don't have a reference like avail idx in shared memory */
3423     return;
3424 }
3425 
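/*
 * Roll last_avail_idx back to the used index read from guest memory, so that
 * descriptors that were popped but never pushed to the used ring will be
 * processed again.
 */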
3426 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3427                                                       int n)
3428 {
3429     RCU_READ_LOCK_GUARD();
3430     if (vdev->vq[n].vring.desc) {
3431         vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3432         vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3433     }
3434 }
3435 
3436 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3437 {
3438     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3439         virtio_queue_packed_restore_last_avail_idx(vdev, n);
3440     } else {
3441         virtio_queue_split_restore_last_avail_idx(vdev, n);
3442     }
3443 }
3444 
3445 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3446 {
3447     /* used idx was updated through set_last_avail_idx() */
3448     return;
3449 }
3450 
3451 static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
3452 {
3453     RCU_READ_LOCK_GUARD();
3454     if (vdev->vq[n].vring.desc) {
3455         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3456     }
3457 }
3458 
3459 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3460 {
3461     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3462         return virtio_queue_packed_update_used_idx(vdev, n);
3463     } else {
3464         return virtio_split_packed_update_used_idx(vdev, n);
3465     }
3466 }
3467 
3468 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3469 {
3470     vdev->vq[n].signalled_used_valid = false;
3471 }
3472 
3473 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3474 {
3475     return vdev->vq + n;
3476 }
3477 
3478 uint16_t virtio_get_queue_index(VirtQueue *vq)
3479 {
3480     return vq->queue_index;
3481 }
3482 
3483 static void virtio_queue_guest_notifier_read(EventNotifier *n)
3484 {
3485     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3486     if (event_notifier_test_and_clear(n)) {
3487         virtio_irq(vq);
3488     }
3489 }
3490 static void virtio_config_guest_notifier_read(EventNotifier *n)
3491 {
3492     VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
3493 
3494     if (event_notifier_test_and_clear(n)) {
3495         virtio_notify_config(vdev);
3496     }
3497 }
3498 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3499                                                 bool with_irqfd)
3500 {
3501     if (assign && !with_irqfd) {
3502         event_notifier_set_handler(&vq->guest_notifier,
3503                                    virtio_queue_guest_notifier_read);
3504     } else {
3505         event_notifier_set_handler(&vq->guest_notifier, NULL);
3506     }
3507     if (!assign) {
3508         /* Test and clear notifier before closing it,
3509          * in case poll callback didn't have time to run. */
3510         virtio_queue_guest_notifier_read(&vq->guest_notifier);
3511     }
3512 }
3513 
3514 void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
3515                                                  bool assign, bool with_irqfd)
3516 {
3517     EventNotifier *n;
3518     n = &vdev->config_notifier;
3519     if (assign && !with_irqfd) {
3520         event_notifier_set_handler(n, virtio_config_guest_notifier_read);
3521     } else {
3522         event_notifier_set_handler(n, NULL);
3523     }
3524     if (!assign) {
3525         /* Test and clear notifier before closing it,
3526          * in case poll callback didn't have time to run. */
3527         virtio_config_guest_notifier_read(n);
3528     }
3529 }
3530 
3531 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3532 {
3533     return &vq->guest_notifier;
3534 }
3535 
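/*
 * Suppress guest notifications while the AioContext is busy polling the
 * virtqueue; virtio_queue_host_notifier_aio_poll_end() re-enables them.
 */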
3536 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3537 {
3538     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3539 
3540     virtio_queue_set_notification(vq, 0);
3541 }
3542 
3543 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3544 {
3545     EventNotifier *n = opaque;
3546     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3547 
3548     return vq->vring.desc && !virtio_queue_empty(vq);
3549 }
3550 
3551 static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
3552 {
3553     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3554 
3555     virtio_queue_notify_vq(vq);
3556 }
3557 
3558 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3559 {
3560     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3561 
3562     /* Caller polls once more after this to catch requests that race with us */
3563     virtio_queue_set_notification(vq, 1);
3564 }
3565 
3566 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
3567 {
3568     /*
3569      * virtio_queue_aio_detach_host_notifier() can leave notifications disabled.
3570      * Re-enable them.  (And if detach has not been used before, notifications
3571      * being enabled is still the default state while a notifier is attached;
3572      * see virtio_queue_host_notifier_aio_poll_end(), which will always leave
3573      * notifications enabled once the polling section is left.)
3574      */
3575     if (!virtio_queue_get_notification(vq)) {
3576         virtio_queue_set_notification(vq, 1);
3577     }
3578 
3579     aio_set_event_notifier(ctx, &vq->host_notifier,
3580                            virtio_queue_host_notifier_read,
3581                            virtio_queue_host_notifier_aio_poll,
3582                            virtio_queue_host_notifier_aio_poll_ready);
3583     aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3584                                 virtio_queue_host_notifier_aio_poll_begin,
3585                                 virtio_queue_host_notifier_aio_poll_end);
3586 
3587     /*
3588      * We will have ignored notifications about new requests from the guest
3589      * while no notifiers were attached, so "kick" the virt queue to process
3590      * those requests now.
3591      */
3592     event_notifier_set(&vq->host_notifier);
3593 }
3594 
3595 /*
3596  * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
3597  * this for rx virtqueues and similar cases where the virtqueue handler
3598  * function does not pop all elements. When the virtqueue is left non-empty,
3599  * polling consumes CPU cycles and should not be used.
3600  */
3601 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
3602 {
3603     /* See virtio_queue_aio_attach_host_notifier() */
3604     if (!virtio_queue_get_notification(vq)) {
3605         virtio_queue_set_notification(vq, 1);
3606     }
3607 
3608     aio_set_event_notifier(ctx, &vq->host_notifier,
3609                            virtio_queue_host_notifier_read,
3610                            NULL, NULL);
3611 
3612     /*
3613      * See virtio_queue_aio_attach_host_notifier().
3614      * Note that this may be unnecessary for the type of virtqueues this
3615      * function is used for.  Still, it will not hurt to have a quick look into
3616      * whether we can/should process any of the virtqueue elements.
3617      */
3618     event_notifier_set(&vq->host_notifier);
3619 }
3620 
3621 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
3622 {
3623     aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
3624 
3625     /*
3626      * aio_set_event_notifier_poll() does not guarantee whether io_poll_end()
3627      * will run after io_poll_begin(), so by removing the notifier, we do not
3628      * know whether virtio_queue_host_notifier_aio_poll_end() has run after a
3629      * previous virtio_queue_host_notifier_aio_poll_begin(), i.e. whether
3630      * notifications are enabled or disabled.  It does not really matter anyway;
3631      * we just removed the notifier, so we do not care about notifications until
3632      * we potentially re-attach it.  The attach_host_notifier functions will
3633      * ensure that notifications are enabled again when they are needed.
3634      */
3635 }
3636 
3637 void virtio_queue_host_notifier_read(EventNotifier *n)
3638 {
3639     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3640     if (event_notifier_test_and_clear(n)) {
3641         virtio_queue_notify_vq(vq);
3642     }
3643 }
3644 
3645 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3646 {
3647     return &vq->host_notifier;
3648 }
3649 
3650 EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev)
3651 {
3652     return &vdev->config_notifier;
3653 }
3654 
3655 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3656 {
3657     vq->host_notifier_enabled = enabled;
3658 }
3659 
3660 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3661                                       MemoryRegion *mr, bool assign)
3662 {
3663     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3664     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3665 
3666     if (k->set_host_notifier_mr) {
3667         return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3668     }
3669 
3670     return -1;
3671 }
3672 
3673 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3674 {
3675     g_free(vdev->bus_name);
3676     vdev->bus_name = g_strdup(bus_name);
3677 }
3678 
3679 void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3680 {
3681     va_list ap;
3682 
3683     va_start(ap, fmt);
3684     error_vreport(fmt, ap);
3685     va_end(ap);
3686 
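    /*
     * Only VIRTIO 1.0 devices have a standard way to report the error to the
     * driver (DEVICE_NEEDS_RESET); in either case the device is marked broken
     * below.
     */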
3687     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3688         vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
3689         virtio_notify_config(vdev);
3690     }
3691 
3692     vdev->broken = true;
3693 }
3694 
3695 static void virtio_memory_listener_commit(MemoryListener *listener)
3696 {
3697     VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3698     int i;
3699 
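    /*
     * The guest memory map changed: rebuild the vring region caches of every
     * configured queue (vring.num == 0 marks the first unused queue).
     */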
3700     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3701         if (vdev->vq[i].vring.num == 0) {
3702             break;
3703         }
3704         virtio_init_region_cache(vdev, i);
3705     }
3706 }
3707 
3708 static void virtio_device_realize(DeviceState *dev, Error **errp)
3709 {
3710     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3711     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3712     Error *err = NULL;
3713 
3714     /* Devices should either use vmsd or the load/save methods */
3715     assert(!vdc->vmsd || !vdc->load);
3716 
3717     if (vdc->realize != NULL) {
3718         vdc->realize(dev, &err);
3719         if (err != NULL) {
3720             error_propagate(errp, err);
3721             return;
3722         }
3723     }
3724 
3725     virtio_bus_device_plugged(vdev, &err);
3726     if (err != NULL) {
3727         error_propagate(errp, err);
3728         vdc->unrealize(dev);
3729         return;
3730     }
3731 
3732     vdev->listener.commit = virtio_memory_listener_commit;
3733     vdev->listener.name = "virtio";
3734     memory_listener_register(&vdev->listener, vdev->dma_as);
3735 }
3736 
3737 static void virtio_device_unrealize(DeviceState *dev)
3738 {
3739     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3740     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3741 
3742     memory_listener_unregister(&vdev->listener);
3743     virtio_bus_device_unplugged(vdev);
3744 
3745     if (vdc->unrealize != NULL) {
3746         vdc->unrealize(dev);
3747     }
3748 
3749     g_free(vdev->bus_name);
3750     vdev->bus_name = NULL;
3751 }
3752 
3753 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3754 {
3755     int i;
3756     if (!vdev->vq) {
3757         return;
3758     }
3759 
3760     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3761         if (vdev->vq[i].vring.num == 0) {
3762             break;
3763         }
3764         virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
3765     }
3766     g_free(vdev->vq);
3767 }
3768 
3769 static void virtio_device_instance_finalize(Object *obj)
3770 {
3771     VirtIODevice *vdev = VIRTIO_DEVICE(obj);
3772 
3773     virtio_device_free_virtqueues(vdev);
3774 
3775     g_free(vdev->config);
3776     g_free(vdev->vector_queues);
3777 }
3778 
3779 static Property virtio_properties[] = {
3780     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
3781     DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
3782     DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
3783     DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
3784                      disable_legacy_check, false),
3785     DEFINE_PROP_END_OF_LIST(),
3786 };
3787 
3788 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
3789 {
3790     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3791     int i, n, r, err;
3792 
3793     /*
3794      * Batch all the host notifiers in a single transaction to avoid
3795      * quadratic time complexity in address_space_update_ioeventfds().
3796      */
3797     memory_region_transaction_begin();
3798     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3799         VirtQueue *vq = &vdev->vq[n];
3800         if (!virtio_queue_get_num(vdev, n)) {
3801             continue;
3802         }
3803         r = virtio_bus_set_host_notifier(qbus, n, true);
3804         if (r < 0) {
3805             err = r;
3806             goto assign_error;
3807         }
3808         event_notifier_set_handler(&vq->host_notifier,
3809                                    virtio_queue_host_notifier_read);
3810     }
3811 
3812     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3813         /* Kick right away to begin processing requests already in vring */
3814         VirtQueue *vq = &vdev->vq[n];
3815         if (!vq->vring.num) {
3816             continue;
3817         }
3818         event_notifier_set(&vq->host_notifier);
3819     }
3820     memory_region_transaction_commit();
3821     return 0;
3822 
3823 assign_error:
3824     i = n; /* save n for a second iteration after transaction is committed. */
3825     while (--n >= 0) {
3826         VirtQueue *vq = &vdev->vq[n];
3827         if (!virtio_queue_get_num(vdev, n)) {
3828             continue;
3829         }
3830 
3831         event_notifier_set_handler(&vq->host_notifier, NULL);
3832         r = virtio_bus_set_host_notifier(qbus, n, false);
3833         assert(r >= 0);
3834     }
3835     /*
3836      * The transaction expects the ioeventfds to be open when it
3837      * commits. Do it now, before the cleanup loop.
3838      */
3839     memory_region_transaction_commit();
3840 
3841     while (--i >= 0) {
3842         if (!virtio_queue_get_num(vdev, i)) {
3843             continue;
3844         }
3845         virtio_bus_cleanup_host_notifier(qbus, i);
3846     }
3847     return err;
3848 }
3849 
3850 int virtio_device_start_ioeventfd(VirtIODevice *vdev)
3851 {
3852     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3853     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3854 
3855     return virtio_bus_start_ioeventfd(vbus);
3856 }
3857 
3858 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
3859 {
3860     VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3861     int n, r;
3862 
3863     /*
3864      * Batch all the host notifiers in a single transaction to avoid
3865      * quadratic time complexity in address_space_update_ioeventfds().
3866      */
3867     memory_region_transaction_begin();
3868     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3869         VirtQueue *vq = &vdev->vq[n];
3870 
3871         if (!virtio_queue_get_num(vdev, n)) {
3872             continue;
3873         }
3874         event_notifier_set_handler(&vq->host_notifier, NULL);
3875         r = virtio_bus_set_host_notifier(qbus, n, false);
3876         assert(r >= 0);
3877     }
3878     /*
3879      * The transaction expects the ioeventfds to be open when it
3880      * commits. Do it now, before the cleanup loop.
3881      */
3882     memory_region_transaction_commit();
3883 
3884     for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3885         if (!virtio_queue_get_num(vdev, n)) {
3886             continue;
3887         }
3888         virtio_bus_cleanup_host_notifier(qbus, n);
3889     }
3890 }
3891 
3892 int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
3893 {
3894     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3895     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3896 
3897     return virtio_bus_grab_ioeventfd(vbus);
3898 }
3899 
3900 void virtio_device_release_ioeventfd(VirtIODevice *vdev)
3901 {
3902     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3903     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3904 
3905     virtio_bus_release_ioeventfd(vbus);
3906 }
3907 
3908 static void virtio_device_class_init(ObjectClass *klass, void *data)
3909 {
3910     /* Set the default value here. */
3911     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
3912     DeviceClass *dc = DEVICE_CLASS(klass);
3913 
3914     dc->realize = virtio_device_realize;
3915     dc->unrealize = virtio_device_unrealize;
3916     dc->bus_type = TYPE_VIRTIO_BUS;
3917     device_class_set_props(dc, virtio_properties);
3918     vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
3919     vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
3920 
3921     vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
3922 }
3923 
3924 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
3925 {
3926     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3927     VirtioBusState *vbus = VIRTIO_BUS(qbus);
3928 
3929     return virtio_bus_ioeventfd_enabled(vbus);
3930 }
3931 
3932 VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
3933                                                  uint16_t queue,
3934                                                  Error **errp)
3935 {
3936     VirtIODevice *vdev;
3937     VirtQueueStatus *status;
3938 
3939     vdev = qmp_find_virtio_device(path);
3940     if (vdev == NULL) {
3941         error_setg(errp, "Path %s is not a VirtIODevice", path);
3942         return NULL;
3943     }
3944 
3945     if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
3946         error_setg(errp, "Invalid virtqueue number %d", queue);
3947         return NULL;
3948     }
3949 
3950     status = g_new0(VirtQueueStatus, 1);
3951     status->name = g_strdup(vdev->name);
3952     status->queue_index = vdev->vq[queue].queue_index;
3953     status->inuse = vdev->vq[queue].inuse;
3954     status->vring_num = vdev->vq[queue].vring.num;
3955     status->vring_num_default = vdev->vq[queue].vring.num_default;
3956     status->vring_align = vdev->vq[queue].vring.align;
3957     status->vring_desc = vdev->vq[queue].vring.desc;
3958     status->vring_avail = vdev->vq[queue].vring.avail;
3959     status->vring_used = vdev->vq[queue].vring.used;
3960     status->used_idx = vdev->vq[queue].used_idx;
3961     status->signalled_used = vdev->vq[queue].signalled_used;
3962     status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
3963 
3964     if (vdev->vhost_started) {
3965         VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3966         struct vhost_dev *hdev = vdc->get_vhost(vdev);
3967 
3968         /* Check if the vq index exists for vhost as well */
3969         if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
3970             status->has_last_avail_idx = true;
3971 
3972             int vhost_vq_index =
3973                 hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
3974             struct vhost_vring_state state = {
3975                 .index = vhost_vq_index,
3976             };
3977 
3978             status->last_avail_idx =
3979                 hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
3980         }
3981     } else {
3982         status->has_shadow_avail_idx = true;
3983         status->has_last_avail_idx = true;
3984         status->last_avail_idx = vdev->vq[queue].last_avail_idx;
3985         status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
3986     }
3987 
3988     return status;
3989 }
3990 
3991 static strList *qmp_decode_vring_desc_flags(uint16_t flags)
3992 {
3993     strList *list = NULL;
3994     strList *node;
3995     int i;
3996 
3997     struct {
3998         uint16_t flag;
3999         const char *value;
4000     } map[] = {
4001         { VRING_DESC_F_NEXT, "next" },
4002         { VRING_DESC_F_WRITE, "write" },
4003         { VRING_DESC_F_INDIRECT, "indirect" },
4004         { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
4005         { 1 << VRING_PACKED_DESC_F_USED, "used" },
4006         { 0, "" }
4007     };
4008 
4009     for (i = 0; map[i].flag; i++) {
4010         if ((map[i].flag & flags) == 0) {
4011             continue;
4012         }
4013         node = g_malloc0(sizeof(strList));
4014         node->value = g_strdup(map[i].value);
4015         node->next = list;
4016         list = node;
4017     }
4018 
4019     return list;
4020 }
4021 
4022 VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
4023                                                      uint16_t queue,
4024                                                      bool has_index,
4025                                                      uint16_t index,
4026                                                      Error **errp)
4027 {
4028     VirtIODevice *vdev;
4029     VirtQueue *vq;
4030     VirtioQueueElement *element = NULL;
4031 
4032     vdev = qmp_find_virtio_device(path);
4033     if (vdev == NULL) {
4034         error_setg(errp, "Path %s is not a VirtIO device", path);
4035         return NULL;
4036     }
4037 
4038     if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
4039         error_setg(errp, "Invalid virtqueue number %d", queue);
4040         return NULL;
4041     }
4042     vq = &vdev->vq[queue];
4043 
4044     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
4045         error_setg(errp, "Packed ring not supported");
4046         return NULL;
4047     } else {
4048         unsigned int head, i, max;
4049         VRingMemoryRegionCaches *caches;
4050         MemoryRegionCache indirect_desc_cache;
4051         MemoryRegionCache *desc_cache;
4052         VRingDesc desc;
4053         VirtioRingDescList *list = NULL;
4054         VirtioRingDescList *node;
4055         int rc; int ndescs;
4056         int rc, ndescs;
4057         address_space_cache_init_empty(&indirect_desc_cache);
4058 
4059         RCU_READ_LOCK_GUARD();
4060 
4061         max = vq->vring.num;
4062 
4063         if (!has_index) {
4064             head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
4065         } else {
4066             head = vring_avail_ring(vq, index % vq->vring.num);
4067         }
4068         i = head;
4069 
4070         caches = vring_get_region_caches(vq);
4071         if (!caches) {
4072             error_setg(errp, "Region caches not initialized");
4073             return NULL;
4074         }
4075         if (caches->desc.len < max * sizeof(VRingDesc)) {
4076             error_setg(errp, "Cannot map descriptor ring");
4077             return NULL;
4078         }
4079 
4080         desc_cache = &caches->desc;
4081         vring_split_desc_read(vdev, &desc, desc_cache, i);
4082         if (desc.flags & VRING_DESC_F_INDIRECT) {
4083             int64_t len;
4084             len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
4085                                            desc.addr, desc.len, false);
4086             desc_cache = &indirect_desc_cache;
4087             if (len < desc.len) {
4088                 error_setg(errp, "Cannot map indirect buffer");
4089                 goto done;
4090             }
4091 
4092             max = desc.len / sizeof(VRingDesc);
4093             i = 0;
4094             vring_split_desc_read(vdev, &desc, desc_cache, i);
4095         }
4096 
4097         element = g_new0(VirtioQueueElement, 1);
4098         element->avail = g_new0(VirtioRingAvail, 1);
4099         element->used = g_new0(VirtioRingUsed, 1);
4100         element->name = g_strdup(vdev->name);
4101         element->index = head;
4102         element->avail->flags = vring_avail_flags(vq);
4103         element->avail->idx = vring_avail_idx(vq);
4104         element->avail->ring = head;
4105         element->used->flags = vring_used_flags(vq);
4106         element->used->idx = vring_used_idx(vq);
4107         ndescs = 0;
4108 
4109         do {
4110             /* A buggy driver may produce an infinite loop */
4111             if (ndescs >= max) {
4112                 break;
4113             }
4114             node = g_new0(VirtioRingDescList, 1);
4115             node->value = g_new0(VirtioRingDesc, 1);
4116             node->value->addr = desc.addr;
4117             node->value->len = desc.len;
4118             node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
4119             node->next = list;
4120             list = node;
4121 
4122             ndescs++;
4123             rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
4124         } while (rc == VIRTQUEUE_READ_DESC_MORE);
4125         element->descs = list;
4126 done:
4127         address_space_cache_destroy(&indirect_desc_cache);
4128     }
4129 
4130     return element;
4131 }
4132 
4133 static const TypeInfo virtio_device_info = {
4134     .name = TYPE_VIRTIO_DEVICE,
4135     .parent = TYPE_DEVICE,
4136     .instance_size = sizeof(VirtIODevice),
4137     .class_init = virtio_device_class_init,
4138     .instance_finalize = virtio_device_instance_finalize,
4139     .abstract = true,
4140     .class_size = sizeof(VirtioDeviceClass),
4141 };
4142 
4143 static void virtio_register_types(void)
4144 {
4145     type_register_static(&virtio_device_info);
4146 }
4147 
4148 type_init(virtio_register_types)
4149 
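/*
 * Create a bottom half tied to the transport device's memory re-entrancy
 * guard, so the callback is not run re-entrantly from within the device's
 * own MMIO handling.
 */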
4150 QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
4151                                    QEMUBHFunc *cb, void *opaque,
4152                                    const char *name)
4153 {
4154     DeviceState *transport = qdev_get_parent_bus(dev)->parent;
4155 
4156     return qemu_bh_new_full(cb, opaque, name,
4157                             &transport->mem_reentrancy_guard);
4158 }
4159