/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

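/*
 * MemoryRegionCache lets the fast path avoid repeated address_space
 * translation for the ring structures. The whole set is replaced
 * atomically and reclaimed via RCU whenever the rings are re-mapped.
 */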
typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;

struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};

static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    if (!caches) {
        return;
    }

    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

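/*
 * Readers access vq->vring.caches under rcu_read_lock(); unpublish the
 * pointer first and only free the old caches after a grace period.
 */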
static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = atomic_read(&vq->vring.caches);
    atomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}

static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    atomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
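    /*
     * Split ring layout (a worked example, not from the original source):
     * with num = 256 the descriptor table occupies 256 * 16 = 4096 bytes,
     * the avail ring needs 4 + 2 * 256 = 516 bytes right after it, and
     * with align = 4096 the used ring then starts 8192 bytes into the
     * region.
     */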
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}

/* Called within rcu_read_lock(). */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    address_space_read_cached(cache, off_flags, &e->flags,
                              sizeof(e->flags));
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    address_space_read_cached(cache, off_off, &e->off_wrap,
                              sizeof(e->off_wrap));
    virtio_tswap16s(vdev, &e->off_wrap);
    virtio_tswap16s(vdev, &e->flags);
}

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_tswap16s(vdev, &off_wrap);
    address_space_write_cached(cache, off, &off_wrap, sizeof(off_wrap));
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_tswap16s(vdev, &flags);
    address_space_write_cached(cache, off, &flags, sizeof(flags));
    address_space_cache_invalidate(cache, off, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return atomic_rcu_read(&vq->vring.caches);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

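/*
 * With VIRTIO_RING_F_EVENT_IDX the split ring places the driver's
 * used_event field directly after the avail ring entries, which is why
 * it is read below as avail ring element vq->vring.num.
 */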
/* Called within rcu_read_lock(). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

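/*
 * The device-side counterpart of used_event: avail_event lives after the
 * used ring entries and tells the driver which avail index should trigger
 * the next kick.
 */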
/* Called within rcu_read_lock(). */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;
    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}

static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
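        /*
         * Per the packed ring spec, the low 15 bits of off_wrap carry the
         * event index and bit 15 carries the wrap counter.
         */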
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    address_space_read_cached(cache,
                              i * sizeof(VRingPackedDesc) +
                              offsetof(VRingPackedDesc, flags),
                              flags, sizeof(*flags));
    virtio_tswap16s(vdev, flags);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                     offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_tswap16s(vdev, &desc->flags);
    address_space_write_cached(cache, off, &desc->flags, sizeof(desc->flags));
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}

static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}

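/*
 * A packed-ring descriptor is available when its AVAIL flag matches the
 * driver's wrap counter and its USED flag does not; the device flips both
 * flags to the same value when it returns the descriptor.
 */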
static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock(). */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (unlikely(vq->vdev->broken)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (unlikely(vq->vdev->broken)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue. This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue. The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}

static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                 unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}

/* Called within rcu_read_lock(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}

/* Called within rcu_read_lock(). */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
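    /*
     * If used_idx has moved past the index we last signalled on, the
     * cached signalled_used value can no longer be used for notification
     * suppression, so invalidate it.
     */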
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
    ndescs += vq->used_elems[0].ndescs;

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
    }
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

/* Called within rcu_read_lock(). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved avail index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock(). */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    RCU_READ_LOCK_GUARD();

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;
    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}

static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache *desc_cache;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    RCU_READ_LOCK_GUARD();
    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;
    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;

        desc_cache = &caches->desc;
        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
            idx++;
        } else {
            idx += num_bufs - total_bufs;
            total_bufs = num_bufs;
        }

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;
done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num, 0);
}

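/*
 * The element and its four arrays are carved out of a single allocation;
 * the offsets computed below keep each array suitably aligned for its type.
 */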
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are neither input nor output buffers. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_split_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    elem->ndescs = 1;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingPackedDesc desc;
    uint16_t id;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_packed_empty_rcu(vq)) {
        goto done;
    }

    /* When we start there are neither input nor output buffers. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    i = vq->last_avail_idx;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingPackedDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
    id = desc.id;
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingPackedDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingPackedDesc);
        i = 0;
        vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
                                             desc_cache ==
                                             &indirect_desc_cache);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    elem->index = id;
    elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
    vq->last_avail_idx += elem->ndescs;
    vq->inuse += elem->ndescs;

    if (vq->last_avail_idx >= vq->vring.num) {
        vq->last_avail_idx -= vq->vring.num;
        vq->last_avail_wrap_counter ^= 1;
    }

    vq->shadow_avail_idx = vq->last_avail_idx;
    vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    if (unlikely(vq->vdev->broken)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_pop(vq, sz);
    } else {
        return virtqueue_split_pop(vq, sz);
    }
}
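
/*
 * Typical device-side usage (an illustrative sketch, not part of this
 * file; virtio_notify() is declared elsewhere in virtio.h):
 *
 *     VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *     if (elem) {
 *         ... read the request from elem->out_sg, write the reply to
 *         ... elem->in_sg, then return the buffers and kick the guest:
 *         virtqueue_push(vq, elem, reply_len);
 *         virtio_notify(vdev, vq);
 *         g_free(elem);
 *     }
 */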

static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache *desc_cache;
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    VRingPackedDesc desc;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return 0;
    }

    desc_cache = &caches->desc;

    virtio_queue_set_notification(vq, 0);

    while (vq->inuse < vq->vring.num) {
        unsigned int idx = vq->last_avail_idx;
        /*
         * Works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory.
         */
        vring_packed_desc_read(vdev, &desc, desc_cache,
                               vq->last_avail_idx, true);
        if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
            break;
        }
        elem.index = desc.id;
        elem.ndescs = 1;
        while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
                                               vq->vring.num, &idx, false)) {
            ++elem.ndescs;
        }
        /*
         * immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0.
         */
        virtqueue_push(vq, &elem, 0);
        dropped++;
        vq->last_avail_idx += elem.ndescs;
        if (vq->last_avail_idx >= vq->vring.num) {
            vq->last_avail_idx -= vq->vring.num;
            vq->last_avail_wrap_counter ^= 1;
        }
    }

    return dropped;
}

static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
1752 smp_rmb();
1753 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1754 break;
1755 }
1756 vq->inuse++;
1757 vq->last_avail_idx++;
1758 if (fEventIdx) {
1759 vring_set_avail_event(vq, vq->last_avail_idx);
1760 }
1761 /* immediately push the element, nothing to unmap
1762 * as both in_num and out_num are set to 0 */
1763 virtqueue_push(vq, &elem, 0);
1764 dropped++;
1765 }
1766
1767 return dropped;
1768 }
1769
1770 /* virtqueue_drop_all:
1771 * @vq: The #VirtQueue
1772 * Drops all queued buffers and indicates them to the guest
1773 * as if they are done. Useful when buffers can not be
1774 * processed but must be returned to the guest.
1775 */
virtqueue_drop_all(VirtQueue * vq)1776 unsigned int virtqueue_drop_all(VirtQueue *vq)
1777 {
1778 struct VirtIODevice *vdev = vq->vdev;
1779
1780 if (unlikely(vdev->broken)) {
1781 return 0;
1782 }
1783
1784 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1785 return virtqueue_packed_drop_all(vq);
1786 } else {
1787 return virtqueue_split_drop_all(vq);
1788 }
1789 }
1790
1791 /* Reading and writing a structure directly to QEMUFile is *awful*, but
1792 * it is what QEMU has always done by mistake. We can change it sooner
1793 * or later by bumping the version number of the affected vm states.
1794 * In the meanwhile, since the in-memory layout of VirtQueueElement
1795 * has changed, we need to marshal to and from the layout that was
1796 * used before the change.
1797 */
1798 typedef struct VirtQueueElementOld {
1799 unsigned int index;
1800 unsigned int out_num;
1801 unsigned int in_num;
1802 hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1803 hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1804 struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1805 struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1806 } VirtQueueElementOld;

void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map. */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map. */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_get_be32s(f, &elem->ndescs);
    }

    virtqueue_map(vdev, elem);
    return elem;
}

void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
                                VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above. */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_put_be32s(f, &elem->ndescs);
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }

    if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
        (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
        virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;

    return 0;
}

static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        vdev->vq[i].last_avail_wrap_counter = true;
        vdev->vq[i].shadow_avail_wrap_counter = true;
        vdev->vq[i].used_wrap_counter = true;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
}
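
/*
 * Illustrative sketch, an assumption about typical transport code rather
 * than a quote from any one transport: a guest write of 0 to the status
 * register is usually routed here, so that ring addresses, ISR state and
 * vectors return to their post-boot defaults before the driver
 * reinitializes the device:
 *
 *     if (status == 0) {
 *         virtio_reset(vdev);
 *     } else {
 *         virtio_set_status(vdev, status);
 *     }
 */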

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
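
/*
 * Illustrative sketch with a hypothetical device, not from this file:
 * a device class fills vdev->config in its get_config callback and the
 * accessors above take care of access width and bounds.  A device
 * storing a 32-bit field might write it as:
 *
 *     static void my_dev_get_config(VirtIODevice *vdev, uint8_t *config)
 *     {
 *         MyDev *s = MY_DEV(vdev);
 *
 *         virtio_stl_p(vdev, config, s->some_counter);
 *     }
 *
 * where virtio_stl_p() (from virtio-access.h) picks the byte order that
 * matches legacy vs VIRTIO 1.0 behaviour.
 */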

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    if (!vdev->vq[n].vring.num) {
        return;
    }
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    if (align) {
        vdev->vq[n].vring.align = align;
        virtio_queue_update_rings(vdev, n);
    }
}

static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    bool ret = false;

    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        ret = vq->handle_aio_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }

    return ret;
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    if (unlikely(!vq->vring.desc || vdev->broken)) {
        return;
    }

    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->host_notifier_enabled) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);

        if (unlikely(vdev->start_on_kick)) {
            virtio_set_started(vdev, true);
        }
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;
    vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) *
                                       queue_size);

    return &vdev->vq[i];
}

void virtio_delete_queue(VirtQueue *vq)
{
    vq->vring.num = 0;
    vq->vring.num_default = 0;
    vq->handle_output = NULL;
    vq->handle_aio_output = NULL;
    g_free(vq->used_elems);
    vq->used_elems = NULL;
    virtio_virtqueue_reset_region_cache(vq);
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    virtio_delete_queue(&vdev->vq[n]);
}

static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = atomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
    }
}

static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when the queue is empty, if the guest has
     * acknowledged the VIRTIO_F_NOTIFY_ON_EMPTY feature. */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
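
/*
 * Worked example, assuming the standard vring_need_event() definition
 * from the virtio headers, (uint16_t)(new - event - 1) < (uint16_t)(new - old):
 * with old = 5, new = 8 and a used_event of 6, 1 < 3 holds and the guest
 * is notified because its event index was crossed while the device moved
 * from 5 to 8; with a used_event of 9 the left side wraps to 0xfffe and
 * the interrupt is suppressed.
 */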

static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
                                    uint16_t off_wrap, uint16_t new,
                                    uint16_t old)
{
    int off = off_wrap & ~(1 << 15);

    if (wrap != off_wrap >> 15) {
        off -= vq->vring.num;
    }

    return vring_need_event(off, new, old);
}
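
/*
 * Worked example: the driver publishes a 16-bit off_wrap whose bit 15 is
 * its wrap counter and whose low 15 bits are a ring offset.  With
 * vring.num = 256, off_wrap = 0x8005 and the device's wrap counter still
 * false, the wrap bits differ, so the offset is rebased to
 * 5 - 256 = -251 before being handed to vring_need_event() together with
 * the old and new used indices.
 */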

static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    VRingPackedDescEvent e;
    uint16_t old, new;
    bool v;
    VRingMemoryRegionCaches *caches;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return false;
    }

    vring_packed_event_read(vdev, &caches->avail, &e);

    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;

    if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
        return false;
    } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
        return true;
    }

    return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
                                         e.off_wrap, new, old);
}

/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_packed_should_notify(vdev, vq);
    } else {
        return virtio_split_should_notify(vdev, vq);
    }
}

void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with MSI
     * enabled.
     *
     * The next driver release, from 2016, fixed this problem, so working
     * around it is not a must, but it is easy, so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}

static void virtio_irq(VirtQueue *vq)
{
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    WITH_RCU_READ_LOCK_GUARD() {
        if (!virtio_should_notify(vdev, vq)) {
            return;
        }
    }

    trace_virtio_notify(vdev, vq);
    virtio_irq(vq);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_packed_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
           k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static bool virtio_started_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->started;
}

static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_packed_virtqueue = {
    .name = "packed_virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
        VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
        VMSTATE_UINT16(used_idx, struct VirtQueue),
        VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
        VMSTATE_UINT32(inuse, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_packed_virtqueues = {
    .name = "virtio/packed_virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_packed_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name = "extra_state",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0,
            .info = &vmstate_info_extra_state,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_started = {
    .name = "virtio/started",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_started_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(started, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        &vmstate_virtio_started,
        &vmstate_virtio_packed_virtqueues,
        NULL
    }
};
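
/*
 * Illustrative sketch, an assumption about how new state would be added
 * rather than code from this file: each subsection above is emitted only
 * when its .needed callback returns true, which keeps streams loadable by
 * older QEMUs.  A hypothetical new flag would follow the same pattern:
 *
 *     static bool virtio_frob_needed(void *opaque)
 *     {
 *         VirtIODevice *vdev = opaque;
 *
 *         return vdev->frob;              (hypothetical field)
 *     }
 *
 *     static const VMStateDescription vmstate_virtio_frob = {
 *         .name = "virtio/frob",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .needed = &virtio_frob_needed,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_BOOL(frob, VirtIODevice),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 *
 * plus an entry in the .subsections list above.
 */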

int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /*
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
         */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field, QJSON *vmdesc)
{
    return virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             const VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    int ret;
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    ret = virtio_set_features_nocheck(vdev, val);
    if (!ret) {
        if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
            /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
            int i;
            for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
                if (vdev->vq[i].vring.num != 0) {
                    virtio_init_region_cache(vdev, i);
                }
            }
        }

        if (!virtio_device_started(vdev, vdev->status) &&
            !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
            vdev->start_on_kick = true;
        }
    }
    return ret;
}

size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes,
                                      uint64_t host_features)
{
    size_t config_size = 0;
    int i;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }

    return config_size;
}
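
/*
 * Illustrative sketch with a hypothetical feature table, not from this
 * file: devices whose config layout grows with negotiated features
 * describe the layout with a VirtIOFeature array terminated by
 * .flags == 0 and let the helper pick the largest size required:
 *
 *     static VirtIOFeature my_feature_sizes[] = {
 *         {.flags = 1ULL << MY_F_EXTENDED,
 *          .end = endof(struct my_config, extended_field)},
 *         {}
 *     };
 *
 *     config_size = virtio_feature_get_config_size(my_feature_sizes,
 *                                                  host_features);
 *
 * endof() is the osdep.h helper yielding offset plus size of a member.
 */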

int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features_nocheck() to sanity-check them
         * against host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    if (!virtio_device_started(vdev, vdev->status) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->start_on_kick = true;
    }

    RCU_READ_LOCK_GUARD();
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;

            /*
             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
             * only the region cache needs to be set up.  Legacy devices need
             * to calculate used and avail ring addresses based on the desc
             * address.
             */
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                virtio_init_region_cache(vdev, i);
            } else {
                virtio_queue_update_rings(vdev, i);
            }

            if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
                vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
                vdev->vq[i].shadow_avail_wrap_counter =
                                        vdev->vq[i].last_avail_wrap_counter;
                continue;
            }

            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    if (vdc->post_load) {
        ret = vdc->post_load(vdev);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && virtio_device_started(vdev, vdev->status);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize_child(proxy_obj, "virtio-backend", vdev, vdev_size,
                            vdev_name, &error_abort, NULL);
    qdev_alias_all_properties(vdev, proxy_obj);
}

void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->start_on_kick = false;
    vdev->started = false;
    vdev->device_id = device_id;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
        vdev->vq[i].host_notifier_enabled = false;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
            virtio_vmstate_change, vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
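
/*
 * Illustrative sketch with a hypothetical device, not from this file: a
 * typical realize callback wires up the common state and its queues:
 *
 *     static void my_dev_realize(DeviceState *dev, Error **errp)
 *     {
 *         VirtIODevice *vdev = VIRTIO_DEVICE(dev);
 *
 *         virtio_init(vdev, "my-dev", MY_DEV_ID, sizeof(struct my_config));
 *         virtio_add_queue(vdev, 128, my_dev_handle_vq);
 *     }
 *
 * MY_DEV_ID, struct my_config and my_dev_handle_vq are stand-ins; the
 * matching unrealize would mirror this with virtio_del_queue() and
 * virtio_cleanup().
 */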

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

bool virtio_queue_enabled(VirtIODevice *vdev, int n)
{
    return virtio_queue_get_desc_addr(vdev, n) != 0;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num + s;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    int s;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return sizeof(struct VRingPackedDescEvent);
    }

    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
}

static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
                                                           int n)
{
    unsigned int avail, used;

    avail = vdev->vq[n].last_avail_idx;
    avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;

    used = vdev->vq[n].used_idx;
    used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;

    return avail | used << 16;
}
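
/*
 * Worked example: with last_avail_idx = 3, last_avail_wrap_counter = true,
 * used_idx = 1 and used_wrap_counter = true, the function above returns
 * (3 | 0x8000) | ((1 | 0x8000) << 16) = 0x80018003; the packed setter
 * below undoes exactly this encoding.
 */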

static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    return vdev->vq[n].last_avail_idx;
}

unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_get_last_avail_idx(vdev, n);
    } else {
        return virtio_queue_split_get_last_avail_idx(vdev, n);
    }
}

static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
                                                   int n, unsigned int idx)
{
    struct VirtQueue *vq = &vdev->vq[n];

    vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
    vq->last_avail_wrap_counter =
        vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
    idx >>= 16;
    /* Bit 15 of the high half is the used wrap counter, not part of
     * used_idx. */
    vq->used_idx = idx & 0x7fff;
    vq->used_wrap_counter = !!(idx & 0x8000);
}

static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
                                                  int n, unsigned int idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
                                     unsigned int idx)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
    } else {
        virtio_queue_split_set_last_avail_idx(vdev, n, idx);
    }
}

static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
                                                       int n)
{
    /* We don't have a reference like avail idx in shared memory */
    return;
}

static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
    }
}

void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_restore_last_avail_idx(vdev, n);
    } else {
        virtio_queue_split_restore_last_avail_idx(vdev, n);
    }
}

static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    /* used idx was updated through set_last_avail_idx() */
    return;
}

static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_update_used_idx(vdev, n);
    } else {
        return virtio_split_packed_update_used_idx(vdev, n);
    }
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    bool progress;

    if (!vq->vring.desc || virtio_queue_empty(vq)) {
        return false;
    }

    progress = virtio_queue_notify_aio_vq(vq);

    /* In case the handler function re-enabled notifications */
    virtio_queue_set_notification(vq, 0);
    return progress;
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleAIOOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
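
/*
 * Illustrative sketch, an assumption about typical dataplane usage rather
 * than code from this file: a device that processes a queue in an
 * IOThread attaches its handler in that AioContext and detaches it with
 * NULL before switching back to the main loop:
 *
 *     aio_context_acquire(ctx);
 *     virtio_queue_aio_set_host_notifier_handler(vq, ctx, my_handle_vq);
 *     aio_context_release(ctx);
 *
 * my_handle_vq is a hypothetical VirtIOHandleAIOOutput that returns true
 * when it made progress, which feeds the poll loop above.
 */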
3517
virtio_queue_host_notifier_read(EventNotifier * n)3518 void virtio_queue_host_notifier_read(EventNotifier *n)
3519 {
3520 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3521 if (event_notifier_test_and_clear(n)) {
3522 virtio_queue_notify_vq(vq);
3523 }
3524 }
3525
virtio_queue_get_host_notifier(VirtQueue * vq)3526 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3527 {
3528 return &vq->host_notifier;
3529 }
3530
virtio_queue_set_host_notifier_enabled(VirtQueue * vq,bool enabled)3531 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3532 {
3533 vq->host_notifier_enabled = enabled;
3534 }
3535
virtio_queue_set_host_notifier_mr(VirtIODevice * vdev,int n,MemoryRegion * mr,bool assign)3536 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3537 MemoryRegion *mr, bool assign)
3538 {
3539 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3540 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3541
3542 if (k->set_host_notifier_mr) {
3543 return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3544 }
3545
3546 return -1;
3547 }
3548
virtio_device_set_child_bus_name(VirtIODevice * vdev,char * bus_name)3549 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3550 {
3551 g_free(vdev->bus_name);
3552 vdev->bus_name = g_strdup(bus_name);
3553 }
3554
virtio_error(VirtIODevice * vdev,const char * fmt,...)3555 void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3556 {
3557 va_list ap;
3558
3559 va_start(ap, fmt);
3560 error_vreport(fmt, ap);
3561 va_end(ap);
3562
3563 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3564 vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
3565 virtio_notify_config(vdev);
3566 }
3567
3568 vdev->broken = true;
3569 }
3570
virtio_memory_listener_commit(MemoryListener * listener)3571 static void virtio_memory_listener_commit(MemoryListener *listener)
3572 {
3573 VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3574 int i;
3575
3576 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3577 if (vdev->vq[i].vring.num == 0) {
3578 break;
3579 }
3580 virtio_init_region_cache(vdev, i);
3581 }
3582 }
3583
virtio_device_realize(DeviceState * dev,Error ** errp)3584 static void virtio_device_realize(DeviceState *dev, Error **errp)
3585 {
3586 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3587 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3588 Error *err = NULL;
3589
3590 /* Devices should either use vmsd or the load/save methods */
3591 assert(!vdc->vmsd || !vdc->load);
3592
3593 if (vdc->realize != NULL) {
3594 vdc->realize(dev, &err);
3595 if (err != NULL) {
3596 error_propagate(errp, err);
3597 return;
3598 }
3599 }
3600
3601 virtio_bus_device_plugged(vdev, &err);
3602 if (err != NULL) {
3603 error_propagate(errp, err);
3604 vdc->unrealize(dev, NULL);
3605 return;
3606 }
3607
3608 vdev->listener.commit = virtio_memory_listener_commit;
3609 memory_listener_register(&vdev->listener, vdev->dma_as);
3610 }
3611
virtio_device_unrealize(DeviceState * dev,Error ** errp)3612 static void virtio_device_unrealize(DeviceState *dev, Error **errp)
3613 {
3614 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3615 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3616 Error *err = NULL;
3617
3618 virtio_bus_device_unplugged(vdev);
3619
3620 if (vdc->unrealize != NULL) {
3621 vdc->unrealize(dev, &err);
3622 if (err != NULL) {
3623 error_propagate(errp, err);
3624 return;
3625 }
3626 }
3627
3628 g_free(vdev->bus_name);
3629 vdev->bus_name = NULL;
3630 }
3631
virtio_device_free_virtqueues(VirtIODevice * vdev)3632 static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3633 {
3634 int i;
3635 if (!vdev->vq) {
3636 return;
3637 }
3638
3639 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3640 if (vdev->vq[i].vring.num == 0) {
3641 break;
3642 }
3643 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
3644 }
3645 g_free(vdev->vq);
3646 }
3647
virtio_device_instance_finalize(Object * obj)3648 static void virtio_device_instance_finalize(Object *obj)
3649 {
3650 VirtIODevice *vdev = VIRTIO_DEVICE(obj);
3651
3652 memory_listener_unregister(&vdev->listener);
3653 virtio_device_free_virtqueues(vdev);
3654
3655 g_free(vdev->config);
3656 g_free(vdev->vector_queues);
3657 }
3658
3659 static Property virtio_properties[] = {
3660 DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
3661 DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
3662 DEFINE_PROP_END_OF_LIST(),
3663 };
3664
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}

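/*
 * Note the indirection: this public entry point asks the transport bus
 * to start ioeventfd, and virtio_bus_start_ioeventfd() in turn invokes
 * the device class's start_ioeventfd hook, which is
 * virtio_device_start_ioeventfd_impl() above unless a subclass overrides
 * it.  The bus therefore gets a chance to veto or wrap the operation.
 */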
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}

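/*
 * Mirror image of virtio_device_start_ioeventfd_impl(): clear the
 * handlers and deassign the host notifiers inside one transaction, then
 * clean up the file descriptors only after the transaction has committed
 * and the deassignment has actually taken effect.
 */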
static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}

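/*
 * Base-class defaults.  A concrete device overrides the hooks it needs;
 * a minimal sketch, with illustrative names that are not from this file:
 *
 *     static void virtio_foo_class_init(ObjectClass *klass, void *data)
 *     {
 *         VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
 *
 *         vdc->realize = virtio_foo_device_realize;
 *         vdc->unrealize = virtio_foo_device_unrealize;
 *         vdc->get_features = virtio_foo_get_features;
 *         // start/stop_ioeventfd keep the defaults installed below
 *     }
 */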
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)