/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "standard-headers/linux/virtio_ids.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN 4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;

struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};

/* Called within call_rcu(). */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}

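/*
 * (Re)create the MemoryRegionCaches for this virtqueue's desc, used and
 * avail rings so the fast address_space_*_cached() accessors can be used.
 * Readers pick up vq->vring.caches under RCU; the previous caches are
 * freed after a grace period.
 */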
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}

/* Called within rcu_read_lock(). */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    address_space_read_cached(cache, off_flags, &e->flags,
                              sizeof(e->flags));
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    address_space_read_cached(cache, off_off, &e->off_wrap,
                              sizeof(e->off_wrap));
    virtio_tswap16s(vdev, &e->off_wrap);
    virtio_tswap16s(vdev, &e->flags);
}

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_tswap16s(vdev, &off_wrap);
    address_space_write_cached(cache, off, &off_wrap, sizeof(off_wrap));
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_tswap16s(vdev, &flags);
    address_space_write_cached(cache, off, &flags, sizeof(flags));
    address_space_cache_invalidate(cache, off, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock(). */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;

    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}

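/*
 * Enable or disable guest->host notifications for a split ring: either
 * publish the next expected avail index via the used event (when
 * VIRTIO_RING_F_EVENT_IDX is negotiated) or toggle VRING_USED_F_NO_NOTIFY
 * in the used ring flags.
 */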
static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

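/*
 * Enable or disable guest->host notifications for a packed ring by
 * rewriting the device event suppression structure (mapped at
 * vq->vring.used for packed layouts).
 */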
static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    address_space_read_cached(cache,
                              i * sizeof(VRingPackedDesc) +
                              offsetof(VRingPackedDesc, flags),
                              flags, sizeof(*flags));
    virtio_tswap16s(vdev, flags);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the other fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                     offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_tswap16s(vdev, &desc->flags);
    address_space_write_cached(cache, off, &desc->flags, sizeof(desc->flags));
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}

static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}

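/*
 * A packed descriptor is available when its AVAIL and USED bits differ
 * and the AVAIL bit matches the ring's current wrap counter.
 */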
static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock(). */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

/* Called within rcu_read_lock(). */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue. This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue. The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}

static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                 unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

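/*
 * Write one used descriptor back into the packed ring at used_idx + idx,
 * wrapping around (and flipping the wrap counter) past the end of the
 * ring. With strict_order set, the descriptor's flags are written only
 * after its id/len data is visible to the guest.
 */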
static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}

/* Called within rcu_read_lock(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}

/* Called within rcu_read_lock(). */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

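/*
 * Publish @count previously filled packed-ring elements. Elements
 * 1..count-1 are written first without ordering constraints; element 0 is
 * written last with strict ordering, so the guest sees the whole batch
 * only once the first descriptor's flags flip.
 */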
static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
    ndescs += vq->used_elems[0].ndescs;

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
    }
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

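/*
 * Return how many heads the guest has made available past @idx, bounded
 * by the ring size; reports a virtio error on a bogus avail index.
 */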
/* Called within rcu_read_lock(). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock(). */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

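/*
 * Follow the VRING_DESC_F_NEXT link of a split descriptor chain, reading
 * the next descriptor into @desc. Returns one of the VIRTQUEUE_READ_DESC_*
 * codes above.
 */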
static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

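/*
 * Walk every available chain of a split ring (following indirect tables)
 * and total the device-readable (out) and device-writable (in) byte
 * counts, stopping early once both requested maxima are reached.
 */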
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                                            unsigned int *in_bytes,
                                            unsigned int *out_bytes,
                                            unsigned max_in_bytes,
                                            unsigned max_out_bytes)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    RCU_READ_LOCK_GUARD();

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;
    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

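/*
 * Advance to the next descriptor of a packed chain. Inside an indirect
 * table the chain ends at the table boundary; on the main ring the index
 * wraps back to the start instead.
 */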
static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}

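/*
 * Packed-ring counterpart of virtqueue_split_get_avail_bytes(); it also
 * records in the shadow avail index/wrap counter where the next guest
 * kick is expected.
 */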
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache *desc_cache;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    RCU_READ_LOCK_GUARD();
    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;
    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;

        desc_cache = &caches->desc;
        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
            idx++;
        } else {
            idx += num_bufs - total_bufs;
            total_bufs = num_bufs;
        }

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;
done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

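/*
 * Map one guest buffer into host iovecs, splitting it across several
 * entries when dma_memory_map() cannot map it contiguously. Returns false
 * (after reporting a virtio error) for zero-sized or unmappable buffers.
 */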
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                        false);
}

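/*
 * Allocate a VirtQueueElement together with its in/out address and iovec
 * arrays in a single allocation; the arrays are laid out, suitably
 * aligned, directly after the (possibly device-extended) element of
 * size @sz.
 */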
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

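/*
 * Pop the next available element from a split ring: read the head index,
 * walk the descriptor chain (direct or indirect), map every buffer, and
 * return a freshly allocated VirtQueueElement, or NULL if the ring is
 * empty or an error occurred.
 */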
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are neither input nor output buffers. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_split_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    elem->ndescs = 1;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

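/*
 * Packed-ring counterpart of virtqueue_split_pop(). The element id is
 * taken from the head descriptor, and last_avail_idx advances by the
 * number of descriptors consumed, flipping the wrap counter when it
 * wraps past the end of the ring.
 */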
static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingPackedDesc desc;
    uint16_t id;
    int rc;

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_packed_empty_rcu(vq)) {
        goto done;
    }

    /* When we start there are neither input nor output buffers. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    i = vq->last_avail_idx;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingPackedDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
    id = desc.id;
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingPackedDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingPackedDesc);
        i = 0;
        vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
                                             desc_cache ==
                                             &indirect_desc_cache);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    elem->index = id;
    elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
    vq->last_avail_idx += elem->ndescs;
    vq->inuse += elem->ndescs;

    if (vq->last_avail_idx >= vq->vring.num) {
        vq->last_avail_idx -= vq->vring.num;
        vq->last_avail_wrap_counter ^= 1;
    }

    vq->shadow_avail_idx = vq->last_avail_idx;
    vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    if (virtio_device_disabled(vq->vdev)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_pop(vq, sz);
    } else {
        return virtqueue_split_pop(vq, sz);
    }
}

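/*
 * Mark every outstanding packed-ring buffer as used without mapping or
 * copying its contents; backs virtqueue_drop_all() for packed rings.
 */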
virtqueue_packed_drop_all(VirtQueue * vq)1697 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
1698 {
1699 VRingMemoryRegionCaches *caches;
1700 MemoryRegionCache *desc_cache;
1701 unsigned int dropped = 0;
1702 VirtQueueElement elem = {};
1703 VirtIODevice *vdev = vq->vdev;
1704 VRingPackedDesc desc;
1705
1706 caches = vring_get_region_caches(vq);
1707 if (!caches) {
1708 return 0;
1709 }
1710
1711 desc_cache = &caches->desc;
1712
1713 virtio_queue_set_notification(vq, 0);
1714
1715 while (vq->inuse < vq->vring.num) {
1716 unsigned int idx = vq->last_avail_idx;
1717 /*
1718 * works similar to virtqueue_pop but does not map buffers
1719 * and does not allocate any memory.
1720 */
1721 vring_packed_desc_read(vdev, &desc, desc_cache,
1722 vq->last_avail_idx , true);
1723 if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
1724 break;
1725 }
1726 elem.index = desc.id;
1727 elem.ndescs = 1;
1728 while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
1729 vq->vring.num, &idx, false)) {
1730 ++elem.ndescs;
1731 }
1732 /*
1733 * immediately push the element, nothing to unmap
1734 * as both in_num and out_num are set to 0.
1735 */
1736 virtqueue_push(vq, &elem, 0);
1737 dropped++;
1738 vq->last_avail_idx += elem.ndescs;
1739 if (vq->last_avail_idx >= vq->vring.num) {
1740 vq->last_avail_idx -= vq->vring.num;
1741 vq->last_avail_wrap_counter ^= 1;
1742 }
1743 }
1744
1745 return dropped;
1746 }
1747
virtqueue_split_drop_all(VirtQueue * vq)1748 static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
1749 {
1750 unsigned int dropped = 0;
1751 VirtQueueElement elem = {};
1752 VirtIODevice *vdev = vq->vdev;
1753 bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1754
1755 while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
1756 /* works similar to virtqueue_pop but does not map buffers
1757 * and does not allocate any memory */
1758 smp_rmb();
1759 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1760 break;
1761 }
1762 vq->inuse++;
1763 vq->last_avail_idx++;
1764 if (fEventIdx) {
1765 vring_set_avail_event(vq, vq->last_avail_idx);
1766 }
1767 /* immediately push the element, nothing to unmap
1768 * as both in_num and out_num are set to 0 */
1769 virtqueue_push(vq, &elem, 0);
1770 dropped++;
1771 }
1772
1773 return dropped;
1774 }
1775
1776 /* virtqueue_drop_all:
1777 * @vq: The #VirtQueue
1778 * Drops all queued buffers and indicates them to the guest
1779 * as if they are done. Useful when buffers can not be
1780 * processed but must be returned to the guest.
1781 */
virtqueue_drop_all(VirtQueue * vq)1782 unsigned int virtqueue_drop_all(VirtQueue *vq)
1783 {
1784 struct VirtIODevice *vdev = vq->vdev;
1785
1786 if (virtio_device_disabled(vq->vdev)) {
1787 return 0;
1788 }
1789
1790 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1791 return virtqueue_packed_drop_all(vq);
1792 } else {
1793 return virtqueue_split_drop_all(vq);
1794 }
1795 }
1796
1797 /* Reading and writing a structure directly to QEMUFile is *awful*, but
1798 * it is what QEMU has always done by mistake. We can change it sooner
1799 * or later by bumping the version number of the affected vm states.
1800 * In the meanwhile, since the in-memory layout of VirtQueueElement
1801 * has changed, we need to marshal to and from the layout that was
1802 * used before the change.
1803 */
1804 typedef struct VirtQueueElementOld {
1805 unsigned int index;
1806 unsigned int out_num;
1807 unsigned int in_num;
1808 hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1809 hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1810 struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1811 struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1812 } VirtQueueElementOld;
1813
qemu_get_virtqueue_element(VirtIODevice * vdev,QEMUFile * f,size_t sz)1814 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1815 {
1816 VirtQueueElement *elem;
1817 VirtQueueElementOld data;
1818 int i;
1819
1820 qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1821
1822 /* TODO: teach all callers that this can fail, and return failure instead
1823 * of asserting here.
1824 * This is just one thing (there are probably more) that must be
1825 * fixed before we can allow NDEBUG compilation.
1826 */
1827 assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1828 assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1829
1830 elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1831 elem->index = data.index;
1832
1833 for (i = 0; i < elem->in_num; i++) {
1834 elem->in_addr[i] = data.in_addr[i];
1835 }
1836
1837 for (i = 0; i < elem->out_num; i++) {
1838 elem->out_addr[i] = data.out_addr[i];
1839 }
1840
1841 for (i = 0; i < elem->in_num; i++) {
1842 /* Base is overwritten by virtqueue_map. */
1843 elem->in_sg[i].iov_base = 0;
1844 elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1845 }
1846
1847 for (i = 0; i < elem->out_num; i++) {
1848 /* Base is overwritten by virtqueue_map. */
1849 elem->out_sg[i].iov_base = 0;
1850 elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1851 }
1852
1853 if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1854 qemu_get_be32s(f, &elem->ndescs);
1855 }
1856
1857 virtqueue_map(vdev, elem);
1858 return elem;
1859 }
1860
1861 void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
1862 VirtQueueElement *elem)
1863 {
1864 VirtQueueElementOld data;
1865 int i;
1866
1867 memset(&data, 0, sizeof(data));
1868 data.index = elem->index;
1869 data.in_num = elem->in_num;
1870 data.out_num = elem->out_num;
1871
1872 for (i = 0; i < elem->in_num; i++) {
1873 data.in_addr[i] = elem->in_addr[i];
1874 }
1875
1876 for (i = 0; i < elem->out_num; i++) {
1877 data.out_addr[i] = elem->out_addr[i];
1878 }
1879
1880 for (i = 0; i < elem->in_num; i++) {
1881 /* Base is overwritten by virtqueue_map when loading. Do not
1882 * save it, as it would leak the QEMU address space layout. */
1883 data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1884 }
1885
1886 for (i = 0; i < elem->out_num; i++) {
1887 /* Do not save iov_base as above. */
1888 data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1889 }
1890
1891 if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1892 qemu_put_be32s(f, &elem->ndescs);
1893 }
1894
1895 qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1896 }
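
/*
 * A minimal usage sketch for the two marshalling helpers above,
 * following the virtio-blk pattern. VirtIOMyReq and its status field
 * are hypothetical; the element must be the first struct member so
 * that the size passed to qemu_get_virtqueue_element() covers it.
 */
typedef struct VirtIOMyReq {
    VirtQueueElement elem;      /* must come first */
    uint32_t status;
} VirtIOMyReq;

static void my_put_request(VirtIODevice *vdev, QEMUFile *f, VirtIOMyReq *req)
{
    qemu_put_virtqueue_element(vdev, f, &req->elem);
    qemu_put_be32(f, req->status);
}

static VirtIOMyReq *my_get_request(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOMyReq *req = qemu_get_virtqueue_element(vdev, f, sizeof(*req));

    req->status = qemu_get_be32(f);
    return req;
}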
1897
1898 /* virtio device */
1899 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
1900 {
1901 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1902 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1903
1904 if (virtio_device_disabled(vdev)) {
1905 return;
1906 }
1907
1908 if (k->notify) {
1909 k->notify(qbus->parent, vector);
1910 }
1911 }
1912
1913 void virtio_update_irq(VirtIODevice *vdev)
1914 {
1915 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1916 }
1917
1918 static int virtio_validate_features(VirtIODevice *vdev)
1919 {
1920 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1921
1922 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
1923 !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1924 return -EFAULT;
1925 }
1926
1927 if (k->validate_features) {
1928 return k->validate_features(vdev);
1929 } else {
1930 return 0;
1931 }
1932 }
1933
1934 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
1935 {
1936 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1937 trace_virtio_set_status(vdev, val);
1938
1939 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1940 if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
1941 val & VIRTIO_CONFIG_S_FEATURES_OK) {
1942 int ret = virtio_validate_features(vdev);
1943
1944 if (ret) {
1945 return ret;
1946 }
1947 }
1948 }
1949
1950 if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
1951 (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
1952 virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
1953 }
1954
1955 if (k->set_status) {
1956 k->set_status(vdev, val);
1957 }
1958 vdev->status = val;
1959
1960 return 0;
1961 }
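
/*
 * A sketch of the transport side, with a hypothetical register-write
 * handler: a failed FEATURES_OK validation leaves the bit clear, which
 * the driver detects by re-reading the status field. (A real transport
 * additionally treats a write of 0 as a full device reset.)
 */
static void my_transport_status_write(VirtIODevice *vdev, uint8_t val)
{
    if (virtio_set_status(vdev, val) != 0) {
        virtio_set_status(vdev, val & ~VIRTIO_CONFIG_S_FEATURES_OK);
    }
}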
1962
1963 static enum virtio_device_endian virtio_default_endian(void)
1964 {
1965 if (target_words_bigendian()) {
1966 return VIRTIO_DEVICE_ENDIAN_BIG;
1967 } else {
1968 return VIRTIO_DEVICE_ENDIAN_LITTLE;
1969 }
1970 }
1971
1972 static enum virtio_device_endian virtio_current_cpu_endian(void)
1973 {
1974 if (cpu_virtio_is_big_endian(current_cpu)) {
1975 return VIRTIO_DEVICE_ENDIAN_BIG;
1976 } else {
1977 return VIRTIO_DEVICE_ENDIAN_LITTLE;
1978 }
1979 }
1980
1981 void virtio_reset(void *opaque)
1982 {
1983 VirtIODevice *vdev = opaque;
1984 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1985 int i;
1986
1987 virtio_set_status(vdev, 0);
1988 if (current_cpu) {
1989 /* Guest initiated reset */
1990 vdev->device_endian = virtio_current_cpu_endian();
1991 } else {
1992 /* System reset */
1993 vdev->device_endian = virtio_default_endian();
1994 }
1995
1996 if (k->reset) {
1997 k->reset(vdev);
1998 }
1999
2000 vdev->start_on_kick = false;
2001 vdev->started = false;
2002 vdev->broken = false;
2003 vdev->guest_features = 0;
2004 vdev->queue_sel = 0;
2005 vdev->status = 0;
2006 vdev->disabled = false;
2007 qatomic_set(&vdev->isr, 0);
2008 vdev->config_vector = VIRTIO_NO_VECTOR;
2009 virtio_notify_vector(vdev, vdev->config_vector);
2010
2011     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2012 vdev->vq[i].vring.desc = 0;
2013 vdev->vq[i].vring.avail = 0;
2014 vdev->vq[i].vring.used = 0;
2015 vdev->vq[i].last_avail_idx = 0;
2016 vdev->vq[i].shadow_avail_idx = 0;
2017 vdev->vq[i].used_idx = 0;
2018 vdev->vq[i].last_avail_wrap_counter = true;
2019 vdev->vq[i].shadow_avail_wrap_counter = true;
2020 vdev->vq[i].used_wrap_counter = true;
2021 virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2022 vdev->vq[i].signalled_used = 0;
2023 vdev->vq[i].signalled_used_valid = false;
2024 vdev->vq[i].notification = true;
2025 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2026 vdev->vq[i].inuse = 0;
2027 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2028 }
2029 }
2030
2031 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
2032 {
2033 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2034 uint8_t val;
2035
2036 if (addr + sizeof(val) > vdev->config_len) {
2037 return (uint32_t)-1;
2038 }
2039
2040 k->get_config(vdev, vdev->config);
2041
2042 val = ldub_p(vdev->config + addr);
2043 return val;
2044 }
2045
2046 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
2047 {
2048 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2049 uint16_t val;
2050
2051 if (addr + sizeof(val) > vdev->config_len) {
2052 return (uint32_t)-1;
2053 }
2054
2055 k->get_config(vdev, vdev->config);
2056
2057 val = lduw_p(vdev->config + addr);
2058 return val;
2059 }
2060
2061 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
2062 {
2063 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2064 uint32_t val;
2065
2066 if (addr + sizeof(val) > vdev->config_len) {
2067 return (uint32_t)-1;
2068 }
2069
2070 k->get_config(vdev, vdev->config);
2071
2072 val = ldl_p(vdev->config + addr);
2073 return val;
2074 }
2075
2076 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2077 {
2078 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2079 uint8_t val = data;
2080
2081 if (addr + sizeof(val) > vdev->config_len) {
2082 return;
2083 }
2084
2085 stb_p(vdev->config + addr, val);
2086
2087 if (k->set_config) {
2088 k->set_config(vdev, vdev->config);
2089 }
2090 }
2091
2092 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2093 {
2094 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2095 uint16_t val = data;
2096
2097 if (addr + sizeof(val) > vdev->config_len) {
2098 return;
2099 }
2100
2101 stw_p(vdev->config + addr, val);
2102
2103 if (k->set_config) {
2104 k->set_config(vdev, vdev->config);
2105 }
2106 }
2107
2108 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2109 {
2110 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2111 uint32_t val = data;
2112
2113 if (addr + sizeof(val) > vdev->config_len) {
2114 return;
2115 }
2116
2117 stl_p(vdev->config + addr, val);
2118
2119 if (k->set_config) {
2120 k->set_config(vdev, vdev->config);
2121 }
2122 }
2123
2124 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
2125 {
2126 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2127 uint8_t val;
2128
2129 if (addr + sizeof(val) > vdev->config_len) {
2130 return (uint32_t)-1;
2131 }
2132
2133 k->get_config(vdev, vdev->config);
2134
2135 val = ldub_p(vdev->config + addr);
2136 return val;
2137 }
2138
2139 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
2140 {
2141 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2142 uint16_t val;
2143
2144 if (addr + sizeof(val) > vdev->config_len) {
2145 return (uint32_t)-1;
2146 }
2147
2148 k->get_config(vdev, vdev->config);
2149
2150 val = lduw_le_p(vdev->config + addr);
2151 return val;
2152 }
2153
2154 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
2155 {
2156 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2157 uint32_t val;
2158
2159 if (addr + sizeof(val) > vdev->config_len) {
2160 return (uint32_t)-1;
2161 }
2162
2163 k->get_config(vdev, vdev->config);
2164
2165 val = ldl_le_p(vdev->config + addr);
2166 return val;
2167 }
2168
2169 void virtio_config_modern_writeb(VirtIODevice *vdev,
2170 uint32_t addr, uint32_t data)
2171 {
2172 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2173 uint8_t val = data;
2174
2175 if (addr + sizeof(val) > vdev->config_len) {
2176 return;
2177 }
2178
2179 stb_p(vdev->config + addr, val);
2180
2181 if (k->set_config) {
2182 k->set_config(vdev, vdev->config);
2183 }
2184 }
2185
2186 void virtio_config_modern_writew(VirtIODevice *vdev,
2187 uint32_t addr, uint32_t data)
2188 {
2189 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2190 uint16_t val = data;
2191
2192 if (addr + sizeof(val) > vdev->config_len) {
2193 return;
2194 }
2195
2196 stw_le_p(vdev->config + addr, val);
2197
2198 if (k->set_config) {
2199 k->set_config(vdev, vdev->config);
2200 }
2201 }
2202
2203 void virtio_config_modern_writel(VirtIODevice *vdev,
2204 uint32_t addr, uint32_t data)
2205 {
2206 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2207 uint32_t val = data;
2208
2209 if (addr + sizeof(val) > vdev->config_len) {
2210 return;
2211 }
2212
2213 stl_le_p(vdev->config + addr, val);
2214
2215 if (k->set_config) {
2216 k->set_config(vdev, vdev->config);
2217 }
2218 }
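
/*
 * The legacy accessors above use guest-natural loads/stores (ldub_p,
 * lduw_p, ldl_p), while the modern accessors use the *_le_p variants:
 * VIRTIO 1.0 config space is always little-endian. As a standalone
 * illustration, this is the value ldl_le_p() computes, independent of
 * host byte order:
 */
static inline uint32_t my_ldl_le(const uint8_t *p)
{
    /* e.g. bytes { 0x78, 0x56, 0x34, 0x12 } -> 0x12345678 */
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}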
2219
2220 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2221 {
2222 if (!vdev->vq[n].vring.num) {
2223 return;
2224 }
2225 vdev->vq[n].vring.desc = addr;
2226 virtio_queue_update_rings(vdev, n);
2227 }
2228
2229 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2230 {
2231 return vdev->vq[n].vring.desc;
2232 }
2233
2234 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2235 hwaddr avail, hwaddr used)
2236 {
2237 if (!vdev->vq[n].vring.num) {
2238 return;
2239 }
2240 vdev->vq[n].vring.desc = desc;
2241 vdev->vq[n].vring.avail = avail;
2242 vdev->vq[n].vring.used = used;
2243 virtio_init_region_cache(vdev, n);
2244 }
2245
2246 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2247 {
2248 /* Don't allow guest to flip queue between existent and
2249 * nonexistent states, or to set it to an invalid size.
2250 */
2251 if (!!num != !!vdev->vq[n].vring.num ||
2252 num > VIRTQUEUE_MAX_SIZE ||
2253 num < 0) {
2254 return;
2255 }
2256 vdev->vq[n].vring.num = num;
2257 }
2258
2259 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2260 {
2261 return QLIST_FIRST(&vdev->vector_queues[vector]);
2262 }
2263
2264 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2265 {
2266 return QLIST_NEXT(vq, node);
2267 }
2268
2269 int virtio_queue_get_num(VirtIODevice *vdev, int n)
2270 {
2271 return vdev->vq[n].vring.num;
2272 }
2273
2274 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2275 {
2276 return vdev->vq[n].vring.num_default;
2277 }
2278
2279 int virtio_get_num_queues(VirtIODevice *vdev)
2280 {
2281 int i;
2282
2283 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2284 if (!virtio_queue_get_num(vdev, i)) {
2285 break;
2286 }
2287 }
2288
2289 return i;
2290 }
2291
2292 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2293 {
2294 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2295 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2296
2297 /* virtio-1 compliant devices cannot change the alignment */
2298 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2299 error_report("tried to modify queue alignment for virtio-1 device");
2300 return;
2301 }
2302 /* Check that the transport told us it was going to do this
2303 * (so a buggy transport will immediately assert rather than
2304 * silently failing to migrate this state)
2305 */
2306 assert(k->has_variable_vring_alignment);
2307
2308 if (align) {
2309 vdev->vq[n].vring.align = align;
2310 virtio_queue_update_rings(vdev, n);
2311 }
2312 }
2313
2314 static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
2315 {
2316 bool ret = false;
2317
2318 if (vq->vring.desc && vq->handle_aio_output) {
2319 VirtIODevice *vdev = vq->vdev;
2320
2321 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2322 ret = vq->handle_aio_output(vdev, vq);
2323
2324 if (unlikely(vdev->start_on_kick)) {
2325 virtio_set_started(vdev, true);
2326 }
2327 }
2328
2329 return ret;
2330 }
2331
2332 static void virtio_queue_notify_vq(VirtQueue *vq)
2333 {
2334 if (vq->vring.desc && vq->handle_output) {
2335 VirtIODevice *vdev = vq->vdev;
2336
2337 if (unlikely(vdev->broken)) {
2338 return;
2339 }
2340
2341 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2342 vq->handle_output(vdev, vq);
2343
2344 if (unlikely(vdev->start_on_kick)) {
2345 virtio_set_started(vdev, true);
2346 }
2347 }
2348 }
2349
2350 void virtio_queue_notify(VirtIODevice *vdev, int n)
2351 {
2352 VirtQueue *vq = &vdev->vq[n];
2353
2354 if (unlikely(!vq->vring.desc || vdev->broken)) {
2355 return;
2356 }
2357
2358 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2359 if (vq->host_notifier_enabled) {
2360 event_notifier_set(&vq->host_notifier);
2361 } else if (vq->handle_output) {
2362 vq->handle_output(vdev, vq);
2363
2364 if (unlikely(vdev->start_on_kick)) {
2365 virtio_set_started(vdev, true);
2366 }
2367 }
2368 }
2369
2370 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2371 {
2372 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2373 VIRTIO_NO_VECTOR;
2374 }
2375
2376 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2377 {
2378 VirtQueue *vq = &vdev->vq[n];
2379
2380 if (n < VIRTIO_QUEUE_MAX) {
2381 if (vdev->vector_queues &&
2382 vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2383 QLIST_REMOVE(vq, node);
2384 }
2385 vdev->vq[n].vector = vector;
2386 if (vdev->vector_queues &&
2387 vector != VIRTIO_NO_VECTOR) {
2388 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2389 }
2390 }
2391 }
2392
2393 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2394 VirtIOHandleOutput handle_output)
2395 {
2396 int i;
2397
2398 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2399 if (vdev->vq[i].vring.num == 0)
2400 break;
2401 }
2402
2403 if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2404 abort();
2405
2406 vdev->vq[i].vring.num = queue_size;
2407 vdev->vq[i].vring.num_default = queue_size;
2408 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2409 vdev->vq[i].handle_output = handle_output;
2410 vdev->vq[i].handle_aio_output = NULL;
2411 vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) *
2412 queue_size);
2413
2414 return &vdev->vq[i];
2415 }
2416
2417 void virtio_delete_queue(VirtQueue *vq)
2418 {
2419 vq->vring.num = 0;
2420 vq->vring.num_default = 0;
2421 vq->handle_output = NULL;
2422 vq->handle_aio_output = NULL;
2423 g_free(vq->used_elems);
2424 vq->used_elems = NULL;
2425 virtio_virtqueue_reset_region_cache(vq);
2426 }
2427
2428 void virtio_del_queue(VirtIODevice *vdev, int n)
2429 {
2430 if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2431 abort();
2432 }
2433
2434 virtio_delete_queue(&vdev->vq[n]);
2435 }
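
/*
 * A sketch of the matching teardown in a hypothetical device's
 * unrealize: every queue created with virtio_add_queue() is deleted
 * again before the generic virtio state is released. MY_NUM_VQS and
 * the function name are made up.
 */
static void my_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    int i;

    for (i = 0; i < MY_NUM_VQS; i++) {
        virtio_del_queue(vdev, i);
    }
    virtio_cleanup(vdev);
}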
2436
2437 static void virtio_set_isr(VirtIODevice *vdev, int value)
2438 {
2439 uint8_t old = qatomic_read(&vdev->isr);
2440
2441 /* Do not write ISR if it does not change, so that its cacheline remains
2442 * shared in the common case where the guest does not read it.
2443 */
2444 if ((old & value) != value) {
2445 qatomic_or(&vdev->isr, value);
2446 }
2447 }
2448
2449 /* Called within rcu_read_lock(). */
2450 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2451 {
2452 uint16_t old, new;
2453 bool v;
2454 /* We need to expose used array entries before checking used event. */
2455 smp_mb();
2456     /* Always notify when queue is empty, if VIRTIO_F_NOTIFY_ON_EMPTY was negotiated */
2457 if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2458 !vq->inuse && virtio_queue_empty(vq)) {
2459 return true;
2460 }
2461
2462 if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2463 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2464 }
2465
2466 v = vq->signalled_used_valid;
2467 vq->signalled_used_valid = true;
2468 old = vq->signalled_used;
2469 new = vq->signalled_used = vq->used_idx;
2470 return !v || vring_need_event(vring_get_used_event(vq), new, old);
2471 }
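
/*
 * For reference, vring_need_event() used above is the standard helper
 * from the virtio headers. It treats the indices as free-running
 * uint16_t counters and asks "did new pass event_idx since old?":
 *
 *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 *
 * Worked example: with old = 3 and new = 7, event_idx = 5 lies in
 * (3, 7] ((7-5-1) = 1 < (7-3) = 4, so notify), while event_idx = 2
 * does not ((7-2-1) = 4 is not < 4, so the notification is skipped).
 */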
2472
2473 static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2474 uint16_t off_wrap, uint16_t new,
2475 uint16_t old)
2476 {
2477 int off = off_wrap & ~(1 << 15);
2478
2479 if (wrap != off_wrap >> 15) {
2480 off -= vq->vring.num;
2481 }
2482
2483 return vring_need_event(off, new, old);
2484 }
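
/*
 * off_wrap packs the driver's event suppression state as decoded above:
 * bit 15 is the event's wrap counter, bits 0..14 the event offset. For
 * example, off_wrap = 0x8005 means offset 5 with wrap bit 1; if the
 * device's current wrap counter differs, the event belongs to the
 * previous wrap, so the offset is shifted down by vring.num before the
 * vring_need_event() comparison.
 */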
2485
2486 /* Called within rcu_read_lock(). */
2487 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2488 {
2489 VRingPackedDescEvent e;
2490 uint16_t old, new;
2491 bool v;
2492 VRingMemoryRegionCaches *caches;
2493
2494 caches = vring_get_region_caches(vq);
2495 if (!caches) {
2496 return false;
2497 }
2498
2499 vring_packed_event_read(vdev, &caches->avail, &e);
2500
2501 old = vq->signalled_used;
2502 new = vq->signalled_used = vq->used_idx;
2503 v = vq->signalled_used_valid;
2504 vq->signalled_used_valid = true;
2505
2506 if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2507 return false;
2508 } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2509 return true;
2510 }
2511
2512 return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2513 e.off_wrap, new, old);
2514 }
2515
2516 /* Called within rcu_read_lock(). */
2517 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2518 {
2519 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2520 return virtio_packed_should_notify(vdev, vq);
2521 } else {
2522 return virtio_split_should_notify(vdev, vq);
2523 }
2524 }
2525
2526 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2527 {
2528 WITH_RCU_READ_LOCK_GUARD() {
2529 if (!virtio_should_notify(vdev, vq)) {
2530 return;
2531 }
2532 }
2533
2534 trace_virtio_notify_irqfd(vdev, vq);
2535
2536 /*
2537 * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2538      * Windows drivers included in virtio-win 1.8.0 (circa 2015) are
2539      * incorrectly polling this bit during crashdump and hibernation
2540      * in MSI mode, causing a hang if this bit is never updated.
2541      * Recent releases of Windows do not really shut down, but rather
2542      * log out and hibernate to make the next startup faster. Hence,
2543      * this bug manifested as a more serious hang during shutdown.
2544      *
2545      * The next driver release, from 2016, fixed the problem, so working
2546      * around it is not a must, but it's easy to do, so let's do it here.
2547 *
2548 * Note: it's safe to update ISR from any thread as it was switched
2549 * to an atomic operation.
2550 */
2551 virtio_set_isr(vq->vdev, 0x1);
2552 event_notifier_set(&vq->guest_notifier);
2553 }
2554
2555 static void virtio_irq(VirtQueue *vq)
2556 {
2557 virtio_set_isr(vq->vdev, 0x1);
2558 virtio_notify_vector(vq->vdev, vq->vector);
2559 }
2560
2561 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2562 {
2563 WITH_RCU_READ_LOCK_GUARD() {
2564 if (!virtio_should_notify(vdev, vq)) {
2565 return;
2566 }
2567 }
2568
2569 trace_virtio_notify(vdev, vq);
2570 virtio_irq(vq);
2571 }
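
/*
 * A sketch of the canonical completion path as seen from a device:
 * return the element, then let the suppression logic above decide
 * whether an interrupt is actually raised. The function name is
 * hypothetical; pushing and then g_free()-ing a popped element is the
 * usual pattern.
 */
static void my_complete_request(VirtIODevice *vdev, VirtQueue *vq,
                                VirtQueueElement *elem, unsigned written)
{
    virtqueue_push(vq, elem, written);  /* bytes written into in_sg */
    virtio_notify(vdev, vq);            /* may be elided by EVENT_IDX */
    g_free(elem);
}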
2572
2573 void virtio_notify_config(VirtIODevice *vdev)
2574 {
2575 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2576 return;
2577
2578 virtio_set_isr(vdev, 0x3);
2579 vdev->generation++;
2580 virtio_notify_vector(vdev, vdev->config_vector);
2581 }
2582
2583 static bool virtio_device_endian_needed(void *opaque)
2584 {
2585 VirtIODevice *vdev = opaque;
2586
2587 assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2588 if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2589 return vdev->device_endian != virtio_default_endian();
2590 }
2591 /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2592 return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2593 }
2594
2595 static bool virtio_64bit_features_needed(void *opaque)
2596 {
2597 VirtIODevice *vdev = opaque;
2598
2599 return (vdev->host_features >> 32) != 0;
2600 }
2601
2602 static bool virtio_virtqueue_needed(void *opaque)
2603 {
2604 VirtIODevice *vdev = opaque;
2605
2606 return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2607 }
2608
2609 static bool virtio_packed_virtqueue_needed(void *opaque)
2610 {
2611 VirtIODevice *vdev = opaque;
2612
2613 return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2614 }
2615
2616 static bool virtio_ringsize_needed(void *opaque)
2617 {
2618 VirtIODevice *vdev = opaque;
2619 int i;
2620
2621 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2622 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2623 return true;
2624 }
2625 }
2626 return false;
2627 }
2628
2629 static bool virtio_extra_state_needed(void *opaque)
2630 {
2631 VirtIODevice *vdev = opaque;
2632 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2633 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2634
2635 return k->has_extra_state &&
2636 k->has_extra_state(qbus->parent);
2637 }
2638
2639 static bool virtio_broken_needed(void *opaque)
2640 {
2641 VirtIODevice *vdev = opaque;
2642
2643 return vdev->broken;
2644 }
2645
2646 static bool virtio_started_needed(void *opaque)
2647 {
2648 VirtIODevice *vdev = opaque;
2649
2650 return vdev->started;
2651 }
2652
2653 static bool virtio_disabled_needed(void *opaque)
2654 {
2655 VirtIODevice *vdev = opaque;
2656
2657 return vdev->disabled;
2658 }
2659
2660 static const VMStateDescription vmstate_virtqueue = {
2661 .name = "virtqueue_state",
2662 .version_id = 1,
2663 .minimum_version_id = 1,
2664 .fields = (VMStateField[]) {
2665 VMSTATE_UINT64(vring.avail, struct VirtQueue),
2666 VMSTATE_UINT64(vring.used, struct VirtQueue),
2667 VMSTATE_END_OF_LIST()
2668 }
2669 };
2670
2671 static const VMStateDescription vmstate_packed_virtqueue = {
2672 .name = "packed_virtqueue_state",
2673 .version_id = 1,
2674 .minimum_version_id = 1,
2675 .fields = (VMStateField[]) {
2676 VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2677 VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2678 VMSTATE_UINT16(used_idx, struct VirtQueue),
2679 VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2680 VMSTATE_UINT32(inuse, struct VirtQueue),
2681 VMSTATE_END_OF_LIST()
2682 }
2683 };
2684
2685 static const VMStateDescription vmstate_virtio_virtqueues = {
2686 .name = "virtio/virtqueues",
2687 .version_id = 1,
2688 .minimum_version_id = 1,
2689 .needed = &virtio_virtqueue_needed,
2690 .fields = (VMStateField[]) {
2691 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2692 VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
2693 VMSTATE_END_OF_LIST()
2694 }
2695 };
2696
2697 static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2698 .name = "virtio/packed_virtqueues",
2699 .version_id = 1,
2700 .minimum_version_id = 1,
2701 .needed = &virtio_packed_virtqueue_needed,
2702 .fields = (VMStateField[]) {
2703 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2704 VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2705 VMSTATE_END_OF_LIST()
2706 }
2707 };
2708
2709 static const VMStateDescription vmstate_ringsize = {
2710 .name = "ringsize_state",
2711 .version_id = 1,
2712 .minimum_version_id = 1,
2713 .fields = (VMStateField[]) {
2714 VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2715 VMSTATE_END_OF_LIST()
2716 }
2717 };
2718
2719 static const VMStateDescription vmstate_virtio_ringsize = {
2720 .name = "virtio/ringsize",
2721 .version_id = 1,
2722 .minimum_version_id = 1,
2723 .needed = &virtio_ringsize_needed,
2724 .fields = (VMStateField[]) {
2725 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2726 VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
2727 VMSTATE_END_OF_LIST()
2728 }
2729 };
2730
2731 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
2732 const VMStateField *field)
2733 {
2734 VirtIODevice *vdev = pv;
2735 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2736 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2737
2738 if (!k->load_extra_state) {
2739 return -1;
2740 } else {
2741 return k->load_extra_state(qbus->parent, f);
2742 }
2743 }
2744
2745 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
2746 const VMStateField *field, JSONWriter *vmdesc)
2747 {
2748 VirtIODevice *vdev = pv;
2749 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2750 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2751
2752 k->save_extra_state(qbus->parent, f);
2753 return 0;
2754 }
2755
2756 static const VMStateInfo vmstate_info_extra_state = {
2757 .name = "virtqueue_extra_state",
2758 .get = get_extra_state,
2759 .put = put_extra_state,
2760 };
2761
2762 static const VMStateDescription vmstate_virtio_extra_state = {
2763 .name = "virtio/extra_state",
2764 .version_id = 1,
2765 .minimum_version_id = 1,
2766 .needed = &virtio_extra_state_needed,
2767 .fields = (VMStateField[]) {
2768 {
2769 .name = "extra_state",
2770 .version_id = 0,
2771 .field_exists = NULL,
2772 .size = 0,
2773 .info = &vmstate_info_extra_state,
2774 .flags = VMS_SINGLE,
2775 .offset = 0,
2776 },
2777 VMSTATE_END_OF_LIST()
2778 }
2779 };
2780
2781 static const VMStateDescription vmstate_virtio_device_endian = {
2782 .name = "virtio/device_endian",
2783 .version_id = 1,
2784 .minimum_version_id = 1,
2785 .needed = &virtio_device_endian_needed,
2786 .fields = (VMStateField[]) {
2787 VMSTATE_UINT8(device_endian, VirtIODevice),
2788 VMSTATE_END_OF_LIST()
2789 }
2790 };
2791
2792 static const VMStateDescription vmstate_virtio_64bit_features = {
2793 .name = "virtio/64bit_features",
2794 .version_id = 1,
2795 .minimum_version_id = 1,
2796 .needed = &virtio_64bit_features_needed,
2797 .fields = (VMStateField[]) {
2798 VMSTATE_UINT64(guest_features, VirtIODevice),
2799 VMSTATE_END_OF_LIST()
2800 }
2801 };
2802
2803 static const VMStateDescription vmstate_virtio_broken = {
2804 .name = "virtio/broken",
2805 .version_id = 1,
2806 .minimum_version_id = 1,
2807 .needed = &virtio_broken_needed,
2808 .fields = (VMStateField[]) {
2809 VMSTATE_BOOL(broken, VirtIODevice),
2810 VMSTATE_END_OF_LIST()
2811 }
2812 };
2813
2814 static const VMStateDescription vmstate_virtio_started = {
2815 .name = "virtio/started",
2816 .version_id = 1,
2817 .minimum_version_id = 1,
2818 .needed = &virtio_started_needed,
2819 .fields = (VMStateField[]) {
2820 VMSTATE_BOOL(started, VirtIODevice),
2821 VMSTATE_END_OF_LIST()
2822 }
2823 };
2824
2825 static const VMStateDescription vmstate_virtio_disabled = {
2826 .name = "virtio/disabled",
2827 .version_id = 1,
2828 .minimum_version_id = 1,
2829 .needed = &virtio_disabled_needed,
2830 .fields = (VMStateField[]) {
2831 VMSTATE_BOOL(disabled, VirtIODevice),
2832 VMSTATE_END_OF_LIST()
2833 }
2834 };
2835
2836 static const VMStateDescription vmstate_virtio = {
2837 .name = "virtio",
2838 .version_id = 1,
2839 .minimum_version_id = 1,
2840 .minimum_version_id_old = 1,
2841 .fields = (VMStateField[]) {
2842 VMSTATE_END_OF_LIST()
2843 },
2844 .subsections = (const VMStateDescription*[]) {
2845 &vmstate_virtio_device_endian,
2846 &vmstate_virtio_64bit_features,
2847 &vmstate_virtio_virtqueues,
2848 &vmstate_virtio_ringsize,
2849 &vmstate_virtio_broken,
2850 &vmstate_virtio_extra_state,
2851 &vmstate_virtio_started,
2852 &vmstate_virtio_packed_virtqueues,
2853 &vmstate_virtio_disabled,
2854 NULL
2855 }
2856 };
2857
2858 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
2859 {
2860 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2861 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2862 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2863 uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
2864 int i;
2865
2866 if (k->save_config) {
2867 k->save_config(qbus->parent, f);
2868 }
2869
2870 qemu_put_8s(f, &vdev->status);
2871 qemu_put_8s(f, &vdev->isr);
2872 qemu_put_be16s(f, &vdev->queue_sel);
2873 qemu_put_be32s(f, &guest_features_lo);
2874 qemu_put_be32(f, vdev->config_len);
2875 qemu_put_buffer(f, vdev->config, vdev->config_len);
2876
2877 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2878 if (vdev->vq[i].vring.num == 0)
2879 break;
2880 }
2881
2882 qemu_put_be32(f, i);
2883
2884 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2885 if (vdev->vq[i].vring.num == 0)
2886 break;
2887
2888 qemu_put_be32(f, vdev->vq[i].vring.num);
2889 if (k->has_variable_vring_alignment) {
2890 qemu_put_be32(f, vdev->vq[i].vring.align);
2891 }
2892 /*
2893          * Save desc now; the rest of the ring addresses are saved in
2894 * subsections for VIRTIO-1 devices.
2895 */
2896 qemu_put_be64(f, vdev->vq[i].vring.desc);
2897 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
2898 if (k->save_queue) {
2899 k->save_queue(qbus->parent, i, f);
2900 }
2901 }
2902
2903 if (vdc->save != NULL) {
2904 vdc->save(vdev, f);
2905 }
2906
2907 if (vdc->vmsd) {
2908 int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
2909 if (ret) {
2910 return ret;
2911 }
2912 }
2913
2914 /* Subsections */
2915 return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
2916 }
2917
2918 /* A wrapper for use as a VMState .put function */
2919 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
2920 const VMStateField *field, JSONWriter *vmdesc)
2921 {
2922 return virtio_save(VIRTIO_DEVICE(opaque), f);
2923 }
2924
2925 /* A wrapper for use as a VMState .get function */
2926 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
2927 const VMStateField *field)
2928 {
2929 VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
2930 DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
2931
2932 return virtio_load(vdev, f, dc->vmsd->version_id);
2933 }
2934
2935 const VMStateInfo virtio_vmstate_info = {
2936 .name = "virtio",
2937 .get = virtio_device_get,
2938 .put = virtio_device_put,
2939 };
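
/*
 * A device routes its migration stream through the wrappers above by
 * placing VMSTATE_VIRTIO_DEVICE (defined in the virtio header, it
 * expands to a field using virtio_vmstate_info) in its own
 * VMStateDescription; a minimal sketch with a hypothetical name:
 */
static const VMStateDescription vmstate_my_virtio_device = {
    .name = "my-virtio-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};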
2940
2941 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
2942 {
2943 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2944 bool bad = (val & ~(vdev->host_features)) != 0;
2945
2946 val &= vdev->host_features;
2947 if (k->set_features) {
2948 k->set_features(vdev, val);
2949 }
2950 vdev->guest_features = val;
2951 return bad ? -1 : 0;
2952 }
2953
2954 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2955 {
2956 int ret;
2957 /*
2958 * The driver must not attempt to set features after feature negotiation
2959 * has finished.
2960 */
2961 if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2962 return -EINVAL;
2963 }
2964 ret = virtio_set_features_nocheck(vdev, val);
2965 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2966 /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
2967 int i;
2968 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2969 if (vdev->vq[i].vring.num != 0) {
2970 virtio_init_region_cache(vdev, i);
2971 }
2972 }
2973 }
2974 if (!ret) {
2975 if (!virtio_device_started(vdev, vdev->status) &&
2976 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2977 vdev->start_on_kick = true;
2978 }
2979 }
2980 return ret;
2981 }
2982
2983 size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
2984 uint64_t host_features)
2985 {
2986 size_t config_size = 0;
2987 int i;
2988
2989 for (i = 0; feature_sizes[i].flags != 0; i++) {
2990 if (host_features & feature_sizes[i].flags) {
2991 config_size = MAX(feature_sizes[i].end, config_size);
2992 }
2993 }
2994
2995 return config_size;
2996 }
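
/*
 * A sketch of a feature_sizes table as consumed above, following the
 * virtio-net pattern. The feature bits and config struct are
 * hypothetical; endof() (offset plus size of a member) comes from the
 * virtio header.
 */
static const VirtIOFeature my_feature_sizes[] = {
    { .flags = 1ULL << MY_F_FOO,
      .end = endof(struct my_config, foo) },
    { .flags = 1ULL << MY_F_BAR,
      .end = endof(struct my_config, bar) },
    {}
};
/* config_size = virtio_feature_get_config_size(my_feature_sizes,
 *                                              host_features); */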
2997
2998 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
2999 {
3000 int i, ret;
3001 int32_t config_len;
3002 uint32_t num;
3003 uint32_t features;
3004 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3005 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3006 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3007
3008 /*
3009 * We poison the endianness to ensure it does not get used before
3010 * subsections have been loaded.
3011 */
3012 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3013
3014 if (k->load_config) {
3015 ret = k->load_config(qbus->parent, f);
3016 if (ret)
3017 return ret;
3018 }
3019
3020 qemu_get_8s(f, &vdev->status);
3021 qemu_get_8s(f, &vdev->isr);
3022 qemu_get_be16s(f, &vdev->queue_sel);
3023 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3024 return -1;
3025 }
3026 qemu_get_be32s(f, &features);
3027
3028 /*
3029 * Temporarily set guest_features low bits - needed by
3030      * virtio-net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
3031 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3032 *
3033 * Note: devices should always test host features in future - don't create
3034 * new dependencies like this.
3035 */
3036 vdev->guest_features = features;
3037
3038 config_len = qemu_get_be32(f);
3039
3040 /*
3041 * There are cases where the incoming config can be bigger or smaller
3042 * than what we have; so load what we have space for, and skip
3043 * any excess that's in the stream.
3044 */
3045 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3046
3047 while (config_len > vdev->config_len) {
3048 qemu_get_byte(f);
3049 config_len--;
3050 }
3051
3052 num = qemu_get_be32(f);
3053
3054 if (num > VIRTIO_QUEUE_MAX) {
3055 error_report("Invalid number of virtqueues: 0x%x", num);
3056 return -1;
3057 }
3058
3059 for (i = 0; i < num; i++) {
3060 vdev->vq[i].vring.num = qemu_get_be32(f);
3061 if (k->has_variable_vring_alignment) {
3062 vdev->vq[i].vring.align = qemu_get_be32(f);
3063 }
3064 vdev->vq[i].vring.desc = qemu_get_be64(f);
3065 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3066 vdev->vq[i].signalled_used_valid = false;
3067 vdev->vq[i].notification = true;
3068
3069 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3070 error_report("VQ %d address 0x0 "
3071 "inconsistent with Host index 0x%x",
3072 i, vdev->vq[i].last_avail_idx);
3073 return -1;
3074 }
3075 if (k->load_queue) {
3076 ret = k->load_queue(qbus->parent, i, f);
3077 if (ret)
3078 return ret;
3079 }
3080 }
3081
3082 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3083
3084 if (vdc->load != NULL) {
3085 ret = vdc->load(vdev, f, version_id);
3086 if (ret) {
3087 return ret;
3088 }
3089 }
3090
3091 if (vdc->vmsd) {
3092 ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3093 if (ret) {
3094 return ret;
3095 }
3096 }
3097
3098 /* Subsections */
3099 ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3100 if (ret) {
3101 return ret;
3102 }
3103
3104 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3105 vdev->device_endian = virtio_default_endian();
3106 }
3107
3108 if (virtio_64bit_features_needed(vdev)) {
3109 /*
3110          * Subsection load filled vdev->guest_features. Run them
3111          * through virtio_set_features_nocheck() to sanity-check them
3112          * against host_features.
3113 */
3114 uint64_t features64 = vdev->guest_features;
3115 if (virtio_set_features_nocheck(vdev, features64) < 0) {
3116 error_report("Features 0x%" PRIx64 " unsupported. "
3117 "Allowed features: 0x%" PRIx64,
3118 features64, vdev->host_features);
3119 return -1;
3120 }
3121 } else {
3122 if (virtio_set_features_nocheck(vdev, features) < 0) {
3123 error_report("Features 0x%x unsupported. "
3124 "Allowed features: 0x%" PRIx64,
3125 features, vdev->host_features);
3126 return -1;
3127 }
3128 }
3129
3130 if (!virtio_device_started(vdev, vdev->status) &&
3131 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3132 vdev->start_on_kick = true;
3133 }
3134
3135 RCU_READ_LOCK_GUARD();
3136 for (i = 0; i < num; i++) {
3137 if (vdev->vq[i].vring.desc) {
3138 uint16_t nheads;
3139
3140 /*
3141 * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3142 * only the region cache needs to be set up. Legacy devices need
3143 * to calculate used and avail ring addresses based on the desc
3144 * address.
3145 */
3146 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3147 virtio_init_region_cache(vdev, i);
3148 } else {
3149 virtio_queue_update_rings(vdev, i);
3150 }
3151
3152 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3153 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3154 vdev->vq[i].shadow_avail_wrap_counter =
3155 vdev->vq[i].last_avail_wrap_counter;
3156 continue;
3157 }
3158
3159 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3160 /* Check it isn't doing strange things with descriptor numbers. */
3161 if (nheads > vdev->vq[i].vring.num) {
3162 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3163 "inconsistent with Host index 0x%x: delta 0x%x",
3164 i, vdev->vq[i].vring.num,
3165 vring_avail_idx(&vdev->vq[i]),
3166 vdev->vq[i].last_avail_idx, nheads);
3167 vdev->vq[i].used_idx = 0;
3168 vdev->vq[i].shadow_avail_idx = 0;
3169 vdev->vq[i].inuse = 0;
3170 continue;
3171 }
3172 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3173 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3174
3175 /*
3176 * Some devices migrate VirtQueueElements that have been popped
3177 * from the avail ring but not yet returned to the used ring.
3178              * Since the maximum ring size is < UINT16_MAX, it's safe to
3179              * use modulo-(UINT16_MAX + 1) subtraction.
3180 */
3181 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3182 vdev->vq[i].used_idx);
3183 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3184 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3185 "used_idx 0x%x",
3186 i, vdev->vq[i].vring.num,
3187 vdev->vq[i].last_avail_idx,
3188 vdev->vq[i].used_idx);
3189 return -1;
3190 }
3191 }
3192 }
3193
3194 if (vdc->post_load) {
3195 ret = vdc->post_load(vdev);
3196 if (ret) {
3197 return ret;
3198 }
3199 }
3200
3201 return 0;
3202 }
3203
3204 void virtio_cleanup(VirtIODevice *vdev)
3205 {
3206 qemu_del_vm_change_state_handler(vdev->vmstate);
3207 }
3208
3209 static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3210 {
3211 VirtIODevice *vdev = opaque;
3212 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3213 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3214 bool backend_run = running && virtio_device_started(vdev, vdev->status);
3215 vdev->vm_running = running;
3216
3217 if (backend_run) {
3218 virtio_set_status(vdev, vdev->status);
3219 }
3220
3221 if (k->vmstate_change) {
3222 k->vmstate_change(qbus->parent, backend_run);
3223 }
3224
3225 if (!backend_run) {
3226 virtio_set_status(vdev, vdev->status);
3227 }
3228 }
3229
3230 void virtio_instance_init_common(Object *proxy_obj, void *data,
3231 size_t vdev_size, const char *vdev_name)
3232 {
3233 DeviceState *vdev = data;
3234
3235 object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3236 vdev_size, vdev_name, &error_abort,
3237 NULL);
3238 qdev_alias_all_properties(vdev, proxy_obj);
3239 }
3240
3241 void virtio_init(VirtIODevice *vdev, const char *name,
3242 uint16_t device_id, size_t config_size)
3243 {
3244 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3245 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3246 int i;
3247 int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3248
3249 if (nvectors) {
3250 vdev->vector_queues =
3251 g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3252 }
3253
3254 vdev->start_on_kick = false;
3255 vdev->started = false;
3256 vdev->device_id = device_id;
3257 vdev->status = 0;
3258 qatomic_set(&vdev->isr, 0);
3259 vdev->queue_sel = 0;
3260 vdev->config_vector = VIRTIO_NO_VECTOR;
3261 vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
3262 vdev->vm_running = runstate_is_running();
3263 vdev->broken = false;
3264 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3265 vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3266 vdev->vq[i].vdev = vdev;
3267 vdev->vq[i].queue_index = i;
3268 vdev->vq[i].host_notifier_enabled = false;
3269 }
3270
3271 vdev->name = name;
3272 vdev->config_len = config_size;
3273 if (vdev->config_len) {
3274 vdev->config = g_malloc0(config_size);
3275 } else {
3276 vdev->config = NULL;
3277 }
3278 vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3279 virtio_vmstate_change, vdev);
3280 vdev->device_endian = virtio_default_endian();
3281 vdev->use_guest_notifier_mask = true;
3282 }
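
/*
 * A sketch of the realize side that pairs with the my_device_unrealize()
 * sketch earlier: generic virtio state is initialized first, then the
 * queues and their kick handlers. All my_* names and MY_DEVICE_ID are
 * hypothetical; a real device keeps the returned VirtQueue pointer.
 */
static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq);

static void my_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);

    virtio_init(vdev, "my-device", MY_DEVICE_ID, sizeof(struct my_config));
    virtio_add_queue(vdev, 128, my_handle_output);
}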
3283
3284 /*
3285 * Only devices that have already been around prior to defining the virtio
3286 * standard support legacy mode; this includes devices not specified in the
3287 * standard. All newer devices conform to the virtio standard only.
3288 */
3289 bool virtio_legacy_allowed(VirtIODevice *vdev)
3290 {
3291 switch (vdev->device_id) {
3292 case VIRTIO_ID_NET:
3293 case VIRTIO_ID_BLOCK:
3294 case VIRTIO_ID_CONSOLE:
3295 case VIRTIO_ID_RNG:
3296 case VIRTIO_ID_BALLOON:
3297 case VIRTIO_ID_RPMSG:
3298 case VIRTIO_ID_SCSI:
3299 case VIRTIO_ID_9P:
3300 case VIRTIO_ID_RPROC_SERIAL:
3301 case VIRTIO_ID_CAIF:
3302 return true;
3303 default:
3304 return false;
3305 }
3306 }
3307
3308 bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3309 {
3310 return vdev->disable_legacy_check;
3311 }
3312
3313 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3314 {
3315 return vdev->vq[n].vring.desc;
3316 }
3317
3318 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3319 {
3320 return virtio_queue_get_desc_addr(vdev, n) != 0;
3321 }
3322
3323 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3324 {
3325 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3326 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3327
3328 if (k->queue_enabled) {
3329 return k->queue_enabled(qbus->parent, n);
3330 }
3331 return virtio_queue_enabled_legacy(vdev, n);
3332 }
3333
3334 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3335 {
3336 return vdev->vq[n].vring.avail;
3337 }
3338
3339 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3340 {
3341 return vdev->vq[n].vring.used;
3342 }
3343
3344 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3345 {
3346 return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3347 }
3348
3349 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3350 {
3351 int s;
3352
3353 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3354 return sizeof(struct VRingPackedDescEvent);
3355 }
3356
3357 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3358 return offsetof(VRingAvail, ring) +
3359 sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3360 }
3361
3362 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3363 {
3364 int s;
3365
3366 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3367 return sizeof(struct VRingPackedDescEvent);
3368 }
3369
3370 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3371 return offsetof(VRingUsed, ring) +
3372 sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
3373 }
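
/*
 * Worked example for the two size helpers above: for a split ring with
 * num = 256 and VIRTIO_RING_F_EVENT_IDX negotiated,
 *
 *   desc:  16 * 256            = 4096 bytes (sizeof(VRingDesc) == 16)
 *   avail: 2 + 2 + 2 * 256 + 2 =  518 bytes (flags, idx, ring, used_event)
 *   used:  2 + 2 + 8 * 256 + 2 = 2054 bytes (flags, idx, ring, avail_event)
 *
 * With VIRTIO_F_RING_PACKED each event suppression area is just a
 * VRingPackedDescEvent (4 bytes).
 */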
3374
3375 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3376 int n)
3377 {
3378 unsigned int avail, used;
3379
3380 avail = vdev->vq[n].last_avail_idx;
3381 avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3382
3383 used = vdev->vq[n].used_idx;
3384 used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3385
3386 return avail | used << 16;
3387 }
3388
3389 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3390 int n)
3391 {
3392 return vdev->vq[n].last_avail_idx;
3393 }
3394
3395 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3396 {
3397 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3398 return virtio_queue_packed_get_last_avail_idx(vdev, n);
3399 } else {
3400 return virtio_queue_split_get_last_avail_idx(vdev, n);
3401 }
3402 }
3403
3404 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3405 int n, unsigned int idx)
3406 {
3407 struct VirtQueue *vq = &vdev->vq[n];
3408
3409 vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3410 vq->last_avail_wrap_counter =
3411 vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3412 idx >>= 16;
3413     vq->used_idx = idx & 0x7fff;
3414 vq->used_wrap_counter = !!(idx & 0x8000);
3415 }
3416
3417 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3418 int n, unsigned int idx)
3419 {
3420 vdev->vq[n].last_avail_idx = idx;
3421 vdev->vq[n].shadow_avail_idx = idx;
3422 }
3423
3424 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3425 unsigned int idx)
3426 {
3427 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3428 virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3429 } else {
3430 virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3431 }
3432 }
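
/*
 * The packed get/set helpers above fold the whole queue position into
 * one 32-bit value: bits 0..14 last_avail_idx, bit 15 its wrap counter,
 * bits 16..30 used_idx, bit 31 the used wrap counter. A standalone
 * decode for illustration:
 */
static inline void my_decode_packed_idx(uint32_t state,
                                        uint16_t *last_avail,
                                        bool *avail_wrap,
                                        uint16_t *used_idx, bool *used_wrap)
{
    *last_avail = state & 0x7fff;
    *avail_wrap = !!(state & 0x8000);
    *used_idx = (state >> 16) & 0x7fff;
    *used_wrap = !!(state >> 31);
    /* e.g. 0x8003800a -> avail 10 (wrap 1), used 3 (wrap 1) */
}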
3433
3434 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3435 int n)
3436 {
3437 /* We don't have a reference like avail idx in shared memory */
3438 return;
3439 }
3440
3441 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3442 int n)
3443 {
3444 RCU_READ_LOCK_GUARD();
3445 if (vdev->vq[n].vring.desc) {
3446 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3447 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3448 }
3449 }
3450
3451 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3452 {
3453 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3454 virtio_queue_packed_restore_last_avail_idx(vdev, n);
3455 } else {
3456 virtio_queue_split_restore_last_avail_idx(vdev, n);
3457 }
3458 }
3459
3460 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3461 {
3462 /* used idx was updated through set_last_avail_idx() */
3463 return;
3464 }
3465
3466 static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n)
3467 {
3468 RCU_READ_LOCK_GUARD();
3469 if (vdev->vq[n].vring.desc) {
3470 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3471 }
3472 }
3473
3474 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3475 {
3476     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3477         virtio_queue_packed_update_used_idx(vdev, n);
3478     } else {
3479         virtio_queue_split_update_used_idx(vdev, n);
3480     }
3481 }
3482
3483 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3484 {
3485 vdev->vq[n].signalled_used_valid = false;
3486 }
3487
3488 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3489 {
3490 return vdev->vq + n;
3491 }
3492
3493 uint16_t virtio_get_queue_index(VirtQueue *vq)
3494 {
3495 return vq->queue_index;
3496 }
3497
3498 static void virtio_queue_guest_notifier_read(EventNotifier *n)
3499 {
3500 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3501 if (event_notifier_test_and_clear(n)) {
3502 virtio_irq(vq);
3503 }
3504 }
3505
3506 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3507 bool with_irqfd)
3508 {
3509 if (assign && !with_irqfd) {
3510 event_notifier_set_handler(&vq->guest_notifier,
3511 virtio_queue_guest_notifier_read);
3512 } else {
3513 event_notifier_set_handler(&vq->guest_notifier, NULL);
3514 }
3515 if (!assign) {
3516 /* Test and clear notifier before closing it,
3517 * in case poll callback didn't have time to run. */
3518 virtio_queue_guest_notifier_read(&vq->guest_notifier);
3519 }
3520 }
3521
3522 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3523 {
3524 return &vq->guest_notifier;
3525 }
3526
3527 static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
3528 {
3529 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3530 if (event_notifier_test_and_clear(n)) {
3531 virtio_queue_notify_aio_vq(vq);
3532 }
3533 }
3534
3535 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3536 {
3537 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3538
3539 virtio_queue_set_notification(vq, 0);
3540 }
3541
3542 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3543 {
3544 EventNotifier *n = opaque;
3545 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3546
3547 if (!vq->vring.desc || virtio_queue_empty(vq)) {
3548 return false;
3549 }
3550
3551 return virtio_queue_notify_aio_vq(vq);
3552 }
3553
3554 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3555 {
3556 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3557
3558 /* Caller polls once more after this to catch requests that race with us */
3559 virtio_queue_set_notification(vq, 1);
3560 }
3561
3562 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
3563 VirtIOHandleAIOOutput handle_output)
3564 {
3565 if (handle_output) {
3566 vq->handle_aio_output = handle_output;
3567 aio_set_event_notifier(ctx, &vq->host_notifier, true,
3568 virtio_queue_host_notifier_aio_read,
3569 virtio_queue_host_notifier_aio_poll);
3570 aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3571 virtio_queue_host_notifier_aio_poll_begin,
3572 virtio_queue_host_notifier_aio_poll_end);
3573 } else {
3574 aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
3575         /* Test and clear notifier after disabling the event,
3576          * in case the poll callback didn't have time to run. */
3577 virtio_queue_host_notifier_aio_read(&vq->host_notifier);
3578 vq->handle_aio_output = NULL;
3579 }
3580 }
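
/*
 * A sketch of how a dataplane implementation (virtio-blk/scsi style)
 * hands a queue's host notifier to an IOThread's AioContext and
 * detaches it again; the handler and function names are hypothetical.
 */
static bool my_handle_vq_in_iothread(VirtIODevice *vdev, VirtQueue *vq);

static void my_dataplane_start_vq(VirtQueue *vq, AioContext *ctx)
{
    virtio_queue_aio_set_host_notifier_handler(vq, ctx,
                                               my_handle_vq_in_iothread);
}

static void my_dataplane_stop_vq(VirtQueue *vq, AioContext *ctx)
{
    /* NULL detaches the handler and drains any final pending kick. */
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, NULL);
}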
3581
3582 void virtio_queue_host_notifier_read(EventNotifier *n)
3583 {
3584 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3585 if (event_notifier_test_and_clear(n)) {
3586 virtio_queue_notify_vq(vq);
3587 }
3588 }
3589
3590 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3591 {
3592 return &vq->host_notifier;
3593 }
3594
3595 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3596 {
3597 vq->host_notifier_enabled = enabled;
3598 }
3599
3600 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3601 MemoryRegion *mr, bool assign)
3602 {
3603 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3604 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3605
3606 if (k->set_host_notifier_mr) {
3607 return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3608 }
3609
3610 return -1;
3611 }
3612
3613 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3614 {
3615 g_free(vdev->bus_name);
3616 vdev->bus_name = g_strdup(bus_name);
3617 }
3618
3619 void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3620 {
3621 va_list ap;
3622
3623 va_start(ap, fmt);
3624 error_vreport(fmt, ap);
3625 va_end(ap);
3626
3627 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3628 vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
3629 virtio_notify_config(vdev);
3630 }
3631
3632 vdev->broken = true;
3633 }
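
/*
 * Typical virtio_error() usage, as a hypothetical validation helper:
 * guest-supplied ring data is checked and the device marked broken
 * instead of trusting an out-of-range value.
 */
static bool my_check_desc_next(VirtIODevice *vdev, VirtQueue *vq,
                               uint16_t next)
{
    if (next >= vq->vring.num) {
        virtio_error(vdev, "Descriptor chain points past ring: %u >= %u",
                     next, vq->vring.num);
        return false;
    }
    return true;
}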
3634
3635 static void virtio_memory_listener_commit(MemoryListener *listener)
3636 {
3637 VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3638 int i;
3639
3640 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3641 if (vdev->vq[i].vring.num == 0) {
3642 break;
3643 }
3644 virtio_init_region_cache(vdev, i);
3645 }
3646 }
3647
3648 static void virtio_device_realize(DeviceState *dev, Error **errp)
3649 {
3650 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3651 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3652 Error *err = NULL;
3653
3654 /* Devices should either use vmsd or the load/save methods */
3655 assert(!vdc->vmsd || !vdc->load);
3656
3657 if (vdc->realize != NULL) {
3658 vdc->realize(dev, &err);
3659 if (err != NULL) {
3660 error_propagate(errp, err);
3661 return;
3662 }
3663 }
3664
3665 virtio_bus_device_plugged(vdev, &err);
3666 if (err != NULL) {
3667 error_propagate(errp, err);
3668 vdc->unrealize(dev);
3669 return;
3670 }
3671
3672 vdev->listener.commit = virtio_memory_listener_commit;
3673 memory_listener_register(&vdev->listener, vdev->dma_as);
3674 }
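
/*
 * Sketch of what a concrete device's realize hook (vdc->realize above)
 * usually looks like.  This is illustrative only: the device name,
 * MY_DEVICE_ID, config struct, queue size and handler are hypothetical,
 * and the virtio_init()/virtio_add_queue() calls reflect the common
 * pattern rather than anything in this file.
 *
 *   static void my_device_realize(DeviceState *dev, Error **errp)
 *   {
 *       VirtIODevice *vdev = VIRTIO_DEVICE(dev);
 *
 *       virtio_init(vdev, "my-device", MY_DEVICE_ID,
 *                   sizeof(struct my_device_config));
 *       virtio_add_queue(vdev, 128, my_device_handle_output);
 *   }
 *
 * virtio_device_realize() runs the hook first, then plugs the device
 * into its transport via virtio_bus_device_plugged().
 */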

static void virtio_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);

    memory_listener_unregister(&vdev->listener);
    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev);
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;
    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}

static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
    DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
    DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
                     disable_legacy_check, false),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}

int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}
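
/*
 * Usage sketch (illustrative only): a device that processes its queues
 * in an IOThread typically grabs the ioeventfd at realize time so the
 * transport keeps it available, then starts it when the dataplane comes
 * up.  The error handling shown is an assumption about typical callers,
 * not code from this file.
 *
 *   if (virtio_device_grab_ioeventfd(vdev) < 0) {
 *       error_setg(errp, "ioeventfd is required by this device");
 *       return;
 *   }
 *   ...
 *   if (virtio_device_start_ioeventfd(vdev) < 0) {
 *       ...fall back to handling guest kicks in the main loop...
 *   }
 */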

static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    device_class_set_props(dc, virtio_properties);
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}
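
/*
 * Sketch of a concrete subclass's class_init (illustrative only; the
 * callback names are hypothetical).  Subclasses fill in VirtioDeviceClass
 * hooks rather than DeviceClass::realize directly, because
 * virtio_device_realize() above wraps them:
 *
 *   static void my_device_class_init(ObjectClass *klass, void *data)
 *   {
 *       VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
 *
 *       vdc->realize = my_device_realize;
 *       vdc->unrealize = my_device_unrealize;
 *       vdc->get_features = my_device_get_features;
 *   }
 */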

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)