/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-virtio.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
#include "hw/core/cpu.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "virtio-qmp.h"

#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_console.h"
#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "standard-headers/linux/virtio_i2c.h"
#include "standard-headers/linux/virtio_balloon.h"
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;

struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether that last signalled used index value is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};
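/*
 * A rough sketch of the index bookkeeping above (split ring, where the
 * indices are free-running uint16_t values compared modulo 2^16):
 *
 *     used_idx <= last_avail_idx <= shadow_avail_idx
 *
 * with, in the common case, inuse == last_avail_idx - used_idx elements
 * popped but not yet pushed back.  Packed rings track a position in
 * [0, vring.num) instead, plus a wrap counter that toggles on each wrap.
 */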
const char *virtio_device_names[] = {
    [VIRTIO_ID_NET] = "virtio-net",
    [VIRTIO_ID_BLOCK] = "virtio-blk",
    [VIRTIO_ID_CONSOLE] = "virtio-serial",
    [VIRTIO_ID_RNG] = "virtio-rng",
    [VIRTIO_ID_BALLOON] = "virtio-balloon",
    [VIRTIO_ID_IOMEM] = "virtio-iomem",
    [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
    [VIRTIO_ID_SCSI] = "virtio-scsi",
    [VIRTIO_ID_9P] = "virtio-9p",
    [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
    [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
    [VIRTIO_ID_CAIF] = "virtio-caif",
    [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
    [VIRTIO_ID_GPU] = "virtio-gpu",
    [VIRTIO_ID_CLOCK] = "virtio-clk",
    [VIRTIO_ID_INPUT] = "virtio-input",
    [VIRTIO_ID_VSOCK] = "vhost-vsock",
    [VIRTIO_ID_CRYPTO] = "virtio-crypto",
    [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
    [VIRTIO_ID_PSTORE] = "virtio-pstore",
    [VIRTIO_ID_IOMMU] = "virtio-iommu",
    [VIRTIO_ID_MEM] = "virtio-mem",
    [VIRTIO_ID_SOUND] = "virtio-sound",
    [VIRTIO_ID_FS] = "virtio-user-fs",
    [VIRTIO_ID_PMEM] = "virtio-pmem",
    [VIRTIO_ID_RPMB] = "virtio-rpmb",
    [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
    [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
    [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
    [VIRTIO_ID_SCMI] = "virtio-scmi",
    [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
    [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
    [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
    [VIRTIO_ID_CAN] = "virtio-can",
    [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
    [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
    [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
    [VIRTIO_ID_BT] = "virtio-bluetooth",
    [VIRTIO_ID_GPIO] = "virtio-gpio"
};

static const char *virtio_id_to_name(uint16_t device_id)
{
    assert(device_id < G_N_ELEMENTS(virtio_device_names));
    const char *name = virtio_device_names[device_id];
    assert(name != NULL);
    return name;
}

/* Called within call_rcu().  */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}
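/*
 * The caches pointer follows the usual RCU publish pattern: writers swap
 * in a new set with qatomic_rcu_set() and free the old one from an RCU
 * callback, while readers sample it once under rcu_read_lock() and must
 * tolerate NULL.  A minimal reader sketch:
 *
 *     RCU_READ_LOCK_GUARD();
 *     VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 *     if (!caches) {
 *         return;
 *     }
 */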
void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;


    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                   true : false;
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}

/* Called within rcu_read_lock().  */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
}

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_stw_phys_cached(vdev, cache, off, flags);
    address_space_cache_invalidate(cache, off, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}
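/*
 * The vring_avail_*() and vring_used_*() helpers below access the guest's
 * split-ring fields through the RCU-protected caches, doing the endian
 * conversion for the device; they expect to run under rcu_read_lock() and
 * fall back to a no-op (or a 0 result) once the caches have been reset.
 */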
/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_used_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;
    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}

static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
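/*
 * With VIRTIO_RING_F_EVENT_IDX, the packed-ring suppression area written
 * above packs a ring offset and the expected wrap state into one 16-bit
 * field:
 *
 *     off_wrap = shadow_avail_idx | (shadow_avail_wrap_counter << 15)
 *
 * Bits 0..14 are the descriptor ring offset and bit 15 is the wrap
 * counter, which is why packed queue sizes stay below 2^15.
 */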
bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    *flags = virtio_lduw_phys_cached(vdev, cache, off);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                     offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_stw_phys_cached(vdev, cache, off, desc->flags);
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}

static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}

static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}
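/*
 * Per the virtio 1.1 spec, the driver makes a packed descriptor available
 * by setting AVAIL to its current wrap counter while USED still holds the
 * stale value, and the device marks it used by setting both to its own
 * wrap counter.  Seen from the device with wrap counter wc:
 *
 *     avail == used               -> already consumed, nothing to pop
 *     avail != used, avail == wc  -> available to us now
 *     avail != used, avail != wc  -> published for a different lap, skip
 */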
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

/* Called within rcu_read_lock().  */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}
/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}

static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                 unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}

/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}
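/*
 * fill/flush form a two-phase completion API: complete a batch by calling
 * virtqueue_fill() once per element with increasing idx, then make the
 * whole batch visible to the guest with a single virtqueue_flush().  A
 * hypothetical batched completion (elems/lens are the caller's arrays):
 *
 *     RCU_READ_LOCK_GUARD();
 *     for (i = 0; i < n; i++) {
 *         virtqueue_fill(vq, elems[i], lens[i], i);
 *     }
 *     virtqueue_flush(vq, n);
 *
 * virtqueue_push() below is the single-element shorthand for this.
 */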
/* Called within rcu_read_lock().  */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
    ndescs += vq->used_elems[0].ndescs;

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
        vq->signalled_used_valid = false;
    }
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};
static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes,
                            VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache;
    int64_t len = 0;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;
        unsigned int max = vq->vring.num;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes,
                                             VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    address_space_cache_init_empty(&indirect_desc_cache);

    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;
        unsigned int max = vq->vring.num;

        desc_cache = &caches->desc;

        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
            idx++;
        } else {
            idx += num_bufs - total_bufs;
            total_bufs = num_bufs;
        }

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;
done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes,
                                         caches);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes,
                                        caches);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
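/*
 * A hypothetical caller of virtqueue_avail_bytes(): a device needing a
 * 4-byte device-writable status area and up to 512 device-readable bytes
 * could probe before popping anything:
 *
 *     if (!virtqueue_avail_bytes(vq, 4, 512)) {
 *         return;
 *     }
 *
 * in_bytes counts buffer space the device may write (VRING_DESC_F_WRITE),
 * out_bytes counts what it may read.
 */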
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE,
                                              MEMTXATTRS_UNSPECIFIED);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE,
                                        MEMTXATTRS_UNSPECIFIED);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                        false);
}

static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
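/*
 * The element is a single allocation: the caller-visible struct (possibly
 * a larger device-specific wrapper of size sz) is followed by the
 * in_addr[], out_addr[], in_sg[] and out_sg[] arrays, each aligned for its
 * type, with the pointers fixed up to land in that tail.  Freeing an
 * element is therefore always a single g_free().
 */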
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_empty_rcu(vq)) {
        goto done;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there is neither input nor output. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        goto done;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_split_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingDesc);
        i = 0;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    elem->ndescs = 1;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache;
    MemoryRegionCache *desc_cache;
    int64_t len;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num, elem_entries;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingPackedDesc desc;
    uint16_t id;
    int rc;

    address_space_cache_init_empty(&indirect_desc_cache);

    RCU_READ_LOCK_GUARD();
    if (virtio_queue_packed_empty_rcu(vq)) {
        goto done;
    }

    /* When we start there is neither input nor output. */
    out_num = in_num = elem_entries = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        goto done;
    }

    i = vq->last_avail_idx;

    caches = vring_get_region_caches(vq);
    if (!caches) {
        virtio_error(vdev, "Region caches not initialized");
        goto done;
    }

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;
    }

    desc_cache = &caches->desc;
    vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
    id = desc.id;
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingPackedDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            goto done;
        }

        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
            goto done;
        }

        max = desc.len / sizeof(VRingPackedDesc);
        i = 0;
        vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if (++elem_entries > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
                                             desc_cache ==
                                             &indirect_desc_cache);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    elem->index = id;
    elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
    vq->last_avail_idx += elem->ndescs;
    vq->inuse += elem->ndescs;

    if (vq->last_avail_idx >= vq->vring.num) {
        vq->last_avail_idx -= vq->vring.num;
        vq->last_avail_wrap_counter ^= 1;
    }

    vq->shadow_avail_idx = vq->last_avail_idx;
    vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
    address_space_cache_destroy(&indirect_desc_cache);

    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    goto done;
}

void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    if (virtio_device_disabled(vq->vdev)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_pop(vq, sz);
    } else {
        return virtqueue_split_pop(vq, sz);
    }
}
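/*
 * The sz argument lets a device embed VirtQueueElement at the start of a
 * larger, device-specific request struct; a hypothetical example:
 *
 *     typedef struct MyDevRequest {
 *         VirtQueueElement elem;     (must be the first field)
 *         uint8_t status;
 *     } MyDevRequest;
 *
 *     MyDevRequest *req = virtqueue_pop(vq, sizeof(MyDevRequest));
 *
 * The trailing sg/addr arrays are then placed after the wrapper, which is
 * why sz must be at least sizeof(VirtQueueElement).
 */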
static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache *desc_cache;
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    VRingPackedDesc desc;

    RCU_READ_LOCK_GUARD();

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return 0;
    }

    desc_cache = &caches->desc;

    virtio_queue_set_notification(vq, 0);

    while (vq->inuse < vq->vring.num) {
        unsigned int idx = vq->last_avail_idx;
        /*
         * works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory.
         */
        vring_packed_desc_read(vdev, &desc, desc_cache,
                               vq->last_avail_idx, true);
        if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
            break;
        }
        elem.index = desc.id;
        elem.ndescs = 1;
        while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
                                               vq->vring.num, &idx, false)) {
            ++elem.ndescs;
        }
        /*
         * immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0.
         */
        virtqueue_push(vq, &elem, 0);
        dropped++;
        vq->last_avail_idx += elem.ndescs;
        if (vq->last_avail_idx >= vq->vring.num) {
            vq->last_avail_idx -= vq->vring.num;
            vq->last_avail_wrap_counter ^= 1;
        }
    }

    return dropped;
}

static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similarly to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}

/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done.  Useful when buffers cannot be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    struct VirtIODevice *vdev = vq->vdev;

    if (virtio_device_disabled(vq->vdev)) {
        return 0;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        return virtqueue_packed_drop_all(vq);
    } else {
        return virtqueue_split_drop_all(vq);
    }
}

/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * This is just one thing (there are probably more) that must be
     * fixed before we can allow NDEBUG compilation.
     */
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_get_be32s(f, &elem->ndescs);
    }

    virtqueue_map(vdev, elem);
    return elem;
}

void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
                                VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout. */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }

    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        qemu_put_be32s(f, &elem->ndescs);
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (virtio_device_disabled(vdev)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }

    if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
        (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
        virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;

    return 0;
}
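/*
 * For virtio 1.0+ devices the driver normally walks the status through
 * ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK.  Setting FEATURES_OK
 * is the last point at which feature negotiation can still fail, which is
 * why virtio_validate_features() is invoked above exactly when that bit
 * flips from clear to set.
 */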
VIRTIO_DEVICE_ENDIAN_BIG; 2058 } else { 2059 return VIRTIO_DEVICE_ENDIAN_LITTLE; 2060 } 2061 } 2062 2063 static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i) 2064 { 2065 vdev->vq[i].vring.desc = 0; 2066 vdev->vq[i].vring.avail = 0; 2067 vdev->vq[i].vring.used = 0; 2068 vdev->vq[i].last_avail_idx = 0; 2069 vdev->vq[i].shadow_avail_idx = 0; 2070 vdev->vq[i].used_idx = 0; 2071 vdev->vq[i].last_avail_wrap_counter = true; 2072 vdev->vq[i].shadow_avail_wrap_counter = true; 2073 vdev->vq[i].used_wrap_counter = true; 2074 virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR); 2075 vdev->vq[i].signalled_used = 0; 2076 vdev->vq[i].signalled_used_valid = false; 2077 vdev->vq[i].notification = true; 2078 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default; 2079 vdev->vq[i].inuse = 0; 2080 virtio_virtqueue_reset_region_cache(&vdev->vq[i]); 2081 } 2082 2083 void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index) 2084 { 2085 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 2086 2087 if (k->queue_reset) { 2088 k->queue_reset(vdev, queue_index); 2089 } 2090 2091 __virtio_queue_reset(vdev, queue_index); 2092 } 2093 2094 void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index) 2095 { 2096 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 2097 2098 /* 2099 * TODO: Seabios is currently out of spec and triggering this error. 2100 * So this needs to be fixed in Seabios, then this can 2101 * be re-enabled for new machine types only, and also after 2102 * being converted to LOG_GUEST_ERROR. 2103 * 2104 if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2105 error_report("queue_enable is only supported in devices of virtio " 2106 "1.0 or later."); 2107 } 2108 */ 2109 2110 if (k->queue_enable) { 2111 k->queue_enable(vdev, queue_index); 2112 } 2113 } 2114 2115 void virtio_reset(void *opaque) 2116 { 2117 VirtIODevice *vdev = opaque; 2118 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 2119 int i; 2120 2121 virtio_set_status(vdev, 0); 2122 if (current_cpu) { 2123 /* Guest initiated reset */ 2124 vdev->device_endian = virtio_current_cpu_endian(); 2125 } else { 2126 /* System reset */ 2127 vdev->device_endian = virtio_default_endian(); 2128 } 2129 2130 if (k->reset) { 2131 k->reset(vdev); 2132 } 2133 2134 vdev->start_on_kick = false; 2135 vdev->started = false; 2136 vdev->broken = false; 2137 vdev->guest_features = 0; 2138 vdev->queue_sel = 0; 2139 vdev->status = 0; 2140 vdev->disabled = false; 2141 qatomic_set(&vdev->isr, 0); 2142 vdev->config_vector = VIRTIO_NO_VECTOR; 2143 virtio_notify_vector(vdev, vdev->config_vector); 2144 2145 for(i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2146 __virtio_queue_reset(vdev, i); 2147 } 2148 } 2149 2150 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr) 2151 { 2152 if (!vdev->vq[n].vring.num) { 2153 return; 2154 } 2155 vdev->vq[n].vring.desc = addr; 2156 virtio_queue_update_rings(vdev, n); 2157 } 2158 2159 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n) 2160 { 2161 return vdev->vq[n].vring.desc; 2162 } 2163 2164 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc, 2165 hwaddr avail, hwaddr used) 2166 { 2167 if (!vdev->vq[n].vring.num) { 2168 return; 2169 } 2170 vdev->vq[n].vring.desc = desc; 2171 vdev->vq[n].vring.avail = avail; 2172 vdev->vq[n].vring.used = used; 2173 virtio_init_region_cache(vdev, n); 2174 } 2175 2176 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num) 2177 { 2178 /* Don't allow guest to flip queue between existent and 2179 * nonexistent states, or to set it to an invalid 
size. 2180 */ 2181 if (!!num != !!vdev->vq[n].vring.num || 2182 num > VIRTQUEUE_MAX_SIZE || 2183 num < 0) { 2184 return; 2185 } 2186 vdev->vq[n].vring.num = num; 2187 } 2188 2189 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector) 2190 { 2191 return QLIST_FIRST(&vdev->vector_queues[vector]); 2192 } 2193 2194 VirtQueue *virtio_vector_next_queue(VirtQueue *vq) 2195 { 2196 return QLIST_NEXT(vq, node); 2197 } 2198 2199 int virtio_queue_get_num(VirtIODevice *vdev, int n) 2200 { 2201 return vdev->vq[n].vring.num; 2202 } 2203 2204 int virtio_queue_get_max_num(VirtIODevice *vdev, int n) 2205 { 2206 return vdev->vq[n].vring.num_default; 2207 } 2208 2209 int virtio_get_num_queues(VirtIODevice *vdev) 2210 { 2211 int i; 2212 2213 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2214 if (!virtio_queue_get_num(vdev, i)) { 2215 break; 2216 } 2217 } 2218 2219 return i; 2220 } 2221 2222 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) 2223 { 2224 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2225 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2226 2227 /* virtio-1 compliant devices cannot change the alignment */ 2228 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2229 error_report("tried to modify queue alignment for virtio-1 device"); 2230 return; 2231 } 2232 /* Check that the transport told us it was going to do this 2233 * (so a buggy transport will immediately assert rather than 2234 * silently failing to migrate this state) 2235 */ 2236 assert(k->has_variable_vring_alignment); 2237 2238 if (align) { 2239 vdev->vq[n].vring.align = align; 2240 virtio_queue_update_rings(vdev, n); 2241 } 2242 } 2243 2244 static void virtio_queue_notify_vq(VirtQueue *vq) 2245 { 2246 if (vq->vring.desc && vq->handle_output) { 2247 VirtIODevice *vdev = vq->vdev; 2248 2249 if (unlikely(vdev->broken)) { 2250 return; 2251 } 2252 2253 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); 2254 vq->handle_output(vdev, vq); 2255 2256 if (unlikely(vdev->start_on_kick)) { 2257 virtio_set_started(vdev, true); 2258 } 2259 } 2260 } 2261 2262 void virtio_queue_notify(VirtIODevice *vdev, int n) 2263 { 2264 VirtQueue *vq = &vdev->vq[n]; 2265 2266 if (unlikely(!vq->vring.desc || vdev->broken)) { 2267 return; 2268 } 2269 2270 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); 2271 if (vq->host_notifier_enabled) { 2272 event_notifier_set(&vq->host_notifier); 2273 } else if (vq->handle_output) { 2274 vq->handle_output(vdev, vq); 2275 2276 if (unlikely(vdev->start_on_kick)) { 2277 virtio_set_started(vdev, true); 2278 } 2279 } 2280 } 2281 2282 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n) 2283 { 2284 return n < VIRTIO_QUEUE_MAX ? 
vdev->vq[n].vector : 2285 VIRTIO_NO_VECTOR; 2286 } 2287 2288 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector) 2289 { 2290 VirtQueue *vq = &vdev->vq[n]; 2291 2292 if (n < VIRTIO_QUEUE_MAX) { 2293 if (vdev->vector_queues && 2294 vdev->vq[n].vector != VIRTIO_NO_VECTOR) { 2295 QLIST_REMOVE(vq, node); 2296 } 2297 vdev->vq[n].vector = vector; 2298 if (vdev->vector_queues && 2299 vector != VIRTIO_NO_VECTOR) { 2300 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node); 2301 } 2302 } 2303 } 2304 2305 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, 2306 VirtIOHandleOutput handle_output) 2307 { 2308 int i; 2309 2310 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2311 if (vdev->vq[i].vring.num == 0) 2312 break; 2313 } 2314 2315 if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) 2316 abort(); 2317 2318 vdev->vq[i].vring.num = queue_size; 2319 vdev->vq[i].vring.num_default = queue_size; 2320 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; 2321 vdev->vq[i].handle_output = handle_output; 2322 vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size); 2323 2324 return &vdev->vq[i]; 2325 } 2326 2327 void virtio_delete_queue(VirtQueue *vq) 2328 { 2329 vq->vring.num = 0; 2330 vq->vring.num_default = 0; 2331 vq->handle_output = NULL; 2332 g_free(vq->used_elems); 2333 vq->used_elems = NULL; 2334 virtio_virtqueue_reset_region_cache(vq); 2335 } 2336 2337 void virtio_del_queue(VirtIODevice *vdev, int n) 2338 { 2339 if (n < 0 || n >= VIRTIO_QUEUE_MAX) { 2340 abort(); 2341 } 2342 2343 virtio_delete_queue(&vdev->vq[n]); 2344 } 2345 2346 static void virtio_set_isr(VirtIODevice *vdev, int value) 2347 { 2348 uint8_t old = qatomic_read(&vdev->isr); 2349 2350 /* Do not write ISR if it does not change, so that its cacheline remains 2351 * shared in the common case where the guest does not read it. 2352 */ 2353 if ((old & value) != value) { 2354 qatomic_or(&vdev->isr, value); 2355 } 2356 } 2357 2358 /* Called within rcu_read_lock(). */ 2359 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq) 2360 { 2361 uint16_t old, new; 2362 bool v; 2363 /* We need to expose used array entries before checking used event. */ 2364 smp_mb(); 2365 /* Always notify when the queue is empty, if VIRTIO_F_NOTIFY_ON_EMPTY has been acknowledged */ 2366 if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) && 2367 !vq->inuse && virtio_queue_empty(vq)) { 2368 return true; 2369 } 2370 2371 if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { 2372 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); 2373 } 2374 2375 v = vq->signalled_used_valid; 2376 vq->signalled_used_valid = true; 2377 old = vq->signalled_used; 2378 new = vq->signalled_used = vq->used_idx; 2379 return !v || vring_need_event(vring_get_used_event(vq), new, old); 2380 } 2381 2382 static bool vring_packed_need_event(VirtQueue *vq, bool wrap, 2383 uint16_t off_wrap, uint16_t new, 2384 uint16_t old) 2385 { 2386 int off = off_wrap & ~(1 << 15); 2387 2388 if (wrap != off_wrap >> 15) { 2389 off -= vq->vring.num; 2390 } 2391 2392 return vring_need_event(off, new, old); 2393 } 2394
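/*
 * Example (editor's illustration): both the split and the packed variants
 * above end up in vring_need_event() (from the virtio_ring.h standard
 * header), which implements the wrap-safe window check from the spec:
 *
 *     (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old)
 *
 * With old = 10 and new = 12, an event index of 10 or 11 lies inside the
 * half-open window (old, new], so the device sends a notification; an
 * event index of 12 makes the left-hand side wrap to 65535 and the
 * notification is suppressed until the driver catches up. The uint16_t
 * arithmetic keeps the comparison valid across index wrap-around.
 */

2395 /* Called within rcu_read_lock(). 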
*/ 2396 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq) 2397 { 2398 VRingPackedDescEvent e; 2399 uint16_t old, new; 2400 bool v; 2401 VRingMemoryRegionCaches *caches; 2402 2403 caches = vring_get_region_caches(vq); 2404 if (!caches) { 2405 return false; 2406 } 2407 2408 vring_packed_event_read(vdev, &caches->avail, &e); 2409 2410 old = vq->signalled_used; 2411 new = vq->signalled_used = vq->used_idx; 2412 v = vq->signalled_used_valid; 2413 vq->signalled_used_valid = true; 2414 2415 if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) { 2416 return false; 2417 } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) { 2418 return true; 2419 } 2420 2421 return !v || vring_packed_need_event(vq, vq->used_wrap_counter, 2422 e.off_wrap, new, old); 2423 } 2424 2425 /* Called within rcu_read_lock(). */ 2426 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) 2427 { 2428 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 2429 return virtio_packed_should_notify(vdev, vq); 2430 } else { 2431 return virtio_split_should_notify(vdev, vq); 2432 } 2433 } 2434 2435 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) 2436 { 2437 WITH_RCU_READ_LOCK_GUARD() { 2438 if (!virtio_should_notify(vdev, vq)) { 2439 return; 2440 } 2441 } 2442 2443 trace_virtio_notify_irqfd(vdev, vq); 2444 2445 /* 2446 * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but 2447 * Windows drivers included in virtio-win 1.8.0 (circa 2015) are 2448 * incorrectly polling this bit during crashdump and hibernation 2449 * in MSI mode, causing a hang if this bit is never updated. 2450 * Recent releases of Windows do not really shut down, but rather 2451 * log out and hibernate to make the next startup faster. Hence, 2452 * this manifested as a more serious hang during shutdown with MSI enabled. 2453 * 2454 * The next driver release, from 2016, fixed this problem, so working 2455 * around it is not a must, but it's easy to do, so let's do it here. 2456 * 2457 * Note: it's safe to update ISR from any thread as it was switched 2458 * to an atomic operation. 2459 */ 2460 virtio_set_isr(vq->vdev, 0x1); 2461 event_notifier_set(&vq->guest_notifier); 2462 } 2463 2464 static void virtio_irq(VirtQueue *vq) 2465 { 2466 virtio_set_isr(vq->vdev, 0x1); 2467 virtio_notify_vector(vq->vdev, vq->vector); 2468 } 2469 2470 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq) 2471 { 2472 WITH_RCU_READ_LOCK_GUARD() { 2473 if (!virtio_should_notify(vdev, vq)) { 2474 return; 2475 } 2476 } 2477 2478 trace_virtio_notify(vdev, vq); 2479 virtio_irq(vq); 2480 } 2481 2482 void virtio_notify_config(VirtIODevice *vdev) 2483 { 2484 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) 2485 return; 2486 2487 virtio_set_isr(vdev, 0x3); 2488 vdev->generation++; 2489 virtio_notify_vector(vdev, vdev->config_vector); 2490 } 2491 2492 static bool virtio_device_endian_needed(void *opaque) 2493 { 2494 VirtIODevice *vdev = opaque; 2495 2496 assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN); 2497 if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2498 return vdev->device_endian != virtio_default_endian(); 2499 } 2500 /* Devices conforming to VIRTIO 1.0 or later are always LE. 
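 * Accordingly, this .needed callback requests the endianness subsection
 * whenever a modern device deviates from LE, or a legacy device deviates
 * from the target's default endianness.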
*/ 2501 return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE; 2502 } 2503 2504 static bool virtio_64bit_features_needed(void *opaque) 2505 { 2506 VirtIODevice *vdev = opaque; 2507 2508 return (vdev->host_features >> 32) != 0; 2509 } 2510 2511 static bool virtio_virtqueue_needed(void *opaque) 2512 { 2513 VirtIODevice *vdev = opaque; 2514 2515 return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1); 2516 } 2517 2518 static bool virtio_packed_virtqueue_needed(void *opaque) 2519 { 2520 VirtIODevice *vdev = opaque; 2521 2522 return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED); 2523 } 2524 2525 static bool virtio_ringsize_needed(void *opaque) 2526 { 2527 VirtIODevice *vdev = opaque; 2528 int i; 2529 2530 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2531 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) { 2532 return true; 2533 } 2534 } 2535 return false; 2536 } 2537 2538 static bool virtio_extra_state_needed(void *opaque) 2539 { 2540 VirtIODevice *vdev = opaque; 2541 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2542 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2543 2544 return k->has_extra_state && 2545 k->has_extra_state(qbus->parent); 2546 } 2547 2548 static bool virtio_broken_needed(void *opaque) 2549 { 2550 VirtIODevice *vdev = opaque; 2551 2552 return vdev->broken; 2553 } 2554 2555 static bool virtio_started_needed(void *opaque) 2556 { 2557 VirtIODevice *vdev = opaque; 2558 2559 return vdev->started; 2560 } 2561 2562 static bool virtio_disabled_needed(void *opaque) 2563 { 2564 VirtIODevice *vdev = opaque; 2565 2566 return vdev->disabled; 2567 } 2568 2569 static const VMStateDescription vmstate_virtqueue = { 2570 .name = "virtqueue_state", 2571 .version_id = 1, 2572 .minimum_version_id = 1, 2573 .fields = (VMStateField[]) { 2574 VMSTATE_UINT64(vring.avail, struct VirtQueue), 2575 VMSTATE_UINT64(vring.used, struct VirtQueue), 2576 VMSTATE_END_OF_LIST() 2577 } 2578 }; 2579 2580 static const VMStateDescription vmstate_packed_virtqueue = { 2581 .name = "packed_virtqueue_state", 2582 .version_id = 1, 2583 .minimum_version_id = 1, 2584 .fields = (VMStateField[]) { 2585 VMSTATE_UINT16(last_avail_idx, struct VirtQueue), 2586 VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue), 2587 VMSTATE_UINT16(used_idx, struct VirtQueue), 2588 VMSTATE_BOOL(used_wrap_counter, struct VirtQueue), 2589 VMSTATE_UINT32(inuse, struct VirtQueue), 2590 VMSTATE_END_OF_LIST() 2591 } 2592 }; 2593 2594 static const VMStateDescription vmstate_virtio_virtqueues = { 2595 .name = "virtio/virtqueues", 2596 .version_id = 1, 2597 .minimum_version_id = 1, 2598 .needed = &virtio_virtqueue_needed, 2599 .fields = (VMStateField[]) { 2600 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, 2601 VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue), 2602 VMSTATE_END_OF_LIST() 2603 } 2604 }; 2605 2606 static const VMStateDescription vmstate_virtio_packed_virtqueues = { 2607 .name = "virtio/packed_virtqueues", 2608 .version_id = 1, 2609 .minimum_version_id = 1, 2610 .needed = &virtio_packed_virtqueue_needed, 2611 .fields = (VMStateField[]) { 2612 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, 2613 VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue), 2614 VMSTATE_END_OF_LIST() 2615 } 2616 }; 2617 2618 static const VMStateDescription vmstate_ringsize = { 2619 .name = "ringsize_state", 2620 .version_id = 1, 2621 .minimum_version_id = 1, 2622 .fields = (VMStateField[]) { 2623 VMSTATE_UINT32(vring.num_default, struct VirtQueue), 2624 VMSTATE_END_OF_LIST() 2625 } 2626 }; 2627 2628 static 
const VMStateDescription vmstate_virtio_ringsize = { 2629 .name = "virtio/ringsize", 2630 .version_id = 1, 2631 .minimum_version_id = 1, 2632 .needed = &virtio_ringsize_needed, 2633 .fields = (VMStateField[]) { 2634 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, 2635 VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue), 2636 VMSTATE_END_OF_LIST() 2637 } 2638 }; 2639 2640 static int get_extra_state(QEMUFile *f, void *pv, size_t size, 2641 const VMStateField *field) 2642 { 2643 VirtIODevice *vdev = pv; 2644 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2645 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2646 2647 if (!k->load_extra_state) { 2648 return -1; 2649 } else { 2650 return k->load_extra_state(qbus->parent, f); 2651 } 2652 } 2653 2654 static int put_extra_state(QEMUFile *f, void *pv, size_t size, 2655 const VMStateField *field, JSONWriter *vmdesc) 2656 { 2657 VirtIODevice *vdev = pv; 2658 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2659 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2660 2661 k->save_extra_state(qbus->parent, f); 2662 return 0; 2663 } 2664 2665 static const VMStateInfo vmstate_info_extra_state = { 2666 .name = "virtqueue_extra_state", 2667 .get = get_extra_state, 2668 .put = put_extra_state, 2669 }; 2670 2671 static const VMStateDescription vmstate_virtio_extra_state = { 2672 .name = "virtio/extra_state", 2673 .version_id = 1, 2674 .minimum_version_id = 1, 2675 .needed = &virtio_extra_state_needed, 2676 .fields = (VMStateField[]) { 2677 { 2678 .name = "extra_state", 2679 .version_id = 0, 2680 .field_exists = NULL, 2681 .size = 0, 2682 .info = &vmstate_info_extra_state, 2683 .flags = VMS_SINGLE, 2684 .offset = 0, 2685 }, 2686 VMSTATE_END_OF_LIST() 2687 } 2688 }; 2689 2690 static const VMStateDescription vmstate_virtio_device_endian = { 2691 .name = "virtio/device_endian", 2692 .version_id = 1, 2693 .minimum_version_id = 1, 2694 .needed = &virtio_device_endian_needed, 2695 .fields = (VMStateField[]) { 2696 VMSTATE_UINT8(device_endian, VirtIODevice), 2697 VMSTATE_END_OF_LIST() 2698 } 2699 }; 2700 2701 static const VMStateDescription vmstate_virtio_64bit_features = { 2702 .name = "virtio/64bit_features", 2703 .version_id = 1, 2704 .minimum_version_id = 1, 2705 .needed = &virtio_64bit_features_needed, 2706 .fields = (VMStateField[]) { 2707 VMSTATE_UINT64(guest_features, VirtIODevice), 2708 VMSTATE_END_OF_LIST() 2709 } 2710 }; 2711 2712 static const VMStateDescription vmstate_virtio_broken = { 2713 .name = "virtio/broken", 2714 .version_id = 1, 2715 .minimum_version_id = 1, 2716 .needed = &virtio_broken_needed, 2717 .fields = (VMStateField[]) { 2718 VMSTATE_BOOL(broken, VirtIODevice), 2719 VMSTATE_END_OF_LIST() 2720 } 2721 }; 2722 2723 static const VMStateDescription vmstate_virtio_started = { 2724 .name = "virtio/started", 2725 .version_id = 1, 2726 .minimum_version_id = 1, 2727 .needed = &virtio_started_needed, 2728 .fields = (VMStateField[]) { 2729 VMSTATE_BOOL(started, VirtIODevice), 2730 VMSTATE_END_OF_LIST() 2731 } 2732 }; 2733 2734 static const VMStateDescription vmstate_virtio_disabled = { 2735 .name = "virtio/disabled", 2736 .version_id = 1, 2737 .minimum_version_id = 1, 2738 .needed = &virtio_disabled_needed, 2739 .fields = (VMStateField[]) { 2740 VMSTATE_BOOL(disabled, VirtIODevice), 2741 VMSTATE_END_OF_LIST() 2742 } 2743 }; 2744 2745 static const VMStateDescription vmstate_virtio = { 2746 .name = "virtio", 2747 .version_id = 1, 2748 .minimum_version_id = 1, 2749 .fields = (VMStateField[]) { 2750 VMSTATE_END_OF_LIST() 2751 }, 2752 
.subsections = (const VMStateDescription*[]) { 2753 &vmstate_virtio_device_endian, 2754 &vmstate_virtio_64bit_features, 2755 &vmstate_virtio_virtqueues, 2756 &vmstate_virtio_ringsize, 2757 &vmstate_virtio_broken, 2758 &vmstate_virtio_extra_state, 2759 &vmstate_virtio_started, 2760 &vmstate_virtio_packed_virtqueues, 2761 &vmstate_virtio_disabled, 2762 NULL 2763 } 2764 }; 2765 2766 int virtio_save(VirtIODevice *vdev, QEMUFile *f) 2767 { 2768 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2769 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2770 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 2771 uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff); 2772 int i; 2773 2774 if (k->save_config) { 2775 k->save_config(qbus->parent, f); 2776 } 2777 2778 qemu_put_8s(f, &vdev->status); 2779 qemu_put_8s(f, &vdev->isr); 2780 qemu_put_be16s(f, &vdev->queue_sel); 2781 qemu_put_be32s(f, &guest_features_lo); 2782 qemu_put_be32(f, vdev->config_len); 2783 qemu_put_buffer(f, vdev->config, vdev->config_len); 2784 2785 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2786 if (vdev->vq[i].vring.num == 0) 2787 break; 2788 } 2789 2790 qemu_put_be32(f, i); 2791 2792 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2793 if (vdev->vq[i].vring.num == 0) 2794 break; 2795 2796 qemu_put_be32(f, vdev->vq[i].vring.num); 2797 if (k->has_variable_vring_alignment) { 2798 qemu_put_be32(f, vdev->vq[i].vring.align); 2799 } 2800 /* 2801 * Save desc now, the rest of the ring addresses are saved in 2802 * subsections for VIRTIO-1 devices. 2803 */ 2804 qemu_put_be64(f, vdev->vq[i].vring.desc); 2805 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx); 2806 if (k->save_queue) { 2807 k->save_queue(qbus->parent, i, f); 2808 } 2809 } 2810 2811 if (vdc->save != NULL) { 2812 vdc->save(vdev, f); 2813 } 2814 2815 if (vdc->vmsd) { 2816 int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL); 2817 if (ret) { 2818 return ret; 2819 } 2820 } 2821 2822 /* Subsections */ 2823 return vmstate_save_state(f, &vmstate_virtio, vdev, NULL); 2824 } 2825 2826 /* A wrapper for use as a VMState .put function */ 2827 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size, 2828 const VMStateField *field, JSONWriter *vmdesc) 2829 { 2830 return virtio_save(VIRTIO_DEVICE(opaque), f); 2831 } 2832 2833 /* A wrapper for use as a VMState .get function */ 2834 static int coroutine_mixed_fn 2835 virtio_device_get(QEMUFile *f, void *opaque, size_t size, 2836 const VMStateField *field) 2837 { 2838 VirtIODevice *vdev = VIRTIO_DEVICE(opaque); 2839 DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev)); 2840 2841 return virtio_load(vdev, f, dc->vmsd->version_id); 2842 } 2843 2844 const VMStateInfo virtio_vmstate_info = { 2845 .name = "virtio", 2846 .get = virtio_device_get, 2847 .put = virtio_device_put, 2848 }; 2849 2850 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val) 2851 { 2852 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 2853 bool bad = (val & ~(vdev->host_features)) != 0; 2854 2855 val &= vdev->host_features; 2856 if (k->set_features) { 2857 k->set_features(vdev, val); 2858 } 2859 vdev->guest_features = val; 2860 return bad ? 
-1 : 0; 2861 } 2862 2863 typedef struct VirtioSetFeaturesNocheckData { 2864 Coroutine *co; 2865 VirtIODevice *vdev; 2866 uint64_t val; 2867 int ret; 2868 } VirtioSetFeaturesNocheckData; 2869 2870 static void virtio_set_features_nocheck_bh(void *opaque) 2871 { 2872 VirtioSetFeaturesNocheckData *data = opaque; 2873 2874 data->ret = virtio_set_features_nocheck(data->vdev, data->val); 2875 aio_co_wake(data->co); 2876 } 2877 2878 static int coroutine_mixed_fn 2879 virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val) 2880 { 2881 if (qemu_in_coroutine()) { 2882 VirtioSetFeaturesNocheckData data = { 2883 .co = qemu_coroutine_self(), 2884 .vdev = vdev, 2885 .val = val, 2886 }; 2887 aio_bh_schedule_oneshot(qemu_get_current_aio_context(), 2888 virtio_set_features_nocheck_bh, &data); 2889 qemu_coroutine_yield(); 2890 return data.ret; 2891 } else { 2892 return virtio_set_features_nocheck(vdev, val); 2893 } 2894 } 2895 2896 int virtio_set_features(VirtIODevice *vdev, uint64_t val) 2897 { 2898 int ret; 2899 /* 2900 * The driver must not attempt to set features after feature negotiation 2901 * has finished. 2902 */ 2903 if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) { 2904 return -EINVAL; 2905 } 2906 2907 if (val & (1ull << VIRTIO_F_BAD_FEATURE)) { 2908 qemu_log_mask(LOG_GUEST_ERROR, 2909 "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n", 2910 __func__, vdev->name); 2911 } 2912 2913 ret = virtio_set_features_nocheck(vdev, val); 2914 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { 2915 /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */ 2916 int i; 2917 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2918 if (vdev->vq[i].vring.num != 0) { 2919 virtio_init_region_cache(vdev, i); 2920 } 2921 } 2922 } 2923 if (!ret) { 2924 if (!virtio_device_started(vdev, vdev->status) && 2925 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2926 vdev->start_on_kick = true; 2927 } 2928 } 2929 return ret; 2930 } 2931 2932 size_t virtio_get_config_size(const VirtIOConfigSizeParams *params, 2933 uint64_t host_features) 2934 { 2935 size_t config_size = params->min_size; 2936 const VirtIOFeature *feature_sizes = params->feature_sizes; 2937 size_t i; 2938 2939 for (i = 0; feature_sizes[i].flags != 0; i++) { 2940 if (host_features & feature_sizes[i].flags) { 2941 config_size = MAX(feature_sizes[i].end, config_size); 2942 } 2943 } 2944 2945 assert(config_size <= params->max_size); 2946 return config_size; 2947 } 2948 2949 int coroutine_mixed_fn 2950 virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) 2951 { 2952 int i, ret; 2953 int32_t config_len; 2954 uint32_t num; 2955 uint32_t features; 2956 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2957 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2958 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 2959 2960 /* 2961 * We poison the endianness to ensure it does not get used before 2962 * subsections have been loaded. 2963 */ 2964 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN; 2965 2966 if (k->load_config) { 2967 ret = k->load_config(qbus->parent, f); 2968 if (ret) 2969 return ret; 2970 } 2971 2972 qemu_get_8s(f, &vdev->status); 2973 qemu_get_8s(f, &vdev->isr); 2974 qemu_get_be16s(f, &vdev->queue_sel); 2975 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) { 2976 return -1; 2977 } 2978 qemu_get_be32s(f, &features); 2979 2980 /* 2981 * Temporarily set guest_features low bits - needed by 2982 * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2983 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ. 
2984 * 2985 * Note: devices should always test host features in future - don't create 2986 * new dependencies like this. 2987 */ 2988 vdev->guest_features = features; 2989 2990 config_len = qemu_get_be32(f); 2991 2992 /* 2993 * There are cases where the incoming config can be bigger or smaller 2994 * than what we have; so load what we have space for, and skip 2995 * any excess that's in the stream. 2996 */ 2997 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len)); 2998 2999 while (config_len > vdev->config_len) { 3000 qemu_get_byte(f); 3001 config_len--; 3002 } 3003 3004 num = qemu_get_be32(f); 3005 3006 if (num > VIRTIO_QUEUE_MAX) { 3007 error_report("Invalid number of virtqueues: 0x%x", num); 3008 return -1; 3009 } 3010 3011 for (i = 0; i < num; i++) { 3012 vdev->vq[i].vring.num = qemu_get_be32(f); 3013 if (k->has_variable_vring_alignment) { 3014 vdev->vq[i].vring.align = qemu_get_be32(f); 3015 } 3016 vdev->vq[i].vring.desc = qemu_get_be64(f); 3017 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx); 3018 vdev->vq[i].signalled_used_valid = false; 3019 vdev->vq[i].notification = true; 3020 3021 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) { 3022 error_report("VQ %d address 0x0 " 3023 "inconsistent with Host index 0x%x", 3024 i, vdev->vq[i].last_avail_idx); 3025 return -1; 3026 } 3027 if (k->load_queue) { 3028 ret = k->load_queue(qbus->parent, i, f); 3029 if (ret) 3030 return ret; 3031 } 3032 } 3033 3034 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); 3035 3036 if (vdc->load != NULL) { 3037 ret = vdc->load(vdev, f, version_id); 3038 if (ret) { 3039 return ret; 3040 } 3041 } 3042 3043 if (vdc->vmsd) { 3044 ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id); 3045 if (ret) { 3046 return ret; 3047 } 3048 } 3049 3050 /* Subsections */ 3051 ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1); 3052 if (ret) { 3053 return ret; 3054 } 3055 3056 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) { 3057 vdev->device_endian = virtio_default_endian(); 3058 } 3059 3060 if (virtio_64bit_features_needed(vdev)) { 3061 /* 3062 * Subsection load filled vdev->guest_features. Run them 3063 * through virtio_set_features to sanity-check them against 3064 * host_features. 3065 */ 3066 uint64_t features64 = vdev->guest_features; 3067 if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) { 3068 error_report("Features 0x%" PRIx64 " unsupported. " 3069 "Allowed features: 0x%" PRIx64, 3070 features64, vdev->host_features); 3071 return -1; 3072 } 3073 } else { 3074 if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) { 3075 error_report("Features 0x%x unsupported. " 3076 "Allowed features: 0x%" PRIx64, 3077 features, vdev->host_features); 3078 return -1; 3079 } 3080 } 3081 3082 if (!virtio_device_started(vdev, vdev->status) && 3083 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 3084 vdev->start_on_kick = true; 3085 } 3086 3087 RCU_READ_LOCK_GUARD(); 3088 for (i = 0; i < num; i++) { 3089 if (vdev->vq[i].vring.desc) { 3090 uint16_t nheads; 3091 3092 /* 3093 * VIRTIO-1 devices migrate desc, used, and avail ring addresses so 3094 * only the region cache needs to be set up. Legacy devices need 3095 * to calculate used and avail ring addresses based on the desc 3096 * address. 
3097 */ 3098 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 3099 virtio_init_region_cache(vdev, i); 3100 } else { 3101 virtio_queue_update_rings(vdev, i); 3102 } 3103 3104 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 3105 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx; 3106 vdev->vq[i].shadow_avail_wrap_counter = 3107 vdev->vq[i].last_avail_wrap_counter; 3108 continue; 3109 } 3110 3111 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; 3112 /* Check it isn't doing strange things with descriptor numbers. */ 3113 if (nheads > vdev->vq[i].vring.num) { 3114 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x " 3115 "inconsistent with Host index 0x%x: delta 0x%x", 3116 i, vdev->vq[i].vring.num, 3117 vring_avail_idx(&vdev->vq[i]), 3118 vdev->vq[i].last_avail_idx, nheads); 3119 vdev->vq[i].used_idx = 0; 3120 vdev->vq[i].shadow_avail_idx = 0; 3121 vdev->vq[i].inuse = 0; 3122 continue; 3123 } 3124 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]); 3125 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]); 3126 3127 /* 3128 * Some devices migrate VirtQueueElements that have been popped 3129 * from the avail ring but not yet returned to the used ring. 3130 * Since max ring size < UINT16_MAX it's safe to use modulo 3131 * UINT16_MAX + 1 subtraction. 3132 */ 3133 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx - 3134 vdev->vq[i].used_idx); 3135 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) { 3136 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - " 3137 "used_idx 0x%x", 3138 i, vdev->vq[i].vring.num, 3139 vdev->vq[i].last_avail_idx, 3140 vdev->vq[i].used_idx); 3141 return -1; 3142 } 3143 } 3144 } 3145 3146 if (vdc->post_load) { 3147 ret = vdc->post_load(vdev); 3148 if (ret) { 3149 return ret; 3150 } 3151 } 3152 3153 return 0; 3154 } 3155 3156 void virtio_cleanup(VirtIODevice *vdev) 3157 { 3158 qemu_del_vm_change_state_handler(vdev->vmstate); 3159 } 3160 3161 static void virtio_vmstate_change(void *opaque, bool running, RunState state) 3162 { 3163 VirtIODevice *vdev = opaque; 3164 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 3165 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 3166 bool backend_run = running && virtio_device_started(vdev, vdev->status); 3167 vdev->vm_running = running; 3168 3169 if (backend_run) { 3170 virtio_set_status(vdev, vdev->status); 3171 } 3172 3173 if (k->vmstate_change) { 3174 k->vmstate_change(qbus->parent, backend_run); 3175 } 3176 3177 if (!backend_run) { 3178 virtio_set_status(vdev, vdev->status); 3179 } 3180 } 3181 3182 void virtio_instance_init_common(Object *proxy_obj, void *data, 3183 size_t vdev_size, const char *vdev_name) 3184 { 3185 DeviceState *vdev = data; 3186 3187 object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev, 3188 vdev_size, vdev_name, &error_abort, 3189 NULL); 3190 qdev_alias_all_properties(vdev, proxy_obj); 3191 } 3192 3193 void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size) 3194 { 3195 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 3196 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 3197 int i; 3198 int nvectors = k->query_nvectors ? 
k->query_nvectors(qbus->parent) : 0; 3199 3200 if (nvectors) { 3201 vdev->vector_queues = 3202 g_malloc0(sizeof(*vdev->vector_queues) * nvectors); 3203 } 3204 3205 vdev->start_on_kick = false; 3206 vdev->started = false; 3207 vdev->vhost_started = false; 3208 vdev->device_id = device_id; 3209 vdev->status = 0; 3210 qatomic_set(&vdev->isr, 0); 3211 vdev->queue_sel = 0; 3212 vdev->config_vector = VIRTIO_NO_VECTOR; 3213 vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX); 3214 vdev->vm_running = runstate_is_running(); 3215 vdev->broken = false; 3216 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 3217 vdev->vq[i].vector = VIRTIO_NO_VECTOR; 3218 vdev->vq[i].vdev = vdev; 3219 vdev->vq[i].queue_index = i; 3220 vdev->vq[i].host_notifier_enabled = false; 3221 } 3222 3223 vdev->name = virtio_id_to_name(device_id); 3224 vdev->config_len = config_size; 3225 if (vdev->config_len) { 3226 vdev->config = g_malloc0(config_size); 3227 } else { 3228 vdev->config = NULL; 3229 } 3230 vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev), 3231 virtio_vmstate_change, vdev); 3232 vdev->device_endian = virtio_default_endian(); 3233 vdev->use_guest_notifier_mask = true; 3234 } 3235 3236 /* 3237 * Only devices that have already been around prior to defining the virtio 3238 * standard support legacy mode; this includes devices not specified in the 3239 * standard. All newer devices conform to the virtio standard only. 3240 */ 3241 bool virtio_legacy_allowed(VirtIODevice *vdev) 3242 { 3243 switch (vdev->device_id) { 3244 case VIRTIO_ID_NET: 3245 case VIRTIO_ID_BLOCK: 3246 case VIRTIO_ID_CONSOLE: 3247 case VIRTIO_ID_RNG: 3248 case VIRTIO_ID_BALLOON: 3249 case VIRTIO_ID_RPMSG: 3250 case VIRTIO_ID_SCSI: 3251 case VIRTIO_ID_9P: 3252 case VIRTIO_ID_RPROC_SERIAL: 3253 case VIRTIO_ID_CAIF: 3254 return true; 3255 default: 3256 return false; 3257 } 3258 } 3259 3260 bool virtio_legacy_check_disabled(VirtIODevice *vdev) 3261 { 3262 return vdev->disable_legacy_check; 3263 } 3264 3265 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n) 3266 { 3267 return vdev->vq[n].vring.desc; 3268 } 3269 3270 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n) 3271 { 3272 return virtio_queue_get_desc_addr(vdev, n) != 0; 3273 } 3274 3275 bool virtio_queue_enabled(VirtIODevice *vdev, int n) 3276 { 3277 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 3278 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 3279 3280 if (k->queue_enabled) { 3281 return k->queue_enabled(qbus->parent, n); 3282 } 3283 return virtio_queue_enabled_legacy(vdev, n); 3284 } 3285 3286 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n) 3287 { 3288 return vdev->vq[n].vring.avail; 3289 } 3290 3291 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n) 3292 { 3293 return vdev->vq[n].vring.used; 3294 } 3295 3296 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n) 3297 { 3298 return sizeof(VRingDesc) * vdev->vq[n].vring.num; 3299 } 3300 3301 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n) 3302 { 3303 int s; 3304 3305 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 3306 return sizeof(struct VRingPackedDescEvent); 3307 } 3308 3309 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 
2 : 0; 3310 return offsetof(VRingAvail, ring) + 3311 sizeof(uint16_t) * vdev->vq[n].vring.num + s; 3312 } 3313 3314 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n) 3315 { 3316 int s; 3317 3318 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 3319 return sizeof(struct VRingPackedDescEvent); 3320 } 3321 3322 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; 3323 return offsetof(VRingUsed, ring) + 3324 sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s; 3325 } 3326 3327 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev, 3328 int n) 3329 { 3330 unsigned int avail, used; 3331 3332 avail = vdev->vq[n].last_avail_idx; 3333 avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15; 3334 3335 used = vdev->vq[n].used_idx; 3336 used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15; 3337 3338 return avail | used << 16; 3339 } 3340 3341 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev, 3342 int n) 3343 { 3344 return vdev->vq[n].last_avail_idx; 3345 } 3346 3347 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) 3348 { 3349 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 3350 return virtio_queue_packed_get_last_avail_idx(vdev, n); 3351 } else { 3352 return virtio_queue_split_get_last_avail_idx(vdev, n); 3353 } 3354 } 3355 3356 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev, 3357 int n, unsigned int idx) 3358 { 3359 struct VirtQueue *vq = &vdev->vq[n]; 3360 3361 vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff; 3362 vq->last_avail_wrap_counter = 3363 vq->shadow_avail_wrap_counter = !!(idx & 0x8000); 3364 idx >>= 16; 3365 vq->used_idx = idx & 0x7fff; 3366 vq->used_wrap_counter = !!(idx & 0x8000); 3367 } 3368 3369 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev, 3370 int n, unsigned int idx) 3371 { 3372 vdev->vq[n].last_avail_idx = idx; 3373 vdev->vq[n].shadow_avail_idx = idx; 3374 } 3375 3376 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, 3377 unsigned int idx) 3378 { 3379 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 3380 virtio_queue_packed_set_last_avail_idx(vdev, n, idx); 3381 } else { 3382 virtio_queue_split_set_last_avail_idx(vdev, n, idx); 3383 } 3384 } 3385 3386 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev, 3387 int n) 3388 { 3389 /* We don't have a reference like avail idx in shared memory */ 3390 return; 3391 } 3392 3393 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev, 3394 int n) 3395 { 3396 RCU_READ_LOCK_GUARD(); 3397 if (vdev->vq[n].vring.desc) { 3398 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]); 3399 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx; 3400 } 3401 } 3402 3403 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n) 3404 { 3405 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 3406 virtio_queue_packed_restore_last_avail_idx(vdev, n); 3407 } else { 3408 virtio_queue_split_restore_last_avail_idx(vdev, n); 3409 } 3410 } 3411 3412 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n) 3413 { 3414 /* used idx was updated through set_last_avail_idx() */ 3415 return; 3416 } 3417 3418 static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n) 3419 { 3420 RCU_READ_LOCK_GUARD(); 3421 if (vdev->vq[n].vring.desc) { 3422 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]); 3423 } 3424 } 3425 3426 void virtio_queue_update_used_idx(VirtIODevice *vdev, int 
n) 3427 { 3428 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 3429 return virtio_queue_packed_update_used_idx(vdev, n); 3430 } else { 3431 return virtio_split_packed_update_used_idx(vdev, n); 3432 } 3433 } 3434 3435 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n) 3436 { 3437 vdev->vq[n].signalled_used_valid = false; 3438 } 3439 3440 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n) 3441 { 3442 return vdev->vq + n; 3443 } 3444 3445 uint16_t virtio_get_queue_index(VirtQueue *vq) 3446 { 3447 return vq->queue_index; 3448 } 3449 3450 static void virtio_queue_guest_notifier_read(EventNotifier *n) 3451 { 3452 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier); 3453 if (event_notifier_test_and_clear(n)) { 3454 virtio_irq(vq); 3455 } 3456 } 3457 static void virtio_config_guest_notifier_read(EventNotifier *n) 3458 { 3459 VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier); 3460 3461 if (event_notifier_test_and_clear(n)) { 3462 virtio_notify_config(vdev); 3463 } 3464 } 3465 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, 3466 bool with_irqfd) 3467 { 3468 if (assign && !with_irqfd) { 3469 event_notifier_set_handler(&vq->guest_notifier, 3470 virtio_queue_guest_notifier_read); 3471 } else { 3472 event_notifier_set_handler(&vq->guest_notifier, NULL); 3473 } 3474 if (!assign) { 3475 /* Test and clear notifier before closing it, 3476 * in case poll callback didn't have time to run. */ 3477 virtio_queue_guest_notifier_read(&vq->guest_notifier); 3478 } 3479 } 3480 3481 void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev, 3482 bool assign, bool with_irqfd) 3483 { 3484 EventNotifier *n; 3485 n = &vdev->config_notifier; 3486 if (assign && !with_irqfd) { 3487 event_notifier_set_handler(n, virtio_config_guest_notifier_read); 3488 } else { 3489 event_notifier_set_handler(n, NULL); 3490 } 3491 if (!assign) { 3492 /* Test and clear notifier before closing it, 3493 * in case poll callback didn't have time to run. 
*/ 3494 virtio_config_guest_notifier_read(n); 3495 } 3496 } 3497 3498 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) 3499 { 3500 return &vq->guest_notifier; 3501 } 3502 3503 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n) 3504 { 3505 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 3506 3507 virtio_queue_set_notification(vq, 0); 3508 } 3509 3510 static bool virtio_queue_host_notifier_aio_poll(void *opaque) 3511 { 3512 EventNotifier *n = opaque; 3513 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 3514 3515 return vq->vring.desc && !virtio_queue_empty(vq); 3516 } 3517 3518 static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n) 3519 { 3520 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 3521 3522 virtio_queue_notify_vq(vq); 3523 } 3524 3525 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) 3526 { 3527 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 3528 3529 /* Caller polls once more after this to catch requests that race with us */ 3530 virtio_queue_set_notification(vq, 1); 3531 } 3532 3533 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) 3534 { 3535 aio_set_event_notifier(ctx, &vq->host_notifier, 3536 virtio_queue_host_notifier_read, 3537 virtio_queue_host_notifier_aio_poll, 3538 virtio_queue_host_notifier_aio_poll_ready); 3539 aio_set_event_notifier_poll(ctx, &vq->host_notifier, 3540 virtio_queue_host_notifier_aio_poll_begin, 3541 virtio_queue_host_notifier_aio_poll_end); 3542 } 3543 3544 /* 3545 * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use 3546 * this for rx virtqueues and similar cases where the virtqueue handler 3547 * function does not pop all elements. When the virtqueue is left non-empty 3548 * polling consumes CPU cycles and should not be used. 3549 */ 3550 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx) 3551 { 3552 aio_set_event_notifier(ctx, &vq->host_notifier, 3553 virtio_queue_host_notifier_read, 3554 NULL, NULL); 3555 } 3556 3557 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx) 3558 { 3559 aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL); 3560 } 3561 3562 void virtio_queue_host_notifier_read(EventNotifier *n) 3563 { 3564 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 3565 if (event_notifier_test_and_clear(n)) { 3566 virtio_queue_notify_vq(vq); 3567 } 3568 } 3569 3570 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) 3571 { 3572 return &vq->host_notifier; 3573 } 3574 3575 EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev) 3576 { 3577 return &vdev->config_notifier; 3578 } 3579 3580 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled) 3581 { 3582 vq->host_notifier_enabled = enabled; 3583 } 3584 3585 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n, 3586 MemoryRegion *mr, bool assign) 3587 { 3588 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 3589 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 3590 3591 if (k->set_host_notifier_mr) { 3592 return k->set_host_notifier_mr(qbus->parent, n, mr, assign); 3593 } 3594 3595 return -1; 3596 } 3597 3598 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name) 3599 { 3600 g_free(vdev->bus_name); 3601 vdev->bus_name = g_strdup(bus_name); 3602 } 3603 3604 void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...) 
3605 { 3606 va_list ap; 3607 3608 va_start(ap, fmt); 3609 error_vreport(fmt, ap); 3610 va_end(ap); 3611 3612 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 3613 vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET; 3614 virtio_notify_config(vdev); 3615 } 3616 3617 vdev->broken = true; 3618 } 3619 3620 static void virtio_memory_listener_commit(MemoryListener *listener) 3621 { 3622 VirtIODevice *vdev = container_of(listener, VirtIODevice, listener); 3623 int i; 3624 3625 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 3626 if (vdev->vq[i].vring.num == 0) { 3627 break; 3628 } 3629 virtio_init_region_cache(vdev, i); 3630 } 3631 } 3632 3633 static void virtio_device_realize(DeviceState *dev, Error **errp) 3634 { 3635 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 3636 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); 3637 Error *err = NULL; 3638 3639 /* Devices should either use vmsd or the load/save methods */ 3640 assert(!vdc->vmsd || !vdc->load); 3641 3642 if (vdc->realize != NULL) { 3643 vdc->realize(dev, &err); 3644 if (err != NULL) { 3645 error_propagate(errp, err); 3646 return; 3647 } 3648 } 3649 3650 virtio_bus_device_plugged(vdev, &err); 3651 if (err != NULL) { 3652 error_propagate(errp, err); 3653 vdc->unrealize(dev); 3654 return; 3655 } 3656 3657 vdev->listener.commit = virtio_memory_listener_commit; 3658 vdev->listener.name = "virtio"; 3659 memory_listener_register(&vdev->listener, vdev->dma_as); 3660 } 3661 3662 static void virtio_device_unrealize(DeviceState *dev) 3663 { 3664 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 3665 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); 3666 3667 memory_listener_unregister(&vdev->listener); 3668 virtio_bus_device_unplugged(vdev); 3669 3670 if (vdc->unrealize != NULL) { 3671 vdc->unrealize(dev); 3672 } 3673 3674 g_free(vdev->bus_name); 3675 vdev->bus_name = NULL; 3676 } 3677 3678 static void virtio_device_free_virtqueues(VirtIODevice *vdev) 3679 { 3680 int i; 3681 if (!vdev->vq) { 3682 return; 3683 } 3684 3685 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 3686 if (vdev->vq[i].vring.num == 0) { 3687 break; 3688 } 3689 virtio_virtqueue_reset_region_cache(&vdev->vq[i]); 3690 } 3691 g_free(vdev->vq); 3692 } 3693 3694 static void virtio_device_instance_finalize(Object *obj) 3695 { 3696 VirtIODevice *vdev = VIRTIO_DEVICE(obj); 3697 3698 virtio_device_free_virtqueues(vdev); 3699 3700 g_free(vdev->config); 3701 g_free(vdev->vector_queues); 3702 } 3703 3704 static Property virtio_properties[] = { 3705 DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features), 3706 DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true), 3707 DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true), 3708 DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice, 3709 disable_legacy_check, false), 3710 DEFINE_PROP_END_OF_LIST(), 3711 }; 3712 3713 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev) 3714 { 3715 VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev))); 3716 int i, n, r, err; 3717 3718 /* 3719 * Batch all the host notifiers in a single transaction to avoid 3720 * quadratic time complexity in address_space_update_ioeventfds(). 
3721 */ 3722 memory_region_transaction_begin(); 3723 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 3724 VirtQueue *vq = &vdev->vq[n]; 3725 if (!virtio_queue_get_num(vdev, n)) { 3726 continue; 3727 } 3728 r = virtio_bus_set_host_notifier(qbus, n, true); 3729 if (r < 0) { 3730 err = r; 3731 goto assign_error; 3732 } 3733 event_notifier_set_handler(&vq->host_notifier, 3734 virtio_queue_host_notifier_read); 3735 } 3736 3737 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 3738 /* Kick right away to begin processing requests already in vring */ 3739 VirtQueue *vq = &vdev->vq[n]; 3740 if (!vq->vring.num) { 3741 continue; 3742 } 3743 event_notifier_set(&vq->host_notifier); 3744 } 3745 memory_region_transaction_commit(); 3746 return 0; 3747 3748 assign_error: 3749 i = n; /* save n for a second iteration after transaction is committed. */ 3750 while (--n >= 0) { 3751 VirtQueue *vq = &vdev->vq[n]; 3752 if (!virtio_queue_get_num(vdev, n)) { 3753 continue; 3754 } 3755 3756 event_notifier_set_handler(&vq->host_notifier, NULL); 3757 r = virtio_bus_set_host_notifier(qbus, n, false); 3758 assert(r >= 0); 3759 } 3760 /* 3761 * The transaction expects the ioeventfds to be open when it 3762 * commits. Do it now, before the cleanup loop. 3763 */ 3764 memory_region_transaction_commit(); 3765 3766 while (--i >= 0) { 3767 if (!virtio_queue_get_num(vdev, i)) { 3768 continue; 3769 } 3770 virtio_bus_cleanup_host_notifier(qbus, i); 3771 } 3772 return err; 3773 } 3774 3775 int virtio_device_start_ioeventfd(VirtIODevice *vdev) 3776 { 3777 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 3778 VirtioBusState *vbus = VIRTIO_BUS(qbus); 3779 3780 return virtio_bus_start_ioeventfd(vbus); 3781 } 3782 3783 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev) 3784 { 3785 VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev))); 3786 int n, r; 3787 3788 /* 3789 * Batch all the host notifiers in a single transaction to avoid 3790 * quadratic time complexity in address_space_update_ioeventfds(). 3791 */ 3792 memory_region_transaction_begin(); 3793 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 3794 VirtQueue *vq = &vdev->vq[n]; 3795 3796 if (!virtio_queue_get_num(vdev, n)) { 3797 continue; 3798 } 3799 event_notifier_set_handler(&vq->host_notifier, NULL); 3800 r = virtio_bus_set_host_notifier(qbus, n, false); 3801 assert(r >= 0); 3802 } 3803 /* 3804 * The transaction expects the ioeventfds to be open when it 3805 * commits. Do it now, before the cleanup loop. 3806 */ 3807 memory_region_transaction_commit(); 3808 3809 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 3810 if (!virtio_queue_get_num(vdev, n)) { 3811 continue; 3812 } 3813 virtio_bus_cleanup_host_notifier(qbus, n); 3814 } 3815 } 3816 3817 int virtio_device_grab_ioeventfd(VirtIODevice *vdev) 3818 { 3819 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 3820 VirtioBusState *vbus = VIRTIO_BUS(qbus); 3821 3822 return virtio_bus_grab_ioeventfd(vbus); 3823 } 3824 3825 void virtio_device_release_ioeventfd(VirtIODevice *vdev) 3826 { 3827 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 3828 VirtioBusState *vbus = VIRTIO_BUS(qbus); 3829 3830 virtio_bus_release_ioeventfd(vbus); 3831 } 3832 3833 static void virtio_device_class_init(ObjectClass *klass, void *data) 3834 { 3835 /* Set the default value here. 
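 * (Illustrative: a concrete device overrides these hooks from its own
 * class_init; a hypothetical subclass would set, e.g.
 *
 *     vdc->realize      = my_device_realize;
 *     vdc->get_features = my_device_get_features;
 *
 * and inherit the ioeventfd start/stop defaults installed below.)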
*/ 3836 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 3837 DeviceClass *dc = DEVICE_CLASS(klass); 3838 3839 dc->realize = virtio_device_realize; 3840 dc->unrealize = virtio_device_unrealize; 3841 dc->bus_type = TYPE_VIRTIO_BUS; 3842 device_class_set_props(dc, virtio_properties); 3843 vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl; 3844 vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl; 3845 3846 vdc->legacy_features |= VIRTIO_LEGACY_FEATURES; 3847 } 3848 3849 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev) 3850 { 3851 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 3852 VirtioBusState *vbus = VIRTIO_BUS(qbus); 3853 3854 return virtio_bus_ioeventfd_enabled(vbus); 3855 } 3856 3857 VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path, 3858 uint16_t queue, 3859 Error **errp) 3860 { 3861 VirtIODevice *vdev; 3862 VirtQueueStatus *status; 3863 3864 vdev = qmp_find_virtio_device(path); 3865 if (vdev == NULL) { 3866 error_setg(errp, "Path %s is not a VirtIODevice", path); 3867 return NULL; 3868 } 3869 3870 if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) { 3871 error_setg(errp, "Invalid virtqueue number %d", queue); 3872 return NULL; 3873 } 3874 3875 status = g_new0(VirtQueueStatus, 1); 3876 status->name = g_strdup(vdev->name); 3877 status->queue_index = vdev->vq[queue].queue_index; 3878 status->inuse = vdev->vq[queue].inuse; 3879 status->vring_num = vdev->vq[queue].vring.num; 3880 status->vring_num_default = vdev->vq[queue].vring.num_default; 3881 status->vring_align = vdev->vq[queue].vring.align; 3882 status->vring_desc = vdev->vq[queue].vring.desc; 3883 status->vring_avail = vdev->vq[queue].vring.avail; 3884 status->vring_used = vdev->vq[queue].vring.used; 3885 status->used_idx = vdev->vq[queue].used_idx; 3886 status->signalled_used = vdev->vq[queue].signalled_used; 3887 status->signalled_used_valid = vdev->vq[queue].signalled_used_valid; 3888 3889 if (vdev->vhost_started) { 3890 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 3891 struct vhost_dev *hdev = vdc->get_vhost(vdev); 3892 3893 /* check if vq index exists for vhost as well */ 3894 if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) { 3895 status->has_last_avail_idx = true; 3896 3897 int vhost_vq_index = 3898 hdev->vhost_ops->vhost_get_vq_index(hdev, queue); 3899 struct vhost_vring_state state = { 3900 .index = vhost_vq_index, 3901 }; 3902 3903 status->last_avail_idx = 3904 hdev->vhost_ops->vhost_get_vring_base(hdev, &state); 3905 } 3906 } else { 3907 status->has_shadow_avail_idx = true; 3908 status->has_last_avail_idx = true; 3909 status->last_avail_idx = vdev->vq[queue].last_avail_idx; 3910 status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx; 3911 } 3912 3913 return status; 3914 } 3915 3916 static strList *qmp_decode_vring_desc_flags(uint16_t flags) 3917 { 3918 strList *list = NULL; 3919 strList *node; 3920 int i; 3921 3922 struct { 3923 uint16_t flag; 3924 const char *value; 3925 } map[] = { 3926 { VRING_DESC_F_NEXT, "next" }, 3927 { VRING_DESC_F_WRITE, "write" }, 3928 { VRING_DESC_F_INDIRECT, "indirect" }, 3929 { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" }, 3930 { 1 << VRING_PACKED_DESC_F_USED, "used" }, 3931 { 0, "" } 3932 }; 3933 3934 for (i = 0; map[i].flag; i++) { 3935 if ((map[i].flag & flags) == 0) { 3936 continue; 3937 } 3938 node = g_malloc0(sizeof(strList)); 3939 node->value = g_strdup(map[i].value); 3940 node->next = list; 3941 list = node; 3942 } 3943 3944 return list; 3945 } 3946
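/*
 * Example (editor's illustration): qmp_decode_vring_desc_flags() feeds the
 * x-query-virtio-queue-element command implemented below. A hypothetical
 * QMP exchange (the QOM path depends on how the device was created):
 *
 *     -> { "execute": "x-query-virtio-queue-element",
 *          "arguments": { "path": "/machine/peripheral/vblk0/virtio-backend",
 *                         "queue": 0 } }
 *     <- { "return": { "name": "virtio-blk", "index": 12,
 *                      "descs": [ { "addr": 84410368, "len": 4096,
 *                                   "flags": [ "write" ] } ], ... } }
 */

3947 VirtioQueueElement 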
*qmp_x_query_virtio_queue_element(const char *path, 3948 uint16_t queue, 3949 bool has_index, 3950 uint16_t index, 3951 Error **errp) 3952 { 3953 VirtIODevice *vdev; 3954 VirtQueue *vq; 3955 VirtioQueueElement *element = NULL; 3956 3957 vdev = qmp_find_virtio_device(path); 3958 if (vdev == NULL) { 3959 error_setg(errp, "Path %s is not a VirtIO device", path); 3960 return NULL; 3961 } 3962 3963 if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) { 3964 error_setg(errp, "Invalid virtqueue number %d", queue); 3965 return NULL; 3966 } 3967 vq = &vdev->vq[queue]; 3968 3969 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { 3970 error_setg(errp, "Packed ring not supported"); 3971 return NULL; 3972 } else { 3973 unsigned int head, i, max; 3974 VRingMemoryRegionCaches *caches; 3975 MemoryRegionCache indirect_desc_cache; 3976 MemoryRegionCache *desc_cache; 3977 VRingDesc desc; 3978 VirtioRingDescList *list = NULL; 3979 VirtioRingDescList *node; 3980 int rc; int ndescs; 3981 3982 address_space_cache_init_empty(&indirect_desc_cache); 3983 3984 RCU_READ_LOCK_GUARD(); 3985 3986 max = vq->vring.num; 3987 3988 if (!has_index) { 3989 head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num); 3990 } else { 3991 head = vring_avail_ring(vq, index % vq->vring.num); 3992 } 3993 i = head; 3994 3995 caches = vring_get_region_caches(vq); 3996 if (!caches) { 3997 error_setg(errp, "Region caches not initialized"); 3998 return NULL; 3999 } 4000 if (caches->desc.len < max * sizeof(VRingDesc)) { 4001 error_setg(errp, "Cannot map descriptor ring"); 4002 return NULL; 4003 } 4004 4005 desc_cache = &caches->desc; 4006 vring_split_desc_read(vdev, &desc, desc_cache, i); 4007 if (desc.flags & VRING_DESC_F_INDIRECT) { 4008 int64_t len; 4009 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, 4010 desc.addr, desc.len, false); 4011 desc_cache = &indirect_desc_cache; 4012 if (len < desc.len) { 4013 error_setg(errp, "Cannot map indirect buffer"); 4014 goto done; 4015 } 4016 4017 max = desc.len / sizeof(VRingDesc); 4018 i = 0; 4019 vring_split_desc_read(vdev, &desc, desc_cache, i); 4020 } 4021 4022 element = g_new0(VirtioQueueElement, 1); 4023 element->avail = g_new0(VirtioRingAvail, 1); 4024 element->used = g_new0(VirtioRingUsed, 1); 4025 element->name = g_strdup(vdev->name); 4026 element->index = head; 4027 element->avail->flags = vring_avail_flags(vq); 4028 element->avail->idx = vring_avail_idx(vq); 4029 element->avail->ring = head; 4030 element->used->flags = vring_used_flags(vq); 4031 element->used->idx = vring_used_idx(vq); 4032 ndescs = 0; 4033 4034 do { 4035 /* A buggy driver may produce an infinite loop */ 4036 if (ndescs >= max) { 4037 break; 4038 } 4039 node = g_new0(VirtioRingDescList, 1); 4040 node->value = g_new0(VirtioRingDesc, 1); 4041 node->value->addr = desc.addr; 4042 node->value->len = desc.len; 4043 node->value->flags = qmp_decode_vring_desc_flags(desc.flags); 4044 node->next = list; 4045 list = node; 4046 4047 ndescs++; 4048 rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, 4049 max, &i); 4050 } while (rc == VIRTQUEUE_READ_DESC_MORE); 4051 element->descs = list; 4052 done: 4053 address_space_cache_destroy(&indirect_desc_cache); 4054 } 4055 4056 return element; 4057 } 4058 4059 static const TypeInfo virtio_device_info = { 4060 .name = TYPE_VIRTIO_DEVICE, 4061 .parent = TYPE_DEVICE, 4062 .instance_size = sizeof(VirtIODevice), 4063 .class_init = virtio_device_class_init, 4064 .instance_finalize = virtio_device_instance_finalize, 4065 .abstract = true, 4066 
.class_size = sizeof(VirtioDeviceClass), 4067 }; 4068 4069 static void virtio_register_types(void) 4070 { 4071 type_register_static(&virtio_device_info); 4072 } 4073 4074 type_init(virtio_register_types) 4075
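/*
 * Example (editor's illustration): a device built on this core registers
 * its subclass through the same QOM machinery. All names below are
 * hypothetical:
 *
 *     static const TypeInfo my_virtio_dummy_info = {
 *         .name          = "my-virtio-dummy",
 *         .parent        = TYPE_VIRTIO_DEVICE,
 *         .instance_size = sizeof(MyVirtIODummy),
 *         .class_init    = my_virtio_dummy_class_init,
 *     };
 *
 *     static void my_virtio_dummy_register_types(void)
 *     {
 *         type_register_static(&my_virtio_dummy_info);
 *     }
 *     type_init(my_virtio_dummy_register_types)
 *
 * Its class_init would typically set vdc->realize, vdc->get_features and
 * vdc->get_config, and its realize would call virtio_init() and
 * virtio_add_queue() to create the device's virtqueues.
 */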