1 /* 2 * Virtio Support 3 * 4 * Copyright IBM, Corp. 2007 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qapi/error.h" 16 #include "qemu-common.h" 17 #include "cpu.h" 18 #include "trace.h" 19 #include "exec/address-spaces.h" 20 #include "qemu/error-report.h" 21 #include "hw/virtio/virtio.h" 22 #include "qemu/atomic.h" 23 #include "hw/virtio/virtio-bus.h" 24 #include "hw/virtio/virtio-access.h" 25 #include "sysemu/dma.h" 26 27 /* 28 * The alignment to use between consumer and producer parts of vring. 29 * x86 pagesize again. This is the default, used by transports like PCI 30 * which don't provide a means for the guest to tell the host the alignment. 31 */ 32 #define VIRTIO_PCI_VRING_ALIGN 4096 33 34 typedef struct VRingDesc 35 { 36 uint64_t addr; 37 uint32_t len; 38 uint16_t flags; 39 uint16_t next; 40 } VRingDesc; 41 42 typedef struct VRingAvail 43 { 44 uint16_t flags; 45 uint16_t idx; 46 uint16_t ring[0]; 47 } VRingAvail; 48 49 typedef struct VRingUsedElem 50 { 51 uint32_t id; 52 uint32_t len; 53 } VRingUsedElem; 54 55 typedef struct VRingUsed 56 { 57 uint16_t flags; 58 uint16_t idx; 59 VRingUsedElem ring[0]; 60 } VRingUsed; 61 62 typedef struct VRingMemoryRegionCaches { 63 struct rcu_head rcu; 64 MemoryRegionCache desc; 65 MemoryRegionCache avail; 66 MemoryRegionCache used; 67 } VRingMemoryRegionCaches; 68 69 typedef struct VRing 70 { 71 unsigned int num; 72 unsigned int num_default; 73 unsigned int align; 74 hwaddr desc; 75 hwaddr avail; 76 hwaddr used; 77 VRingMemoryRegionCaches *caches; 78 } VRing; 79 80 struct VirtQueue 81 { 82 VRing vring; 83 84 /* Next head to pop */ 85 uint16_t last_avail_idx; 86 87 /* Last avail_idx read from VQ. */ 88 uint16_t shadow_avail_idx; 89 90 uint16_t used_idx; 91 92 /* Last used index value we have signalled on */ 93 uint16_t signalled_used; 94 95 /* Last used index value we have signalled on */ 96 bool signalled_used_valid; 97 98 /* Notification enabled? */ 99 bool notification; 100 101 uint16_t queue_index; 102 103 unsigned int inuse; 104 105 uint16_t vector; 106 VirtIOHandleOutput handle_output; 107 VirtIOHandleAIOOutput handle_aio_output; 108 VirtIODevice *vdev; 109 EventNotifier guest_notifier; 110 EventNotifier host_notifier; 111 QLIST_ENTRY(VirtQueue) node; 112 }; 113 114 static void virtio_free_region_cache(VRingMemoryRegionCaches *caches) 115 { 116 if (!caches) { 117 return; 118 } 119 120 address_space_cache_destroy(&caches->desc); 121 address_space_cache_destroy(&caches->avail); 122 address_space_cache_destroy(&caches->used); 123 g_free(caches); 124 } 125 126 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq) 127 { 128 VRingMemoryRegionCaches *caches; 129 130 caches = atomic_read(&vq->vring.caches); 131 atomic_rcu_set(&vq->vring.caches, NULL); 132 if (caches) { 133 call_rcu(caches, virtio_free_region_cache, rcu); 134 } 135 } 136 137 static void virtio_init_region_cache(VirtIODevice *vdev, int n) 138 { 139 VirtQueue *vq = &vdev->vq[n]; 140 VRingMemoryRegionCaches *old = vq->vring.caches; 141 VRingMemoryRegionCaches *new = NULL; 142 hwaddr addr, size; 143 int event_size; 144 int64_t len; 145 146 event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; 147 148 addr = vq->vring.desc; 149 if (!addr) { 150 goto out_no_cache; 151 } 152 new = g_new0(VRingMemoryRegionCaches, 1); 153 size = virtio_queue_get_desc_size(vdev, n); 154 len = address_space_cache_init(&new->desc, vdev->dma_as, 155 addr, size, false); 156 if (len < size) { 157 virtio_error(vdev, "Cannot map desc"); 158 goto err_desc; 159 } 160 161 size = virtio_queue_get_used_size(vdev, n) + event_size; 162 len = address_space_cache_init(&new->used, vdev->dma_as, 163 vq->vring.used, size, true); 164 if (len < size) { 165 virtio_error(vdev, "Cannot map used"); 166 goto err_used; 167 } 168 169 size = virtio_queue_get_avail_size(vdev, n) + event_size; 170 len = address_space_cache_init(&new->avail, vdev->dma_as, 171 vq->vring.avail, size, false); 172 if (len < size) { 173 virtio_error(vdev, "Cannot map avail"); 174 goto err_avail; 175 } 176 177 atomic_rcu_set(&vq->vring.caches, new); 178 if (old) { 179 call_rcu(old, virtio_free_region_cache, rcu); 180 } 181 return; 182 183 err_avail: 184 address_space_cache_destroy(&new->avail); 185 err_used: 186 address_space_cache_destroy(&new->used); 187 err_desc: 188 address_space_cache_destroy(&new->desc); 189 out_no_cache: 190 g_free(new); 191 virtio_virtqueue_reset_region_cache(vq); 192 } 193 194 /* virt queue functions */ 195 void virtio_queue_update_rings(VirtIODevice *vdev, int n) 196 { 197 VRing *vring = &vdev->vq[n].vring; 198 199 if (!vring->num || !vring->desc || !vring->align) { 200 /* not yet setup -> nothing to do */ 201 return; 202 } 203 vring->avail = vring->desc + vring->num * sizeof(VRingDesc); 204 vring->used = vring_align(vring->avail + 205 offsetof(VRingAvail, ring[vring->num]), 206 vring->align); 207 virtio_init_region_cache(vdev, n); 208 } 209 210 /* Called within rcu_read_lock(). */ 211 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc, 212 MemoryRegionCache *cache, int i) 213 { 214 address_space_read_cached(cache, i * sizeof(VRingDesc), 215 desc, sizeof(VRingDesc)); 216 virtio_tswap64s(vdev, &desc->addr); 217 virtio_tswap32s(vdev, &desc->len); 218 virtio_tswap16s(vdev, &desc->flags); 219 virtio_tswap16s(vdev, &desc->next); 220 } 221 222 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq) 223 { 224 VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); 225 assert(caches != NULL); 226 return caches; 227 } 228 /* Called within rcu_read_lock(). */ 229 static inline uint16_t vring_avail_flags(VirtQueue *vq) 230 { 231 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); 232 hwaddr pa = offsetof(VRingAvail, flags); 233 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); 234 } 235 236 /* Called within rcu_read_lock(). */ 237 static inline uint16_t vring_avail_idx(VirtQueue *vq) 238 { 239 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); 240 hwaddr pa = offsetof(VRingAvail, idx); 241 vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); 242 return vq->shadow_avail_idx; 243 } 244 245 /* Called within rcu_read_lock(). */ 246 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) 247 { 248 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); 249 hwaddr pa = offsetof(VRingAvail, ring[i]); 250 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); 251 } 252 253 /* Called within rcu_read_lock(). */ 254 static inline uint16_t vring_get_used_event(VirtQueue *vq) 255 { 256 return vring_avail_ring(vq, vq->vring.num); 257 } 258 259 /* Called within rcu_read_lock(). */ 260 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, 261 int i) 262 { 263 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); 264 hwaddr pa = offsetof(VRingUsed, ring[i]); 265 virtio_tswap32s(vq->vdev, &uelem->id); 266 virtio_tswap32s(vq->vdev, &uelem->len); 267 address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem)); 268 address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem)); 269 } 270 271 /* Called within rcu_read_lock(). */ 272 static uint16_t vring_used_idx(VirtQueue *vq) 273 { 274 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); 275 hwaddr pa = offsetof(VRingUsed, idx); 276 return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); 277 } 278 279 /* Called within rcu_read_lock(). */ 280 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val) 281 { 282 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); 283 hwaddr pa = offsetof(VRingUsed, idx); 284 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); 285 address_space_cache_invalidate(&caches->used, pa, sizeof(val)); 286 vq->used_idx = val; 287 } 288 289 /* Called within rcu_read_lock(). */ 290 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) 291 { 292 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); 293 VirtIODevice *vdev = vq->vdev; 294 hwaddr pa = offsetof(VRingUsed, flags); 295 uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); 296 297 virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask); 298 address_space_cache_invalidate(&caches->used, pa, sizeof(flags)); 299 } 300 301 /* Called within rcu_read_lock(). */ 302 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) 303 { 304 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); 305 VirtIODevice *vdev = vq->vdev; 306 hwaddr pa = offsetof(VRingUsed, flags); 307 uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); 308 309 virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask); 310 address_space_cache_invalidate(&caches->used, pa, sizeof(flags)); 311 } 312 313 /* Called within rcu_read_lock(). */ 314 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val) 315 { 316 VRingMemoryRegionCaches *caches; 317 hwaddr pa; 318 if (!vq->notification) { 319 return; 320 } 321 322 caches = vring_get_region_caches(vq); 323 pa = offsetof(VRingUsed, ring[vq->vring.num]); 324 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); 325 address_space_cache_invalidate(&caches->used, pa, sizeof(val)); 326 } 327 328 void virtio_queue_set_notification(VirtQueue *vq, int enable) 329 { 330 vq->notification = enable; 331 332 if (!vq->vring.desc) { 333 return; 334 } 335 336 rcu_read_lock(); 337 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { 338 vring_set_avail_event(vq, vring_avail_idx(vq)); 339 } else if (enable) { 340 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); 341 } else { 342 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); 343 } 344 if (enable) { 345 /* Expose avail event/used flags before caller checks the avail idx. */ 346 smp_mb(); 347 } 348 rcu_read_unlock(); 349 } 350 351 int virtio_queue_ready(VirtQueue *vq) 352 { 353 return vq->vring.avail != 0; 354 } 355 356 /* Fetch avail_idx from VQ memory only when we really need to know if 357 * guest has added some buffers. 358 * Called within rcu_read_lock(). */ 359 static int virtio_queue_empty_rcu(VirtQueue *vq) 360 { 361 if (unlikely(vq->vdev->broken)) { 362 return 1; 363 } 364 365 if (unlikely(!vq->vring.avail)) { 366 return 1; 367 } 368 369 if (vq->shadow_avail_idx != vq->last_avail_idx) { 370 return 0; 371 } 372 373 return vring_avail_idx(vq) == vq->last_avail_idx; 374 } 375 376 int virtio_queue_empty(VirtQueue *vq) 377 { 378 bool empty; 379 380 if (unlikely(vq->vdev->broken)) { 381 return 1; 382 } 383 384 if (unlikely(!vq->vring.avail)) { 385 return 1; 386 } 387 388 if (vq->shadow_avail_idx != vq->last_avail_idx) { 389 return 0; 390 } 391 392 rcu_read_lock(); 393 empty = vring_avail_idx(vq) == vq->last_avail_idx; 394 rcu_read_unlock(); 395 return empty; 396 } 397 398 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, 399 unsigned int len) 400 { 401 AddressSpace *dma_as = vq->vdev->dma_as; 402 unsigned int offset; 403 int i; 404 405 offset = 0; 406 for (i = 0; i < elem->in_num; i++) { 407 size_t size = MIN(len - offset, elem->in_sg[i].iov_len); 408 409 dma_memory_unmap(dma_as, elem->in_sg[i].iov_base, 410 elem->in_sg[i].iov_len, 411 DMA_DIRECTION_FROM_DEVICE, size); 412 413 offset += size; 414 } 415 416 for (i = 0; i < elem->out_num; i++) 417 dma_memory_unmap(dma_as, elem->out_sg[i].iov_base, 418 elem->out_sg[i].iov_len, 419 DMA_DIRECTION_TO_DEVICE, 420 elem->out_sg[i].iov_len); 421 } 422 423 /* virtqueue_detach_element: 424 * @vq: The #VirtQueue 425 * @elem: The #VirtQueueElement 426 * @len: number of bytes written 427 * 428 * Detach the element from the virtqueue. This function is suitable for device 429 * reset or other situations where a #VirtQueueElement is simply freed and will 430 * not be pushed or discarded. 431 */ 432 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem, 433 unsigned int len) 434 { 435 vq->inuse--; 436 virtqueue_unmap_sg(vq, elem, len); 437 } 438 439 /* virtqueue_unpop: 440 * @vq: The #VirtQueue 441 * @elem: The #VirtQueueElement 442 * @len: number of bytes written 443 * 444 * Pretend the most recent element wasn't popped from the virtqueue. The next 445 * call to virtqueue_pop() will refetch the element. 446 */ 447 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem, 448 unsigned int len) 449 { 450 vq->last_avail_idx--; 451 virtqueue_detach_element(vq, elem, len); 452 } 453 454 /* virtqueue_rewind: 455 * @vq: The #VirtQueue 456 * @num: Number of elements to push back 457 * 458 * Pretend that elements weren't popped from the virtqueue. The next 459 * virtqueue_pop() will refetch the oldest element. 460 * 461 * Use virtqueue_unpop() instead if you have a VirtQueueElement. 462 * 463 * Returns: true on success, false if @num is greater than the number of in use 464 * elements. 465 */ 466 bool virtqueue_rewind(VirtQueue *vq, unsigned int num) 467 { 468 if (num > vq->inuse) { 469 return false; 470 } 471 vq->last_avail_idx -= num; 472 vq->inuse -= num; 473 return true; 474 } 475 476 /* Called within rcu_read_lock(). */ 477 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, 478 unsigned int len, unsigned int idx) 479 { 480 VRingUsedElem uelem; 481 482 trace_virtqueue_fill(vq, elem, len, idx); 483 484 virtqueue_unmap_sg(vq, elem, len); 485 486 if (unlikely(vq->vdev->broken)) { 487 return; 488 } 489 490 if (unlikely(!vq->vring.used)) { 491 return; 492 } 493 494 idx = (idx + vq->used_idx) % vq->vring.num; 495 496 uelem.id = elem->index; 497 uelem.len = len; 498 vring_used_write(vq, &uelem, idx); 499 } 500 501 /* Called within rcu_read_lock(). */ 502 void virtqueue_flush(VirtQueue *vq, unsigned int count) 503 { 504 uint16_t old, new; 505 506 if (unlikely(vq->vdev->broken)) { 507 vq->inuse -= count; 508 return; 509 } 510 511 if (unlikely(!vq->vring.used)) { 512 return; 513 } 514 515 /* Make sure buffer is written before we update index. */ 516 smp_wmb(); 517 trace_virtqueue_flush(vq, count); 518 old = vq->used_idx; 519 new = old + count; 520 vring_used_idx_set(vq, new); 521 vq->inuse -= count; 522 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) 523 vq->signalled_used_valid = false; 524 } 525 526 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, 527 unsigned int len) 528 { 529 rcu_read_lock(); 530 virtqueue_fill(vq, elem, len, 0); 531 virtqueue_flush(vq, 1); 532 rcu_read_unlock(); 533 } 534 535 /* Called within rcu_read_lock(). */ 536 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) 537 { 538 uint16_t num_heads = vring_avail_idx(vq) - idx; 539 540 /* Check it isn't doing very strange things with descriptor numbers. */ 541 if (num_heads > vq->vring.num) { 542 virtio_error(vq->vdev, "Guest moved used index from %u to %u", 543 idx, vq->shadow_avail_idx); 544 return -EINVAL; 545 } 546 /* On success, callers read a descriptor at vq->last_avail_idx. 547 * Make sure descriptor read does not bypass avail index read. */ 548 if (num_heads) { 549 smp_rmb(); 550 } 551 552 return num_heads; 553 } 554 555 /* Called within rcu_read_lock(). */ 556 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx, 557 unsigned int *head) 558 { 559 /* Grab the next descriptor number they're advertising, and increment 560 * the index we've seen. */ 561 *head = vring_avail_ring(vq, idx % vq->vring.num); 562 563 /* If their number is silly, that's a fatal mistake. */ 564 if (*head >= vq->vring.num) { 565 virtio_error(vq->vdev, "Guest says index %u is available", *head); 566 return false; 567 } 568 569 return true; 570 } 571 572 enum { 573 VIRTQUEUE_READ_DESC_ERROR = -1, 574 VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */ 575 VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */ 576 }; 577 578 static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, 579 MemoryRegionCache *desc_cache, unsigned int max, 580 unsigned int *next) 581 { 582 /* If this descriptor says it doesn't chain, we're done. */ 583 if (!(desc->flags & VRING_DESC_F_NEXT)) { 584 return VIRTQUEUE_READ_DESC_DONE; 585 } 586 587 /* Check they're not leading us off end of descriptors. */ 588 *next = desc->next; 589 /* Make sure compiler knows to grab that: we don't want it changing! */ 590 smp_wmb(); 591 592 if (*next >= max) { 593 virtio_error(vdev, "Desc next is %u", *next); 594 return VIRTQUEUE_READ_DESC_ERROR; 595 } 596 597 vring_desc_read(vdev, desc, desc_cache, *next); 598 return VIRTQUEUE_READ_DESC_MORE; 599 } 600 601 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, 602 unsigned int *out_bytes, 603 unsigned max_in_bytes, unsigned max_out_bytes) 604 { 605 VirtIODevice *vdev = vq->vdev; 606 unsigned int max, idx; 607 unsigned int total_bufs, in_total, out_total; 608 VRingMemoryRegionCaches *caches; 609 MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; 610 int64_t len = 0; 611 int rc; 612 613 if (unlikely(!vq->vring.desc)) { 614 if (in_bytes) { 615 *in_bytes = 0; 616 } 617 if (out_bytes) { 618 *out_bytes = 0; 619 } 620 return; 621 } 622 623 rcu_read_lock(); 624 idx = vq->last_avail_idx; 625 total_bufs = in_total = out_total = 0; 626 627 max = vq->vring.num; 628 caches = vring_get_region_caches(vq); 629 if (caches->desc.len < max * sizeof(VRingDesc)) { 630 virtio_error(vdev, "Cannot map descriptor ring"); 631 goto err; 632 } 633 634 while ((rc = virtqueue_num_heads(vq, idx)) > 0) { 635 MemoryRegionCache *desc_cache = &caches->desc; 636 unsigned int num_bufs; 637 VRingDesc desc; 638 unsigned int i; 639 640 num_bufs = total_bufs; 641 642 if (!virtqueue_get_head(vq, idx++, &i)) { 643 goto err; 644 } 645 646 vring_desc_read(vdev, &desc, desc_cache, i); 647 648 if (desc.flags & VRING_DESC_F_INDIRECT) { 649 if (desc.len % sizeof(VRingDesc)) { 650 virtio_error(vdev, "Invalid size for indirect buffer table"); 651 goto err; 652 } 653 654 /* If we've got too many, that implies a descriptor loop. */ 655 if (num_bufs >= max) { 656 virtio_error(vdev, "Looped descriptor"); 657 goto err; 658 } 659 660 /* loop over the indirect descriptor table */ 661 len = address_space_cache_init(&indirect_desc_cache, 662 vdev->dma_as, 663 desc.addr, desc.len, false); 664 desc_cache = &indirect_desc_cache; 665 if (len < desc.len) { 666 virtio_error(vdev, "Cannot map indirect buffer"); 667 goto err; 668 } 669 670 max = desc.len / sizeof(VRingDesc); 671 num_bufs = i = 0; 672 vring_desc_read(vdev, &desc, desc_cache, i); 673 } 674 675 do { 676 /* If we've got too many, that implies a descriptor loop. */ 677 if (++num_bufs > max) { 678 virtio_error(vdev, "Looped descriptor"); 679 goto err; 680 } 681 682 if (desc.flags & VRING_DESC_F_WRITE) { 683 in_total += desc.len; 684 } else { 685 out_total += desc.len; 686 } 687 if (in_total >= max_in_bytes && out_total >= max_out_bytes) { 688 goto done; 689 } 690 691 rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i); 692 } while (rc == VIRTQUEUE_READ_DESC_MORE); 693 694 if (rc == VIRTQUEUE_READ_DESC_ERROR) { 695 goto err; 696 } 697 698 if (desc_cache == &indirect_desc_cache) { 699 address_space_cache_destroy(&indirect_desc_cache); 700 total_bufs++; 701 } else { 702 total_bufs = num_bufs; 703 } 704 } 705 706 if (rc < 0) { 707 goto err; 708 } 709 710 done: 711 address_space_cache_destroy(&indirect_desc_cache); 712 if (in_bytes) { 713 *in_bytes = in_total; 714 } 715 if (out_bytes) { 716 *out_bytes = out_total; 717 } 718 rcu_read_unlock(); 719 return; 720 721 err: 722 in_total = out_total = 0; 723 goto done; 724 } 725 726 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, 727 unsigned int out_bytes) 728 { 729 unsigned int in_total, out_total; 730 731 virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes); 732 return in_bytes <= in_total && out_bytes <= out_total; 733 } 734 735 static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg, 736 hwaddr *addr, struct iovec *iov, 737 unsigned int max_num_sg, bool is_write, 738 hwaddr pa, size_t sz) 739 { 740 bool ok = false; 741 unsigned num_sg = *p_num_sg; 742 assert(num_sg <= max_num_sg); 743 744 if (!sz) { 745 virtio_error(vdev, "virtio: zero sized buffers are not allowed"); 746 goto out; 747 } 748 749 while (sz) { 750 hwaddr len = sz; 751 752 if (num_sg == max_num_sg) { 753 virtio_error(vdev, "virtio: too many write descriptors in " 754 "indirect table"); 755 goto out; 756 } 757 758 iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len, 759 is_write ? 760 DMA_DIRECTION_FROM_DEVICE : 761 DMA_DIRECTION_TO_DEVICE); 762 if (!iov[num_sg].iov_base) { 763 virtio_error(vdev, "virtio: bogus descriptor or out of resources"); 764 goto out; 765 } 766 767 iov[num_sg].iov_len = len; 768 addr[num_sg] = pa; 769 770 sz -= len; 771 pa += len; 772 num_sg++; 773 } 774 ok = true; 775 776 out: 777 *p_num_sg = num_sg; 778 return ok; 779 } 780 781 /* Only used by error code paths before we have a VirtQueueElement (therefore 782 * virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to 783 * yet. 784 */ 785 static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num, 786 struct iovec *iov) 787 { 788 unsigned int i; 789 790 for (i = 0; i < out_num + in_num; i++) { 791 int is_write = i >= out_num; 792 793 cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0); 794 iov++; 795 } 796 } 797 798 static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg, 799 hwaddr *addr, unsigned int *num_sg, 800 int is_write) 801 { 802 unsigned int i; 803 hwaddr len; 804 805 for (i = 0; i < *num_sg; i++) { 806 len = sg[i].iov_len; 807 sg[i].iov_base = dma_memory_map(vdev->dma_as, 808 addr[i], &len, is_write ? 809 DMA_DIRECTION_FROM_DEVICE : 810 DMA_DIRECTION_TO_DEVICE); 811 if (!sg[i].iov_base) { 812 error_report("virtio: error trying to map MMIO memory"); 813 exit(1); 814 } 815 if (len != sg[i].iov_len) { 816 error_report("virtio: unexpected memory split"); 817 exit(1); 818 } 819 } 820 } 821 822 void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem) 823 { 824 virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1); 825 virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0); 826 } 827 828 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num) 829 { 830 VirtQueueElement *elem; 831 size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0])); 832 size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]); 833 size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]); 834 size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0])); 835 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); 836 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); 837 838 assert(sz >= sizeof(VirtQueueElement)); 839 elem = g_malloc(out_sg_end); 840 trace_virtqueue_alloc_element(elem, sz, in_num, out_num); 841 elem->out_num = out_num; 842 elem->in_num = in_num; 843 elem->in_addr = (void *)elem + in_addr_ofs; 844 elem->out_addr = (void *)elem + out_addr_ofs; 845 elem->in_sg = (void *)elem + in_sg_ofs; 846 elem->out_sg = (void *)elem + out_sg_ofs; 847 return elem; 848 } 849 850 void *virtqueue_pop(VirtQueue *vq, size_t sz) 851 { 852 unsigned int i, head, max; 853 VRingMemoryRegionCaches *caches; 854 MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; 855 MemoryRegionCache *desc_cache; 856 int64_t len; 857 VirtIODevice *vdev = vq->vdev; 858 VirtQueueElement *elem = NULL; 859 unsigned out_num, in_num, elem_entries; 860 hwaddr addr[VIRTQUEUE_MAX_SIZE]; 861 struct iovec iov[VIRTQUEUE_MAX_SIZE]; 862 VRingDesc desc; 863 int rc; 864 865 if (unlikely(vdev->broken)) { 866 return NULL; 867 } 868 rcu_read_lock(); 869 if (virtio_queue_empty_rcu(vq)) { 870 goto done; 871 } 872 /* Needed after virtio_queue_empty(), see comment in 873 * virtqueue_num_heads(). */ 874 smp_rmb(); 875 876 /* When we start there are none of either input nor output. */ 877 out_num = in_num = elem_entries = 0; 878 879 max = vq->vring.num; 880 881 if (vq->inuse >= vq->vring.num) { 882 virtio_error(vdev, "Virtqueue size exceeded"); 883 goto done; 884 } 885 886 if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) { 887 goto done; 888 } 889 890 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { 891 vring_set_avail_event(vq, vq->last_avail_idx); 892 } 893 894 i = head; 895 896 caches = vring_get_region_caches(vq); 897 if (caches->desc.len < max * sizeof(VRingDesc)) { 898 virtio_error(vdev, "Cannot map descriptor ring"); 899 goto done; 900 } 901 902 desc_cache = &caches->desc; 903 vring_desc_read(vdev, &desc, desc_cache, i); 904 if (desc.flags & VRING_DESC_F_INDIRECT) { 905 if (desc.len % sizeof(VRingDesc)) { 906 virtio_error(vdev, "Invalid size for indirect buffer table"); 907 goto done; 908 } 909 910 /* loop over the indirect descriptor table */ 911 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, 912 desc.addr, desc.len, false); 913 desc_cache = &indirect_desc_cache; 914 if (len < desc.len) { 915 virtio_error(vdev, "Cannot map indirect buffer"); 916 goto done; 917 } 918 919 max = desc.len / sizeof(VRingDesc); 920 i = 0; 921 vring_desc_read(vdev, &desc, desc_cache, i); 922 } 923 924 /* Collect all the descriptors */ 925 do { 926 bool map_ok; 927 928 if (desc.flags & VRING_DESC_F_WRITE) { 929 map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num, 930 iov + out_num, 931 VIRTQUEUE_MAX_SIZE - out_num, true, 932 desc.addr, desc.len); 933 } else { 934 if (in_num) { 935 virtio_error(vdev, "Incorrect order for descriptors"); 936 goto err_undo_map; 937 } 938 map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov, 939 VIRTQUEUE_MAX_SIZE, false, 940 desc.addr, desc.len); 941 } 942 if (!map_ok) { 943 goto err_undo_map; 944 } 945 946 /* If we've got too many, that implies a descriptor loop. */ 947 if (++elem_entries > max) { 948 virtio_error(vdev, "Looped descriptor"); 949 goto err_undo_map; 950 } 951 952 rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i); 953 } while (rc == VIRTQUEUE_READ_DESC_MORE); 954 955 if (rc == VIRTQUEUE_READ_DESC_ERROR) { 956 goto err_undo_map; 957 } 958 959 /* Now copy what we have collected and mapped */ 960 elem = virtqueue_alloc_element(sz, out_num, in_num); 961 elem->index = head; 962 for (i = 0; i < out_num; i++) { 963 elem->out_addr[i] = addr[i]; 964 elem->out_sg[i] = iov[i]; 965 } 966 for (i = 0; i < in_num; i++) { 967 elem->in_addr[i] = addr[out_num + i]; 968 elem->in_sg[i] = iov[out_num + i]; 969 } 970 971 vq->inuse++; 972 973 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); 974 done: 975 address_space_cache_destroy(&indirect_desc_cache); 976 rcu_read_unlock(); 977 978 return elem; 979 980 err_undo_map: 981 virtqueue_undo_map_desc(out_num, in_num, iov); 982 goto done; 983 } 984 985 /* virtqueue_drop_all: 986 * @vq: The #VirtQueue 987 * Drops all queued buffers and indicates them to the guest 988 * as if they are done. Useful when buffers can not be 989 * processed but must be returned to the guest. 990 */ 991 unsigned int virtqueue_drop_all(VirtQueue *vq) 992 { 993 unsigned int dropped = 0; 994 VirtQueueElement elem = {}; 995 VirtIODevice *vdev = vq->vdev; 996 bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 997 998 if (unlikely(vdev->broken)) { 999 return 0; 1000 } 1001 1002 while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) { 1003 /* works similar to virtqueue_pop but does not map buffers 1004 * and does not allocate any memory */ 1005 smp_rmb(); 1006 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) { 1007 break; 1008 } 1009 vq->inuse++; 1010 vq->last_avail_idx++; 1011 if (fEventIdx) { 1012 vring_set_avail_event(vq, vq->last_avail_idx); 1013 } 1014 /* immediately push the element, nothing to unmap 1015 * as both in_num and out_num are set to 0 */ 1016 virtqueue_push(vq, &elem, 0); 1017 dropped++; 1018 } 1019 1020 return dropped; 1021 } 1022 1023 /* Reading and writing a structure directly to QEMUFile is *awful*, but 1024 * it is what QEMU has always done by mistake. We can change it sooner 1025 * or later by bumping the version number of the affected vm states. 1026 * In the meanwhile, since the in-memory layout of VirtQueueElement 1027 * has changed, we need to marshal to and from the layout that was 1028 * used before the change. 1029 */ 1030 typedef struct VirtQueueElementOld { 1031 unsigned int index; 1032 unsigned int out_num; 1033 unsigned int in_num; 1034 hwaddr in_addr[VIRTQUEUE_MAX_SIZE]; 1035 hwaddr out_addr[VIRTQUEUE_MAX_SIZE]; 1036 struct iovec in_sg[VIRTQUEUE_MAX_SIZE]; 1037 struct iovec out_sg[VIRTQUEUE_MAX_SIZE]; 1038 } VirtQueueElementOld; 1039 1040 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz) 1041 { 1042 VirtQueueElement *elem; 1043 VirtQueueElementOld data; 1044 int i; 1045 1046 qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld)); 1047 1048 /* TODO: teach all callers that this can fail, and return failure instead 1049 * of asserting here. 1050 * This is just one thing (there are probably more) that must be 1051 * fixed before we can allow NDEBUG compilation. 1052 */ 1053 assert(ARRAY_SIZE(data.in_addr) >= data.in_num); 1054 assert(ARRAY_SIZE(data.out_addr) >= data.out_num); 1055 1056 elem = virtqueue_alloc_element(sz, data.out_num, data.in_num); 1057 elem->index = data.index; 1058 1059 for (i = 0; i < elem->in_num; i++) { 1060 elem->in_addr[i] = data.in_addr[i]; 1061 } 1062 1063 for (i = 0; i < elem->out_num; i++) { 1064 elem->out_addr[i] = data.out_addr[i]; 1065 } 1066 1067 for (i = 0; i < elem->in_num; i++) { 1068 /* Base is overwritten by virtqueue_map. */ 1069 elem->in_sg[i].iov_base = 0; 1070 elem->in_sg[i].iov_len = data.in_sg[i].iov_len; 1071 } 1072 1073 for (i = 0; i < elem->out_num; i++) { 1074 /* Base is overwritten by virtqueue_map. */ 1075 elem->out_sg[i].iov_base = 0; 1076 elem->out_sg[i].iov_len = data.out_sg[i].iov_len; 1077 } 1078 1079 virtqueue_map(vdev, elem); 1080 return elem; 1081 } 1082 1083 void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem) 1084 { 1085 VirtQueueElementOld data; 1086 int i; 1087 1088 memset(&data, 0, sizeof(data)); 1089 data.index = elem->index; 1090 data.in_num = elem->in_num; 1091 data.out_num = elem->out_num; 1092 1093 for (i = 0; i < elem->in_num; i++) { 1094 data.in_addr[i] = elem->in_addr[i]; 1095 } 1096 1097 for (i = 0; i < elem->out_num; i++) { 1098 data.out_addr[i] = elem->out_addr[i]; 1099 } 1100 1101 for (i = 0; i < elem->in_num; i++) { 1102 /* Base is overwritten by virtqueue_map when loading. Do not 1103 * save it, as it would leak the QEMU address space layout. */ 1104 data.in_sg[i].iov_len = elem->in_sg[i].iov_len; 1105 } 1106 1107 for (i = 0; i < elem->out_num; i++) { 1108 /* Do not save iov_base as above. */ 1109 data.out_sg[i].iov_len = elem->out_sg[i].iov_len; 1110 } 1111 qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld)); 1112 } 1113 1114 /* virtio device */ 1115 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector) 1116 { 1117 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1118 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1119 1120 if (unlikely(vdev->broken)) { 1121 return; 1122 } 1123 1124 if (k->notify) { 1125 k->notify(qbus->parent, vector); 1126 } 1127 } 1128 1129 void virtio_update_irq(VirtIODevice *vdev) 1130 { 1131 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); 1132 } 1133 1134 static int virtio_validate_features(VirtIODevice *vdev) 1135 { 1136 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1137 1138 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) && 1139 !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { 1140 return -EFAULT; 1141 } 1142 1143 if (k->validate_features) { 1144 return k->validate_features(vdev); 1145 } else { 1146 return 0; 1147 } 1148 } 1149 1150 int virtio_set_status(VirtIODevice *vdev, uint8_t val) 1151 { 1152 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1153 trace_virtio_set_status(vdev, val); 1154 1155 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1156 if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) && 1157 val & VIRTIO_CONFIG_S_FEATURES_OK) { 1158 int ret = virtio_validate_features(vdev); 1159 1160 if (ret) { 1161 return ret; 1162 } 1163 } 1164 } 1165 if (k->set_status) { 1166 k->set_status(vdev, val); 1167 } 1168 vdev->status = val; 1169 return 0; 1170 } 1171 1172 bool target_words_bigendian(void); 1173 static enum virtio_device_endian virtio_default_endian(void) 1174 { 1175 if (target_words_bigendian()) { 1176 return VIRTIO_DEVICE_ENDIAN_BIG; 1177 } else { 1178 return VIRTIO_DEVICE_ENDIAN_LITTLE; 1179 } 1180 } 1181 1182 static enum virtio_device_endian virtio_current_cpu_endian(void) 1183 { 1184 CPUClass *cc = CPU_GET_CLASS(current_cpu); 1185 1186 if (cc->virtio_is_big_endian(current_cpu)) { 1187 return VIRTIO_DEVICE_ENDIAN_BIG; 1188 } else { 1189 return VIRTIO_DEVICE_ENDIAN_LITTLE; 1190 } 1191 } 1192 1193 void virtio_reset(void *opaque) 1194 { 1195 VirtIODevice *vdev = opaque; 1196 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1197 int i; 1198 1199 virtio_set_status(vdev, 0); 1200 if (current_cpu) { 1201 /* Guest initiated reset */ 1202 vdev->device_endian = virtio_current_cpu_endian(); 1203 } else { 1204 /* System reset */ 1205 vdev->device_endian = virtio_default_endian(); 1206 } 1207 1208 if (k->reset) { 1209 k->reset(vdev); 1210 } 1211 1212 vdev->broken = false; 1213 vdev->guest_features = 0; 1214 vdev->queue_sel = 0; 1215 vdev->status = 0; 1216 atomic_set(&vdev->isr, 0); 1217 vdev->config_vector = VIRTIO_NO_VECTOR; 1218 virtio_notify_vector(vdev, vdev->config_vector); 1219 1220 for(i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1221 vdev->vq[i].vring.desc = 0; 1222 vdev->vq[i].vring.avail = 0; 1223 vdev->vq[i].vring.used = 0; 1224 vdev->vq[i].last_avail_idx = 0; 1225 vdev->vq[i].shadow_avail_idx = 0; 1226 vdev->vq[i].used_idx = 0; 1227 virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR); 1228 vdev->vq[i].signalled_used = 0; 1229 vdev->vq[i].signalled_used_valid = false; 1230 vdev->vq[i].notification = true; 1231 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default; 1232 vdev->vq[i].inuse = 0; 1233 virtio_virtqueue_reset_region_cache(&vdev->vq[i]); 1234 } 1235 } 1236 1237 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr) 1238 { 1239 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1240 uint8_t val; 1241 1242 if (addr + sizeof(val) > vdev->config_len) { 1243 return (uint32_t)-1; 1244 } 1245 1246 k->get_config(vdev, vdev->config); 1247 1248 val = ldub_p(vdev->config + addr); 1249 return val; 1250 } 1251 1252 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr) 1253 { 1254 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1255 uint16_t val; 1256 1257 if (addr + sizeof(val) > vdev->config_len) { 1258 return (uint32_t)-1; 1259 } 1260 1261 k->get_config(vdev, vdev->config); 1262 1263 val = lduw_p(vdev->config + addr); 1264 return val; 1265 } 1266 1267 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr) 1268 { 1269 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1270 uint32_t val; 1271 1272 if (addr + sizeof(val) > vdev->config_len) { 1273 return (uint32_t)-1; 1274 } 1275 1276 k->get_config(vdev, vdev->config); 1277 1278 val = ldl_p(vdev->config + addr); 1279 return val; 1280 } 1281 1282 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data) 1283 { 1284 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1285 uint8_t val = data; 1286 1287 if (addr + sizeof(val) > vdev->config_len) { 1288 return; 1289 } 1290 1291 stb_p(vdev->config + addr, val); 1292 1293 if (k->set_config) { 1294 k->set_config(vdev, vdev->config); 1295 } 1296 } 1297 1298 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data) 1299 { 1300 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1301 uint16_t val = data; 1302 1303 if (addr + sizeof(val) > vdev->config_len) { 1304 return; 1305 } 1306 1307 stw_p(vdev->config + addr, val); 1308 1309 if (k->set_config) { 1310 k->set_config(vdev, vdev->config); 1311 } 1312 } 1313 1314 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data) 1315 { 1316 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1317 uint32_t val = data; 1318 1319 if (addr + sizeof(val) > vdev->config_len) { 1320 return; 1321 } 1322 1323 stl_p(vdev->config + addr, val); 1324 1325 if (k->set_config) { 1326 k->set_config(vdev, vdev->config); 1327 } 1328 } 1329 1330 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr) 1331 { 1332 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1333 uint8_t val; 1334 1335 if (addr + sizeof(val) > vdev->config_len) { 1336 return (uint32_t)-1; 1337 } 1338 1339 k->get_config(vdev, vdev->config); 1340 1341 val = ldub_p(vdev->config + addr); 1342 return val; 1343 } 1344 1345 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr) 1346 { 1347 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1348 uint16_t val; 1349 1350 if (addr + sizeof(val) > vdev->config_len) { 1351 return (uint32_t)-1; 1352 } 1353 1354 k->get_config(vdev, vdev->config); 1355 1356 val = lduw_le_p(vdev->config + addr); 1357 return val; 1358 } 1359 1360 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr) 1361 { 1362 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1363 uint32_t val; 1364 1365 if (addr + sizeof(val) > vdev->config_len) { 1366 return (uint32_t)-1; 1367 } 1368 1369 k->get_config(vdev, vdev->config); 1370 1371 val = ldl_le_p(vdev->config + addr); 1372 return val; 1373 } 1374 1375 void virtio_config_modern_writeb(VirtIODevice *vdev, 1376 uint32_t addr, uint32_t data) 1377 { 1378 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1379 uint8_t val = data; 1380 1381 if (addr + sizeof(val) > vdev->config_len) { 1382 return; 1383 } 1384 1385 stb_p(vdev->config + addr, val); 1386 1387 if (k->set_config) { 1388 k->set_config(vdev, vdev->config); 1389 } 1390 } 1391 1392 void virtio_config_modern_writew(VirtIODevice *vdev, 1393 uint32_t addr, uint32_t data) 1394 { 1395 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1396 uint16_t val = data; 1397 1398 if (addr + sizeof(val) > vdev->config_len) { 1399 return; 1400 } 1401 1402 stw_le_p(vdev->config + addr, val); 1403 1404 if (k->set_config) { 1405 k->set_config(vdev, vdev->config); 1406 } 1407 } 1408 1409 void virtio_config_modern_writel(VirtIODevice *vdev, 1410 uint32_t addr, uint32_t data) 1411 { 1412 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1413 uint32_t val = data; 1414 1415 if (addr + sizeof(val) > vdev->config_len) { 1416 return; 1417 } 1418 1419 stl_le_p(vdev->config + addr, val); 1420 1421 if (k->set_config) { 1422 k->set_config(vdev, vdev->config); 1423 } 1424 } 1425 1426 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr) 1427 { 1428 if (!vdev->vq[n].vring.num) { 1429 return; 1430 } 1431 vdev->vq[n].vring.desc = addr; 1432 virtio_queue_update_rings(vdev, n); 1433 } 1434 1435 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n) 1436 { 1437 return vdev->vq[n].vring.desc; 1438 } 1439 1440 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc, 1441 hwaddr avail, hwaddr used) 1442 { 1443 if (!vdev->vq[n].vring.num) { 1444 return; 1445 } 1446 vdev->vq[n].vring.desc = desc; 1447 vdev->vq[n].vring.avail = avail; 1448 vdev->vq[n].vring.used = used; 1449 virtio_init_region_cache(vdev, n); 1450 } 1451 1452 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num) 1453 { 1454 /* Don't allow guest to flip queue between existent and 1455 * nonexistent states, or to set it to an invalid size. 1456 */ 1457 if (!!num != !!vdev->vq[n].vring.num || 1458 num > VIRTQUEUE_MAX_SIZE || 1459 num < 0) { 1460 return; 1461 } 1462 vdev->vq[n].vring.num = num; 1463 } 1464 1465 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector) 1466 { 1467 return QLIST_FIRST(&vdev->vector_queues[vector]); 1468 } 1469 1470 VirtQueue *virtio_vector_next_queue(VirtQueue *vq) 1471 { 1472 return QLIST_NEXT(vq, node); 1473 } 1474 1475 int virtio_queue_get_num(VirtIODevice *vdev, int n) 1476 { 1477 return vdev->vq[n].vring.num; 1478 } 1479 1480 int virtio_queue_get_max_num(VirtIODevice *vdev, int n) 1481 { 1482 return vdev->vq[n].vring.num_default; 1483 } 1484 1485 int virtio_get_num_queues(VirtIODevice *vdev) 1486 { 1487 int i; 1488 1489 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1490 if (!virtio_queue_get_num(vdev, i)) { 1491 break; 1492 } 1493 } 1494 1495 return i; 1496 } 1497 1498 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) 1499 { 1500 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1501 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1502 1503 /* virtio-1 compliant devices cannot change the alignment */ 1504 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1505 error_report("tried to modify queue alignment for virtio-1 device"); 1506 return; 1507 } 1508 /* Check that the transport told us it was going to do this 1509 * (so a buggy transport will immediately assert rather than 1510 * silently failing to migrate this state) 1511 */ 1512 assert(k->has_variable_vring_alignment); 1513 1514 if (align) { 1515 vdev->vq[n].vring.align = align; 1516 virtio_queue_update_rings(vdev, n); 1517 } 1518 } 1519 1520 static bool virtio_queue_notify_aio_vq(VirtQueue *vq) 1521 { 1522 if (vq->vring.desc && vq->handle_aio_output) { 1523 VirtIODevice *vdev = vq->vdev; 1524 1525 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); 1526 return vq->handle_aio_output(vdev, vq); 1527 } 1528 1529 return false; 1530 } 1531 1532 static void virtio_queue_notify_vq(VirtQueue *vq) 1533 { 1534 if (vq->vring.desc && vq->handle_output) { 1535 VirtIODevice *vdev = vq->vdev; 1536 1537 if (unlikely(vdev->broken)) { 1538 return; 1539 } 1540 1541 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); 1542 vq->handle_output(vdev, vq); 1543 } 1544 } 1545 1546 void virtio_queue_notify(VirtIODevice *vdev, int n) 1547 { 1548 VirtQueue *vq = &vdev->vq[n]; 1549 1550 if (unlikely(!vq->vring.desc || vdev->broken)) { 1551 return; 1552 } 1553 1554 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); 1555 if (vq->handle_aio_output) { 1556 event_notifier_set(&vq->host_notifier); 1557 } else if (vq->handle_output) { 1558 vq->handle_output(vdev, vq); 1559 } 1560 } 1561 1562 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n) 1563 { 1564 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector : 1565 VIRTIO_NO_VECTOR; 1566 } 1567 1568 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector) 1569 { 1570 VirtQueue *vq = &vdev->vq[n]; 1571 1572 if (n < VIRTIO_QUEUE_MAX) { 1573 if (vdev->vector_queues && 1574 vdev->vq[n].vector != VIRTIO_NO_VECTOR) { 1575 QLIST_REMOVE(vq, node); 1576 } 1577 vdev->vq[n].vector = vector; 1578 if (vdev->vector_queues && 1579 vector != VIRTIO_NO_VECTOR) { 1580 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node); 1581 } 1582 } 1583 } 1584 1585 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, 1586 VirtIOHandleOutput handle_output) 1587 { 1588 int i; 1589 1590 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1591 if (vdev->vq[i].vring.num == 0) 1592 break; 1593 } 1594 1595 if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) 1596 abort(); 1597 1598 vdev->vq[i].vring.num = queue_size; 1599 vdev->vq[i].vring.num_default = queue_size; 1600 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; 1601 vdev->vq[i].handle_output = handle_output; 1602 vdev->vq[i].handle_aio_output = NULL; 1603 1604 return &vdev->vq[i]; 1605 } 1606 1607 void virtio_del_queue(VirtIODevice *vdev, int n) 1608 { 1609 if (n < 0 || n >= VIRTIO_QUEUE_MAX) { 1610 abort(); 1611 } 1612 1613 vdev->vq[n].vring.num = 0; 1614 vdev->vq[n].vring.num_default = 0; 1615 } 1616 1617 static void virtio_set_isr(VirtIODevice *vdev, int value) 1618 { 1619 uint8_t old = atomic_read(&vdev->isr); 1620 1621 /* Do not write ISR if it does not change, so that its cacheline remains 1622 * shared in the common case where the guest does not read it. 1623 */ 1624 if ((old & value) != value) { 1625 atomic_or(&vdev->isr, value); 1626 } 1627 } 1628 1629 /* Called within rcu_read_lock(). */ 1630 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) 1631 { 1632 uint16_t old, new; 1633 bool v; 1634 /* We need to expose used array entries before checking used event. */ 1635 smp_mb(); 1636 /* Always notify when queue is empty (when feature acknowledge) */ 1637 if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) && 1638 !vq->inuse && virtio_queue_empty(vq)) { 1639 return true; 1640 } 1641 1642 if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { 1643 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); 1644 } 1645 1646 v = vq->signalled_used_valid; 1647 vq->signalled_used_valid = true; 1648 old = vq->signalled_used; 1649 new = vq->signalled_used = vq->used_idx; 1650 return !v || vring_need_event(vring_get_used_event(vq), new, old); 1651 } 1652 1653 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) 1654 { 1655 bool should_notify; 1656 rcu_read_lock(); 1657 should_notify = virtio_should_notify(vdev, vq); 1658 rcu_read_unlock(); 1659 1660 if (!should_notify) { 1661 return; 1662 } 1663 1664 trace_virtio_notify_irqfd(vdev, vq); 1665 1666 /* 1667 * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but 1668 * windows drivers included in virtio-win 1.8.0 (circa 2015) are 1669 * incorrectly polling this bit during crashdump and hibernation 1670 * in MSI mode, causing a hang if this bit is never updated. 1671 * Recent releases of Windows do not really shut down, but rather 1672 * log out and hibernate to make the next startup faster. Hence, 1673 * this manifested as a more serious hang during shutdown with 1674 * 1675 * Next driver release from 2016 fixed this problem, so working around it 1676 * is not a must, but it's easy to do so let's do it here. 1677 * 1678 * Note: it's safe to update ISR from any thread as it was switched 1679 * to an atomic operation. 1680 */ 1681 virtio_set_isr(vq->vdev, 0x1); 1682 event_notifier_set(&vq->guest_notifier); 1683 } 1684 1685 static void virtio_irq(VirtQueue *vq) 1686 { 1687 virtio_set_isr(vq->vdev, 0x1); 1688 virtio_notify_vector(vq->vdev, vq->vector); 1689 } 1690 1691 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq) 1692 { 1693 bool should_notify; 1694 rcu_read_lock(); 1695 should_notify = virtio_should_notify(vdev, vq); 1696 rcu_read_unlock(); 1697 1698 if (!should_notify) { 1699 return; 1700 } 1701 1702 trace_virtio_notify(vdev, vq); 1703 virtio_irq(vq); 1704 } 1705 1706 void virtio_notify_config(VirtIODevice *vdev) 1707 { 1708 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) 1709 return; 1710 1711 virtio_set_isr(vdev, 0x3); 1712 vdev->generation++; 1713 virtio_notify_vector(vdev, vdev->config_vector); 1714 } 1715 1716 static bool virtio_device_endian_needed(void *opaque) 1717 { 1718 VirtIODevice *vdev = opaque; 1719 1720 assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN); 1721 if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1722 return vdev->device_endian != virtio_default_endian(); 1723 } 1724 /* Devices conforming to VIRTIO 1.0 or later are always LE. */ 1725 return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE; 1726 } 1727 1728 static bool virtio_64bit_features_needed(void *opaque) 1729 { 1730 VirtIODevice *vdev = opaque; 1731 1732 return (vdev->host_features >> 32) != 0; 1733 } 1734 1735 static bool virtio_virtqueue_needed(void *opaque) 1736 { 1737 VirtIODevice *vdev = opaque; 1738 1739 return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1); 1740 } 1741 1742 static bool virtio_ringsize_needed(void *opaque) 1743 { 1744 VirtIODevice *vdev = opaque; 1745 int i; 1746 1747 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1748 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) { 1749 return true; 1750 } 1751 } 1752 return false; 1753 } 1754 1755 static bool virtio_extra_state_needed(void *opaque) 1756 { 1757 VirtIODevice *vdev = opaque; 1758 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1759 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1760 1761 return k->has_extra_state && 1762 k->has_extra_state(qbus->parent); 1763 } 1764 1765 static bool virtio_broken_needed(void *opaque) 1766 { 1767 VirtIODevice *vdev = opaque; 1768 1769 return vdev->broken; 1770 } 1771 1772 static const VMStateDescription vmstate_virtqueue = { 1773 .name = "virtqueue_state", 1774 .version_id = 1, 1775 .minimum_version_id = 1, 1776 .fields = (VMStateField[]) { 1777 VMSTATE_UINT64(vring.avail, struct VirtQueue), 1778 VMSTATE_UINT64(vring.used, struct VirtQueue), 1779 VMSTATE_END_OF_LIST() 1780 } 1781 }; 1782 1783 static const VMStateDescription vmstate_virtio_virtqueues = { 1784 .name = "virtio/virtqueues", 1785 .version_id = 1, 1786 .minimum_version_id = 1, 1787 .needed = &virtio_virtqueue_needed, 1788 .fields = (VMStateField[]) { 1789 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, 1790 VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue), 1791 VMSTATE_END_OF_LIST() 1792 } 1793 }; 1794 1795 static const VMStateDescription vmstate_ringsize = { 1796 .name = "ringsize_state", 1797 .version_id = 1, 1798 .minimum_version_id = 1, 1799 .fields = (VMStateField[]) { 1800 VMSTATE_UINT32(vring.num_default, struct VirtQueue), 1801 VMSTATE_END_OF_LIST() 1802 } 1803 }; 1804 1805 static const VMStateDescription vmstate_virtio_ringsize = { 1806 .name = "virtio/ringsize", 1807 .version_id = 1, 1808 .minimum_version_id = 1, 1809 .needed = &virtio_ringsize_needed, 1810 .fields = (VMStateField[]) { 1811 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, 1812 VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue), 1813 VMSTATE_END_OF_LIST() 1814 } 1815 }; 1816 1817 static int get_extra_state(QEMUFile *f, void *pv, size_t size, 1818 VMStateField *field) 1819 { 1820 VirtIODevice *vdev = pv; 1821 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1822 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1823 1824 if (!k->load_extra_state) { 1825 return -1; 1826 } else { 1827 return k->load_extra_state(qbus->parent, f); 1828 } 1829 } 1830 1831 static int put_extra_state(QEMUFile *f, void *pv, size_t size, 1832 VMStateField *field, QJSON *vmdesc) 1833 { 1834 VirtIODevice *vdev = pv; 1835 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1836 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1837 1838 k->save_extra_state(qbus->parent, f); 1839 return 0; 1840 } 1841 1842 static const VMStateInfo vmstate_info_extra_state = { 1843 .name = "virtqueue_extra_state", 1844 .get = get_extra_state, 1845 .put = put_extra_state, 1846 }; 1847 1848 static const VMStateDescription vmstate_virtio_extra_state = { 1849 .name = "virtio/extra_state", 1850 .version_id = 1, 1851 .minimum_version_id = 1, 1852 .needed = &virtio_extra_state_needed, 1853 .fields = (VMStateField[]) { 1854 { 1855 .name = "extra_state", 1856 .version_id = 0, 1857 .field_exists = NULL, 1858 .size = 0, 1859 .info = &vmstate_info_extra_state, 1860 .flags = VMS_SINGLE, 1861 .offset = 0, 1862 }, 1863 VMSTATE_END_OF_LIST() 1864 } 1865 }; 1866 1867 static const VMStateDescription vmstate_virtio_device_endian = { 1868 .name = "virtio/device_endian", 1869 .version_id = 1, 1870 .minimum_version_id = 1, 1871 .needed = &virtio_device_endian_needed, 1872 .fields = (VMStateField[]) { 1873 VMSTATE_UINT8(device_endian, VirtIODevice), 1874 VMSTATE_END_OF_LIST() 1875 } 1876 }; 1877 1878 static const VMStateDescription vmstate_virtio_64bit_features = { 1879 .name = "virtio/64bit_features", 1880 .version_id = 1, 1881 .minimum_version_id = 1, 1882 .needed = &virtio_64bit_features_needed, 1883 .fields = (VMStateField[]) { 1884 VMSTATE_UINT64(guest_features, VirtIODevice), 1885 VMSTATE_END_OF_LIST() 1886 } 1887 }; 1888 1889 static const VMStateDescription vmstate_virtio_broken = { 1890 .name = "virtio/broken", 1891 .version_id = 1, 1892 .minimum_version_id = 1, 1893 .needed = &virtio_broken_needed, 1894 .fields = (VMStateField[]) { 1895 VMSTATE_BOOL(broken, VirtIODevice), 1896 VMSTATE_END_OF_LIST() 1897 } 1898 }; 1899 1900 static const VMStateDescription vmstate_virtio = { 1901 .name = "virtio", 1902 .version_id = 1, 1903 .minimum_version_id = 1, 1904 .minimum_version_id_old = 1, 1905 .fields = (VMStateField[]) { 1906 VMSTATE_END_OF_LIST() 1907 }, 1908 .subsections = (const VMStateDescription*[]) { 1909 &vmstate_virtio_device_endian, 1910 &vmstate_virtio_64bit_features, 1911 &vmstate_virtio_virtqueues, 1912 &vmstate_virtio_ringsize, 1913 &vmstate_virtio_broken, 1914 &vmstate_virtio_extra_state, 1915 NULL 1916 } 1917 }; 1918 1919 int virtio_save(VirtIODevice *vdev, QEMUFile *f) 1920 { 1921 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1922 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1923 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 1924 uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff); 1925 int i; 1926 1927 if (k->save_config) { 1928 k->save_config(qbus->parent, f); 1929 } 1930 1931 qemu_put_8s(f, &vdev->status); 1932 qemu_put_8s(f, &vdev->isr); 1933 qemu_put_be16s(f, &vdev->queue_sel); 1934 qemu_put_be32s(f, &guest_features_lo); 1935 qemu_put_be32(f, vdev->config_len); 1936 qemu_put_buffer(f, vdev->config, vdev->config_len); 1937 1938 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1939 if (vdev->vq[i].vring.num == 0) 1940 break; 1941 } 1942 1943 qemu_put_be32(f, i); 1944 1945 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1946 if (vdev->vq[i].vring.num == 0) 1947 break; 1948 1949 qemu_put_be32(f, vdev->vq[i].vring.num); 1950 if (k->has_variable_vring_alignment) { 1951 qemu_put_be32(f, vdev->vq[i].vring.align); 1952 } 1953 /* 1954 * Save desc now, the rest of the ring addresses are saved in 1955 * subsections for VIRTIO-1 devices. 1956 */ 1957 qemu_put_be64(f, vdev->vq[i].vring.desc); 1958 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx); 1959 if (k->save_queue) { 1960 k->save_queue(qbus->parent, i, f); 1961 } 1962 } 1963 1964 if (vdc->save != NULL) { 1965 vdc->save(vdev, f); 1966 } 1967 1968 if (vdc->vmsd) { 1969 int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL); 1970 if (ret) { 1971 return ret; 1972 } 1973 } 1974 1975 /* Subsections */ 1976 return vmstate_save_state(f, &vmstate_virtio, vdev, NULL); 1977 } 1978 1979 /* A wrapper for use as a VMState .put function */ 1980 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size, 1981 VMStateField *field, QJSON *vmdesc) 1982 { 1983 return virtio_save(VIRTIO_DEVICE(opaque), f); 1984 } 1985 1986 /* A wrapper for use as a VMState .get function */ 1987 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size, 1988 VMStateField *field) 1989 { 1990 VirtIODevice *vdev = VIRTIO_DEVICE(opaque); 1991 DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev)); 1992 1993 return virtio_load(vdev, f, dc->vmsd->version_id); 1994 } 1995 1996 const VMStateInfo virtio_vmstate_info = { 1997 .name = "virtio", 1998 .get = virtio_device_get, 1999 .put = virtio_device_put, 2000 }; 2001 2002 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val) 2003 { 2004 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 2005 bool bad = (val & ~(vdev->host_features)) != 0; 2006 2007 val &= vdev->host_features; 2008 if (k->set_features) { 2009 k->set_features(vdev, val); 2010 } 2011 vdev->guest_features = val; 2012 return bad ? -1 : 0; 2013 } 2014 2015 int virtio_set_features(VirtIODevice *vdev, uint64_t val) 2016 { 2017 int ret; 2018 /* 2019 * The driver must not attempt to set features after feature negotiation 2020 * has finished. 2021 */ 2022 if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) { 2023 return -EINVAL; 2024 } 2025 ret = virtio_set_features_nocheck(vdev, val); 2026 if (!ret && virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { 2027 /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */ 2028 int i; 2029 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2030 if (vdev->vq[i].vring.num != 0) { 2031 virtio_init_region_cache(vdev, i); 2032 } 2033 } 2034 } 2035 return ret; 2036 } 2037 2038 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) 2039 { 2040 int i, ret; 2041 int32_t config_len; 2042 uint32_t num; 2043 uint32_t features; 2044 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2045 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2046 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 2047 2048 /* 2049 * We poison the endianness to ensure it does not get used before 2050 * subsections have been loaded. 2051 */ 2052 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN; 2053 2054 if (k->load_config) { 2055 ret = k->load_config(qbus->parent, f); 2056 if (ret) 2057 return ret; 2058 } 2059 2060 qemu_get_8s(f, &vdev->status); 2061 qemu_get_8s(f, &vdev->isr); 2062 qemu_get_be16s(f, &vdev->queue_sel); 2063 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) { 2064 return -1; 2065 } 2066 qemu_get_be32s(f, &features); 2067 2068 /* 2069 * Temporarily set guest_features low bits - needed by 2070 * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2071 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ. 2072 * 2073 * Note: devices should always test host features in future - don't create 2074 * new dependencies like this. 2075 */ 2076 vdev->guest_features = features; 2077 2078 config_len = qemu_get_be32(f); 2079 2080 /* 2081 * There are cases where the incoming config can be bigger or smaller 2082 * than what we have; so load what we have space for, and skip 2083 * any excess that's in the stream. 2084 */ 2085 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len)); 2086 2087 while (config_len > vdev->config_len) { 2088 qemu_get_byte(f); 2089 config_len--; 2090 } 2091 2092 num = qemu_get_be32(f); 2093 2094 if (num > VIRTIO_QUEUE_MAX) { 2095 error_report("Invalid number of virtqueues: 0x%x", num); 2096 return -1; 2097 } 2098 2099 for (i = 0; i < num; i++) { 2100 vdev->vq[i].vring.num = qemu_get_be32(f); 2101 if (k->has_variable_vring_alignment) { 2102 vdev->vq[i].vring.align = qemu_get_be32(f); 2103 } 2104 vdev->vq[i].vring.desc = qemu_get_be64(f); 2105 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx); 2106 vdev->vq[i].signalled_used_valid = false; 2107 vdev->vq[i].notification = true; 2108 2109 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) { 2110 error_report("VQ %d address 0x0 " 2111 "inconsistent with Host index 0x%x", 2112 i, vdev->vq[i].last_avail_idx); 2113 return -1; 2114 } 2115 if (k->load_queue) { 2116 ret = k->load_queue(qbus->parent, i, f); 2117 if (ret) 2118 return ret; 2119 } 2120 } 2121 2122 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); 2123 2124 if (vdc->load != NULL) { 2125 ret = vdc->load(vdev, f, version_id); 2126 if (ret) { 2127 return ret; 2128 } 2129 } 2130 2131 if (vdc->vmsd) { 2132 ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id); 2133 if (ret) { 2134 return ret; 2135 } 2136 } 2137 2138 /* Subsections */ 2139 ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1); 2140 if (ret) { 2141 return ret; 2142 } 2143 2144 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) { 2145 vdev->device_endian = virtio_default_endian(); 2146 } 2147 2148 if (virtio_64bit_features_needed(vdev)) { 2149 /* 2150 * Subsection load filled vdev->guest_features. Run them 2151 * through virtio_set_features to sanity-check them against 2152 * host_features. 2153 */ 2154 uint64_t features64 = vdev->guest_features; 2155 if (virtio_set_features_nocheck(vdev, features64) < 0) { 2156 error_report("Features 0x%" PRIx64 " unsupported. " 2157 "Allowed features: 0x%" PRIx64, 2158 features64, vdev->host_features); 2159 return -1; 2160 } 2161 } else { 2162 if (virtio_set_features_nocheck(vdev, features) < 0) { 2163 error_report("Features 0x%x unsupported. " 2164 "Allowed features: 0x%" PRIx64, 2165 features, vdev->host_features); 2166 return -1; 2167 } 2168 } 2169 2170 rcu_read_lock(); 2171 for (i = 0; i < num; i++) { 2172 if (vdev->vq[i].vring.desc) { 2173 uint16_t nheads; 2174 2175 /* 2176 * VIRTIO-1 devices migrate desc, used, and avail ring addresses so 2177 * only the region cache needs to be set up. Legacy devices need 2178 * to calculate used and avail ring addresses based on the desc 2179 * address. 2180 */ 2181 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2182 virtio_init_region_cache(vdev, i); 2183 } else { 2184 virtio_queue_update_rings(vdev, i); 2185 } 2186 2187 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; 2188 /* Check it isn't doing strange things with descriptor numbers. */ 2189 if (nheads > vdev->vq[i].vring.num) { 2190 error_report("VQ %d size 0x%x Guest index 0x%x " 2191 "inconsistent with Host index 0x%x: delta 0x%x", 2192 i, vdev->vq[i].vring.num, 2193 vring_avail_idx(&vdev->vq[i]), 2194 vdev->vq[i].last_avail_idx, nheads); 2195 return -1; 2196 } 2197 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]); 2198 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]); 2199 2200 /* 2201 * Some devices migrate VirtQueueElements that have been popped 2202 * from the avail ring but not yet returned to the used ring. 2203 * Since max ring size < UINT16_MAX it's safe to use modulo 2204 * UINT16_MAX + 1 subtraction. 2205 */ 2206 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx - 2207 vdev->vq[i].used_idx); 2208 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) { 2209 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - " 2210 "used_idx 0x%x", 2211 i, vdev->vq[i].vring.num, 2212 vdev->vq[i].last_avail_idx, 2213 vdev->vq[i].used_idx); 2214 return -1; 2215 } 2216 } 2217 } 2218 rcu_read_unlock(); 2219 2220 return 0; 2221 } 2222 2223 void virtio_cleanup(VirtIODevice *vdev) 2224 { 2225 qemu_del_vm_change_state_handler(vdev->vmstate); 2226 } 2227 2228 static void virtio_vmstate_change(void *opaque, int running, RunState state) 2229 { 2230 VirtIODevice *vdev = opaque; 2231 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2232 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2233 bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK); 2234 vdev->vm_running = running; 2235 2236 if (backend_run) { 2237 virtio_set_status(vdev, vdev->status); 2238 } 2239 2240 if (k->vmstate_change) { 2241 k->vmstate_change(qbus->parent, backend_run); 2242 } 2243 2244 if (!backend_run) { 2245 virtio_set_status(vdev, vdev->status); 2246 } 2247 } 2248 2249 void virtio_instance_init_common(Object *proxy_obj, void *data, 2250 size_t vdev_size, const char *vdev_name) 2251 { 2252 DeviceState *vdev = data; 2253 2254 object_initialize(vdev, vdev_size, vdev_name); 2255 object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL); 2256 object_unref(OBJECT(vdev)); 2257 qdev_alias_all_properties(vdev, proxy_obj); 2258 } 2259 2260 void virtio_init(VirtIODevice *vdev, const char *name, 2261 uint16_t device_id, size_t config_size) 2262 { 2263 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2264 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2265 int i; 2266 int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0; 2267 2268 if (nvectors) { 2269 vdev->vector_queues = 2270 g_malloc0(sizeof(*vdev->vector_queues) * nvectors); 2271 } 2272 2273 vdev->device_id = device_id; 2274 vdev->status = 0; 2275 atomic_set(&vdev->isr, 0); 2276 vdev->queue_sel = 0; 2277 vdev->config_vector = VIRTIO_NO_VECTOR; 2278 vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX); 2279 vdev->vm_running = runstate_is_running(); 2280 vdev->broken = false; 2281 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2282 vdev->vq[i].vector = VIRTIO_NO_VECTOR; 2283 vdev->vq[i].vdev = vdev; 2284 vdev->vq[i].queue_index = i; 2285 } 2286 2287 vdev->name = name; 2288 vdev->config_len = config_size; 2289 if (vdev->config_len) { 2290 vdev->config = g_malloc0(config_size); 2291 } else { 2292 vdev->config = NULL; 2293 } 2294 vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change, 2295 vdev); 2296 vdev->device_endian = virtio_default_endian(); 2297 vdev->use_guest_notifier_mask = true; 2298 } 2299 2300 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n) 2301 { 2302 return vdev->vq[n].vring.desc; 2303 } 2304 2305 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n) 2306 { 2307 return vdev->vq[n].vring.avail; 2308 } 2309 2310 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n) 2311 { 2312 return vdev->vq[n].vring.used; 2313 } 2314 2315 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n) 2316 { 2317 return sizeof(VRingDesc) * vdev->vq[n].vring.num; 2318 } 2319 2320 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n) 2321 { 2322 return offsetof(VRingAvail, ring) + 2323 sizeof(uint16_t) * vdev->vq[n].vring.num; 2324 } 2325 2326 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n) 2327 { 2328 return offsetof(VRingUsed, ring) + 2329 sizeof(VRingUsedElem) * vdev->vq[n].vring.num; 2330 } 2331 2332 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) 2333 { 2334 return vdev->vq[n].last_avail_idx; 2335 } 2336 2337 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx) 2338 { 2339 vdev->vq[n].last_avail_idx = idx; 2340 vdev->vq[n].shadow_avail_idx = idx; 2341 } 2342 2343 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n) 2344 { 2345 rcu_read_lock(); 2346 if (vdev->vq[n].vring.desc) { 2347 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]); 2348 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx; 2349 } 2350 rcu_read_unlock(); 2351 } 2352 2353 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n) 2354 { 2355 rcu_read_lock(); 2356 if (vdev->vq[n].vring.desc) { 2357 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]); 2358 } 2359 rcu_read_unlock(); 2360 } 2361 2362 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n) 2363 { 2364 vdev->vq[n].signalled_used_valid = false; 2365 } 2366 2367 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n) 2368 { 2369 return vdev->vq + n; 2370 } 2371 2372 uint16_t virtio_get_queue_index(VirtQueue *vq) 2373 { 2374 return vq->queue_index; 2375 } 2376 2377 static void virtio_queue_guest_notifier_read(EventNotifier *n) 2378 { 2379 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier); 2380 if (event_notifier_test_and_clear(n)) { 2381 virtio_irq(vq); 2382 } 2383 } 2384 2385 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, 2386 bool with_irqfd) 2387 { 2388 if (assign && !with_irqfd) { 2389 event_notifier_set_handler(&vq->guest_notifier, 2390 virtio_queue_guest_notifier_read); 2391 } else { 2392 event_notifier_set_handler(&vq->guest_notifier, NULL); 2393 } 2394 if (!assign) { 2395 /* Test and clear notifier before closing it, 2396 * in case poll callback didn't have time to run. */ 2397 virtio_queue_guest_notifier_read(&vq->guest_notifier); 2398 } 2399 } 2400 2401 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) 2402 { 2403 return &vq->guest_notifier; 2404 } 2405 2406 static void virtio_queue_host_notifier_aio_read(EventNotifier *n) 2407 { 2408 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 2409 if (event_notifier_test_and_clear(n)) { 2410 virtio_queue_notify_aio_vq(vq); 2411 } 2412 } 2413 2414 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n) 2415 { 2416 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 2417 2418 virtio_queue_set_notification(vq, 0); 2419 } 2420 2421 static bool virtio_queue_host_notifier_aio_poll(void *opaque) 2422 { 2423 EventNotifier *n = opaque; 2424 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 2425 bool progress; 2426 2427 if (!vq->vring.desc || virtio_queue_empty(vq)) { 2428 return false; 2429 } 2430 2431 progress = virtio_queue_notify_aio_vq(vq); 2432 2433 /* In case the handler function re-enabled notifications */ 2434 virtio_queue_set_notification(vq, 0); 2435 return progress; 2436 } 2437 2438 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) 2439 { 2440 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 2441 2442 /* Caller polls once more after this to catch requests that race with us */ 2443 virtio_queue_set_notification(vq, 1); 2444 } 2445 2446 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, 2447 VirtIOHandleAIOOutput handle_output) 2448 { 2449 if (handle_output) { 2450 vq->handle_aio_output = handle_output; 2451 aio_set_event_notifier(ctx, &vq->host_notifier, true, 2452 virtio_queue_host_notifier_aio_read, 2453 virtio_queue_host_notifier_aio_poll); 2454 aio_set_event_notifier_poll(ctx, &vq->host_notifier, 2455 virtio_queue_host_notifier_aio_poll_begin, 2456 virtio_queue_host_notifier_aio_poll_end); 2457 } else { 2458 aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL); 2459 /* Test and clear notifier before after disabling event, 2460 * in case poll callback didn't have time to run. */ 2461 virtio_queue_host_notifier_aio_read(&vq->host_notifier); 2462 vq->handle_aio_output = NULL; 2463 } 2464 } 2465 2466 void virtio_queue_host_notifier_read(EventNotifier *n) 2467 { 2468 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 2469 if (event_notifier_test_and_clear(n)) { 2470 virtio_queue_notify_vq(vq); 2471 } 2472 } 2473 2474 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) 2475 { 2476 return &vq->host_notifier; 2477 } 2478 2479 int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n, 2480 MemoryRegion *mr, bool assign) 2481 { 2482 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2483 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 2484 2485 if (k->set_host_notifier_mr) { 2486 return k->set_host_notifier_mr(qbus->parent, n, mr, assign); 2487 } 2488 2489 return -1; 2490 } 2491 2492 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name) 2493 { 2494 g_free(vdev->bus_name); 2495 vdev->bus_name = g_strdup(bus_name); 2496 } 2497 2498 void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...) 2499 { 2500 va_list ap; 2501 2502 va_start(ap, fmt); 2503 error_vreport(fmt, ap); 2504 va_end(ap); 2505 2506 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2507 vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET; 2508 virtio_notify_config(vdev); 2509 } 2510 2511 vdev->broken = true; 2512 } 2513 2514 static void virtio_memory_listener_commit(MemoryListener *listener) 2515 { 2516 VirtIODevice *vdev = container_of(listener, VirtIODevice, listener); 2517 int i; 2518 2519 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2520 if (vdev->vq[i].vring.num == 0) { 2521 break; 2522 } 2523 virtio_init_region_cache(vdev, i); 2524 } 2525 } 2526 2527 static void virtio_device_realize(DeviceState *dev, Error **errp) 2528 { 2529 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 2530 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); 2531 Error *err = NULL; 2532 2533 /* Devices should either use vmsd or the load/save methods */ 2534 assert(!vdc->vmsd || !vdc->load); 2535 2536 if (vdc->realize != NULL) { 2537 vdc->realize(dev, &err); 2538 if (err != NULL) { 2539 error_propagate(errp, err); 2540 return; 2541 } 2542 } 2543 2544 virtio_bus_device_plugged(vdev, &err); 2545 if (err != NULL) { 2546 error_propagate(errp, err); 2547 vdc->unrealize(dev, NULL); 2548 return; 2549 } 2550 2551 vdev->listener.commit = virtio_memory_listener_commit; 2552 memory_listener_register(&vdev->listener, vdev->dma_as); 2553 } 2554 2555 static void virtio_device_unrealize(DeviceState *dev, Error **errp) 2556 { 2557 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 2558 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); 2559 Error *err = NULL; 2560 2561 virtio_bus_device_unplugged(vdev); 2562 2563 if (vdc->unrealize != NULL) { 2564 vdc->unrealize(dev, &err); 2565 if (err != NULL) { 2566 error_propagate(errp, err); 2567 return; 2568 } 2569 } 2570 2571 g_free(vdev->bus_name); 2572 vdev->bus_name = NULL; 2573 } 2574 2575 static void virtio_device_free_virtqueues(VirtIODevice *vdev) 2576 { 2577 int i; 2578 if (!vdev->vq) { 2579 return; 2580 } 2581 2582 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2583 if (vdev->vq[i].vring.num == 0) { 2584 break; 2585 } 2586 virtio_virtqueue_reset_region_cache(&vdev->vq[i]); 2587 } 2588 g_free(vdev->vq); 2589 } 2590 2591 static void virtio_device_instance_finalize(Object *obj) 2592 { 2593 VirtIODevice *vdev = VIRTIO_DEVICE(obj); 2594 2595 memory_listener_unregister(&vdev->listener); 2596 virtio_device_free_virtqueues(vdev); 2597 2598 g_free(vdev->config); 2599 g_free(vdev->vector_queues); 2600 } 2601 2602 static Property virtio_properties[] = { 2603 DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features), 2604 DEFINE_PROP_END_OF_LIST(), 2605 }; 2606 2607 static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev) 2608 { 2609 VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev))); 2610 int i, n, r, err; 2611 2612 memory_region_transaction_begin(); 2613 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 2614 VirtQueue *vq = &vdev->vq[n]; 2615 if (!virtio_queue_get_num(vdev, n)) { 2616 continue; 2617 } 2618 r = virtio_bus_set_host_notifier(qbus, n, true); 2619 if (r < 0) { 2620 err = r; 2621 goto assign_error; 2622 } 2623 event_notifier_set_handler(&vq->host_notifier, 2624 virtio_queue_host_notifier_read); 2625 } 2626 2627 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 2628 /* Kick right away to begin processing requests already in vring */ 2629 VirtQueue *vq = &vdev->vq[n]; 2630 if (!vq->vring.num) { 2631 continue; 2632 } 2633 event_notifier_set(&vq->host_notifier); 2634 } 2635 memory_region_transaction_commit(); 2636 return 0; 2637 2638 assign_error: 2639 i = n; /* save n for a second iteration after transaction is committed. */ 2640 while (--n >= 0) { 2641 VirtQueue *vq = &vdev->vq[n]; 2642 if (!virtio_queue_get_num(vdev, n)) { 2643 continue; 2644 } 2645 2646 event_notifier_set_handler(&vq->host_notifier, NULL); 2647 r = virtio_bus_set_host_notifier(qbus, n, false); 2648 assert(r >= 0); 2649 } 2650 memory_region_transaction_commit(); 2651 2652 while (--i >= 0) { 2653 if (!virtio_queue_get_num(vdev, i)) { 2654 continue; 2655 } 2656 virtio_bus_cleanup_host_notifier(qbus, i); 2657 } 2658 return err; 2659 } 2660 2661 int virtio_device_start_ioeventfd(VirtIODevice *vdev) 2662 { 2663 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2664 VirtioBusState *vbus = VIRTIO_BUS(qbus); 2665 2666 return virtio_bus_start_ioeventfd(vbus); 2667 } 2668 2669 static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev) 2670 { 2671 VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev))); 2672 int n, r; 2673 2674 memory_region_transaction_begin(); 2675 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 2676 VirtQueue *vq = &vdev->vq[n]; 2677 2678 if (!virtio_queue_get_num(vdev, n)) { 2679 continue; 2680 } 2681 event_notifier_set_handler(&vq->host_notifier, NULL); 2682 r = virtio_bus_set_host_notifier(qbus, n, false); 2683 assert(r >= 0); 2684 } 2685 memory_region_transaction_commit(); 2686 2687 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 2688 if (!virtio_queue_get_num(vdev, n)) { 2689 continue; 2690 } 2691 virtio_bus_cleanup_host_notifier(qbus, n); 2692 } 2693 } 2694 2695 void virtio_device_stop_ioeventfd(VirtIODevice *vdev) 2696 { 2697 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2698 VirtioBusState *vbus = VIRTIO_BUS(qbus); 2699 2700 virtio_bus_stop_ioeventfd(vbus); 2701 } 2702 2703 int virtio_device_grab_ioeventfd(VirtIODevice *vdev) 2704 { 2705 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2706 VirtioBusState *vbus = VIRTIO_BUS(qbus); 2707 2708 return virtio_bus_grab_ioeventfd(vbus); 2709 } 2710 2711 void virtio_device_release_ioeventfd(VirtIODevice *vdev) 2712 { 2713 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2714 VirtioBusState *vbus = VIRTIO_BUS(qbus); 2715 2716 virtio_bus_release_ioeventfd(vbus); 2717 } 2718 2719 static void virtio_device_class_init(ObjectClass *klass, void *data) 2720 { 2721 /* Set the default value here. */ 2722 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 2723 DeviceClass *dc = DEVICE_CLASS(klass); 2724 2725 dc->realize = virtio_device_realize; 2726 dc->unrealize = virtio_device_unrealize; 2727 dc->bus_type = TYPE_VIRTIO_BUS; 2728 dc->props = virtio_properties; 2729 vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl; 2730 vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl; 2731 2732 vdc->legacy_features |= VIRTIO_LEGACY_FEATURES; 2733 } 2734 2735 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev) 2736 { 2737 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 2738 VirtioBusState *vbus = VIRTIO_BUS(qbus); 2739 2740 return virtio_bus_ioeventfd_enabled(vbus); 2741 } 2742 2743 static const TypeInfo virtio_device_info = { 2744 .name = TYPE_VIRTIO_DEVICE, 2745 .parent = TYPE_DEVICE, 2746 .instance_size = sizeof(VirtIODevice), 2747 .class_init = virtio_device_class_init, 2748 .instance_finalize = virtio_device_instance_finalize, 2749 .abstract = true, 2750 .class_size = sizeof(VirtioDeviceClass), 2751 }; 2752 2753 static void virtio_register_types(void) 2754 { 2755 type_register_static(&virtio_device_info); 2756 } 2757 2758 type_init(virtio_register_types) 2759