/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "kvm-cpus.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "sysemu/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
    }
    kvm_slots_unlock();

    return s->nr_slots - used_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}

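/*
 * Worked example (illustrative numbers, assuming a 4 KiB host page size):
 * for a section at offset 0x1200 with size 0x3000, kvm_align_section()
 * rounds the start up to 0x2000 (delta = 0xe00) and truncates the end
 * down, returning (0x3000 - 0xe00) & ~0xfff = 0x2000.  The sub-page head
 * and tail are simply not covered by any KVM memslot.
 */
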
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, ret);
    if (ret < 0) {
        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                     __func__, mem.slot, slot->start_addr,
                     (uint64_t)mem.memory_size, strerror(errno));
    }
    return ret;
}

static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    trace_kvm_destroy_vcpu();

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}

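/*
 * Background note on vcpu parking (not visible in the functions above):
 * KVM has no ioctl to destroy a vcpu, so a vcpu fd stays valid for the
 * lifetime of the VM.  do_kvm_destroy_vcpu() therefore only unmaps the
 * state and "parks" the fd; kvm_get_vcpu() hands a parked fd back out
 * when a vcpu with the same id is created again (e.g. CPU unplug followed
 * by replug), and only falls back to KVM_CREATE_VCPU for ids it has never
 * seen before.
 */
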
int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            error_setg_errno(errp, -ret,
                             "kvm_init_vcpu: mmap'ing vcpu dirty gfns failed (%lu)",
                             kvm_arch_vcpu_id(cpu));
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}

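/*
 * Illustrative sizing (not a requirement imposed by the code above): each
 * dirty ring entry is a struct kvm_dirty_gfn, 16 bytes in the current
 * Linux UAPI (u32 flags, u32 slot, u64 offset).  A dirty ring of 4096
 * entries therefore maps 4096 * 16 = 64 KiB per vcpu, placed in the vcpu
 * fd at page offset KVM_DIRTY_LOG_PAGE_OFFSET, separate from the kvm_run
 * mapping at offset 0.
 */
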
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But when the kernel is 64-bit and userspace is
     * 32-bit, userspace can't align to the same bits-per-long, since
     * sizeof(long) differs between kernel and user space.  In that case
     * userspace would provide a buffer that is 4 bytes smaller than what
     * the kernel uses, resulting in userspace memory corruption (which
     * is usually not detectable even by valgrind).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * the hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of the kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}

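/*
 * Sizing example (illustrative, assuming 4 KiB host pages): a 1 GiB slot
 * covers 262144 pages, so ALIGN(262144, 64) / 8 = 32768 bytes of bitmap.
 * The 64-bit rounding only matters for slots whose page count is not a
 * multiple of 64; it only ever grows the buffer beyond what the kernel
 * writes, never shrinks it.
 */
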
/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

/*
 * Should be with all slots_lock held for the address spaces.  It returns
 * the number of dirty pages we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but has not yet initialized the dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}

/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider dropping the BQL once we are confident about all the race
 * conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protecting those dirtied pages.
     *     Otherwise we risk data corruption if the page data is read
     *     in the other thread before we do the reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch dirty bitmap from the kernel,
 * and then updates qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)

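/*
 * Worked example (illustrative, assuming 4 KiB host pages, so
 * KVM_CLEAR_LOG_ALIGN is 64 * 4 KiB = 256 KiB): clearing start=0x5000,
 * size=0x3000 within a slot rounds bmap_start down to 0 (page 0) with a
 * start_delta of 5 pages, and rounds bmap_npages up to 64 (or clamps it
 * to the end of the slot if that comes first).  Since start_delta != 0,
 * the slow path below copies the relevant bits into a temporary bitmap
 * before issuing the ioctl.
 */
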
static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                                  of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, so that if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote, otherwise that would be data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}


/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml: the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

/*
 * We track the poisoned pages to be able to:
 * - replace them on VM reset
 * - block a migration for a VM with a poisoned page
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

bool kvm_hwpoisoned_mem(void)
{
    return !QLIST_EMPTY(&hwpoison_page_list);
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* The kernel expects ioeventfd values in host endianness, but the
     * memory core hands them in target endianness.  For example, PPC is
     * always treated as big-endian even if running on KVM and on
     * ppc64le.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}

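/*
 * Example (illustrative): a big-endian target writing the 32-bit value
 * 0x1 hands us 0x1 in target endianness; on a little-endian host the
 * kernel compares the guest's store as a host-byte-order integer, so we
 * must program datamatch = bswap32(0x1) = 0x01000000 for the match to
 * fire.
 */
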
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}


static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}

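/*
 * Illustrative note: kvm_max_slot_size defaults to ~0 (no limit); an
 * architecture that caps memslot size can lower it through
 * kvm_set_max_memslot_size().  With a hypothetical 4 GiB cap, a 10 GiB
 * RAM region would be registered below as three consecutive memslots of
 * 4 GiB, 4 GiB and 2 GiB, and every lookup/sync path walks the region in
 * the same kvm_max_slot_size-sized steps.
 */
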
/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: We should be aware of the fact that here we're only
                 * doing a best effort to sync dirty bits.  No matter whether
                 * we're using dirty log or dirty ring, we ignore two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy.  Let's cross our fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_used_slots--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }
        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_used_slots++;
    } while (size);
}

static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that the reaper does not interfere with dirtylimit */
        if (dirtylimit_in_service()) {
            continue;
        }

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        bql_lock();
        kvm_dirty_ring_reap(s, NULL);
        bql_unlock();

        r->reaper_iteration++;
    }

    trace_kvm_dirty_ring_reaper("exit");

    rcu_unregister_thread();

    return NULL;
}

static void kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);
}

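/*
 * Usage note (assumption about the surrounding configuration code, not
 * enforced here): s->kvm_dirty_ring_size comes from the "dirty-ring-size"
 * accel property, e.g. "-accel kvm,dirty-ring-size=4096", counted in
 * entries per vcpu and required by the property setter to be a power of
 * two.
 */
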
static int kvm_dirty_ring_init(KVMState *s)
{
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
    int ret;

    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_bytes = 0;

    /* Bail if the dirty ring size isn't specified */
    if (!ring_size) {
        return 0;
    }

    /*
     * Read the max supported pages.  Fall back to dirty logging mode
     * if the dirty ring isn't supported.
     */
    ret = kvm_vm_check_extension(s, capability);
    if (ret <= 0) {
        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
        ret = kvm_vm_check_extension(s, capability);
    }

    if (ret <= 0) {
        warn_report("KVM dirty ring not available, using bitmap method");
        return 0;
    }

    if (ring_bytes > ret) {
        error_report("KVM dirty ring size %" PRIu32 " too big "
                     "(maximum is %ld).  Please use a smaller value.",
                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
        return -EINVAL;
    }

    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
    if (ret) {
        error_report("Enabling of KVM dirty ring failed: %s. "
                     "Suggested minimum value is 1024.", strerror(-ret));
        return -EIO;
    }

    /* Enable the backup bitmap if it is supported */
    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
    if (ret > 0) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
        if (ret) {
            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
                         "%s", strerror(-ret));
            return -EIO;
        }

        s->kvm_dirty_ring_with_bitmap = true;
    }

    s->kvm_dirty_ring_size = ring_size;
    s->kvm_dirty_ring_bytes = ring_bytes;

    return 0;
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
}

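/*
 * Illustrative background for the commit step below: the memory core may
 * resize a region by emitting region_del(old) plus region_add(new) within
 * a single transaction.  Because the two ranges overlap, applying them as
 * two independent KVM_SET_USER_MEMORY_REGION calls would leave a window
 * with no memslot covering the range; kvm_region_commit() detects such
 * overlaps and inhibits ioctls so the delete/add pair appears atomic to
 * the vcpus.
 */
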
static void kvm_region_commit(MemoryListener *listener)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
                                          listener);
    KVMMemoryUpdate *u1, *u2;
    bool need_inhibit = false;

    if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
        QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        return;
    }

    /*
     * We have to be careful when regions to add overlap with ranges to remove.
     * We have to simulate atomic KVM memslot updates by making sure no ioctl()
     * is currently active.
     *
     * The lists are ordered by address, so it's easy to find overlaps.
     */
    u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
    u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
    while (u1 && u2) {
        Range r1, r2;

        range_init_nofail(&r1, u1->section.offset_within_address_space,
                          int128_get64(u1->section.size));
        range_init_nofail(&r2, u2->section.offset_within_address_space,
                          int128_get64(u2->section.size));

        if (range_overlaps_range(&r1, &r2)) {
            need_inhibit = true;
            break;
        }
        if (range_lob(&r1) < range_lob(&r2)) {
            u1 = QSIMPLEQ_NEXT(u1, next);
        } else {
            u2 = QSIMPLEQ_NEXT(u2, next);
        }
    }

    kvm_slots_lock();
    if (need_inhibit) {
        accel_ioctl_inhibit_begin();
    }

    /* Remove all memslots before adding the new ones. */
    while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);

        kvm_set_phys_mem(kml, &u1->section, false);
        memory_region_unref(u1->section.mr);

        g_free(u1);
    }
    while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);

        memory_region_ref(u1->section.mr);
        kvm_set_phys_mem(kml, &u1->section, true);

        g_free(u1);
    }

    if (need_inhibit) {
        accel_ioctl_inhibit_end();
    }
    kvm_slots_unlock();
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_slots_lock();
    kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock();
}

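/*
 * Context (assumption about the wider call chain, for orientation):
 * during migration the memory core's global dirty log sync invokes either
 * the per-section log_sync() above (dirty bitmap mode) or the
 * log_sync_global() below (dirty ring mode, where dirty information is
 * collected per vcpu rather than per slot, so only a global sync is
 * meaningful).
 */
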
static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i;

    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
    kvm_dirty_ring_flush();

    /*
     * TODO: make this faster when nr_slots is big while there are
     * only a few used slots (small VMs).
     */
    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_slot_sync_dirty_pages(mem);

            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
                kvm_slot_get_dirty_log(s, mem)) {
                kvm_slot_sync_dirty_pages(mem);
            }

            /*
             * This is not needed by KVM_GET_DIRTY_LOG because the
             * ioctl will unconditionally overwrite the whole region.
             * However kvm dirty ring has no such side effect.
             */
            kvm_slot_reset_dirty_pages(mem);
        }
    }
    kvm_slots_unlock();
}

static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id, const char *name)
{
    int i;

    kml->slots = g_new0(KVMSlot, s->nr_slots);
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    QSIMPLEQ_INIT(&kml->transaction_add);
    QSIMPLEQ_INIT(&kml->transaction_del);

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.commit = kvm_region_commit;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
    kml->listener.name = name;

    if (s->kvm_dirty_ring_size) {
        kml->listener.log_sync_global = kvm_log_sync_global;
    } else {
        kml->listener.log_sync = kvm_log_sync;
        kml->listener.log_clear = kvm_log_clear;
    }

    memory_listener_register(&kml->listener, as);

    for (i = 0; i < s->nr_as; ++i) {
        if (!s->as[i].as) {
            s->as[i].as = as;
            s->as[i].ml = kml;
            break;
        }
    }
}

static MemoryListener kvm_io_listener = {
    .name = "kvm-io",
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
};

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    if (!kvm_gsi_routing_enabled()) {
        return;
    }

    s->irq_routes->flags = 0;
    trace_kvm_irqchip_commit_routes();
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        return 0;
    }

    return -ESRCH;
}

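/*
 * Usage sketch (illustrative): routing updates are batched in the
 * userspace table and only pushed to the kernel on commit, since
 * KVM_SET_GSI_ROUTING replaces the whole table in one ioctl:
 *
 *     kvm_irqchip_add_irq_route(s, irq, KVM_IRQCHIP_IOAPIC, pin);
 *     ... more route changes ...
 *     kvm_irqchip_commit_routes(s);
 */
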
void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
    trace_kvm_irqchip_release_virq(virq);
}

void kvm_irqchip_add_change_notifier(Notifier *n)
{
    notifier_list_add(&kvm_irqchip_change_notifiers, n);
}

void kvm_irqchip_remove_change_notifier(Notifier *n)
{
    notifier_remove(n);
}

void kvm_irqchip_change_notify(void)
{
    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;

    msi.address_lo = (uint32_t)msg.address;
    msi.address_hi = msg.address >> 32;
    msi.data = le32_to_cpu(msg.data);
    msi.flags = 0;
    memset(msi.pad, 0, sizeof(msi.pad));

    return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
}

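/*
 * Usage sketch (illustrative; the begin/commit helpers live in the kvm
 * headers): callers allocate MSI routes through a KVMRouteChange so that
 * several additions share a single KVM_SET_GSI_ROUTING commit:
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, dev);
 *     kvm_irqchip_commit_route_changes(&c);
 */
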
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;
    KVMState *s = c->s;
    MSIMessage msg = {0, 0};

    if (pci_available && dev) {
        msg = pci_get_msi_message(dev, vector);
    }

    if (kvm_gsi_direct_mapping()) {
        return kvm_arch_msi_data_to_gsi(msg.data);
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        kvm_irqchip_release_virq(s, virq);
        return -EINVAL;
    }

    if (s->irq_routes->nr < s->gsi_count) {
        trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
                                        vector, virq);

        kvm_add_routing_entry(s, &kroute);
        kvm_arch_add_msi_route_post(&kroute, vector, dev);
        c->changes++;
    } else {
        kvm_irqchip_release_virq(s, virq);
        return -ENOSPC;
    }

    return virq;
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};

    if (kvm_gsi_direct_mapping()) {
        return 0;
    }

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        return -EINVAL;
    }

    trace_kvm_irqchip_update_msi_route(virq);

    return kvm_update_routing_entry(s, &kroute);
}


static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    int fd = event_notifier_get_fd(event);
    int rfd = resample ? event_notifier_get_fd(resample) : -1;

    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        assert(assign);
        if (kvm_irqchip_is_split()) {
            /*
             * When the slow irqchip (e.g. IOAPIC) is in userspace, the
             * KVM kernel resamplefd will not work because the EOI of the
             * interrupt will be delivered to userspace instead, so the
             * KVM kernel resamplefd kick will be skipped.  The userspace
             * here mimics what the kernel provides with resamplefd:
             * remember the resamplefd and kick it when we receive the
             * EOI of this IRQ.
             *
             * This is hackery because the IOAPIC is mostly bypassed
             * (except EOI broadcasts) when irqfd is used.  However it
             * can bring much performance back for split irqchip with
             * INTx IRQs (for VFIO, this gives 93% of the full fast
             * path's performance, a 46% boost compared to the INTx
             * slow path).
             */
            kvm_resample_fd_insert(virq, resample);
        } else {
            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
            irqfd.resamplefd = rfd;
        }
    } else if (!assign) {
        if (kvm_irqchip_is_split()) {
            kvm_resample_fd_remove(virq);
        }
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
    kroute.flags = 0;
    kroute.u.adapter.summary_addr = adapter->summary_addr;
    kroute.u.adapter.ind_addr = adapter->ind_addr;
    kroute.u.adapter.summary_offset = adapter->summary_offset;
    kroute.u.adapter.ind_offset = adapter->ind_offset;
    kroute.u.adapter.adapter_id = adapter->adapter_id;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}

#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
}

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
}
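
/*
 * A minimal sketch of wiring an eventfd to a guest interrupt with the
 * helpers above (virq obtained from one of the route helpers; error
 * handling omitted):
 *
 *     EventNotifier n;
 *
 *     event_notifier_init(&n, 0);
 *     kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &n, NULL, virq);
 *     event_notifier_set(&n);   // kernel injects the IRQ, no exit to QEMU
 */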

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
}

void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
{
    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}

static void kvm_irqchip_create(KVMState *s)
{
    int ret;

    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        ;
    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
        if (ret < 0) {
            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
            exit(1);
        }
    } else {
        return;
    }

    if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
        fprintf(stderr, "kvm: irqfd not implemented\n");
        exit(1);
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(s);
    if (ret == 0) {
        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
            error_report("Split IRQ chip mode not supported.");
            exit(1);
        } else {
            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
        exit(1);
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}

/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_max_vcpu_id(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
    return (ret) ? ret : kvm_max_vcpus(s);
}

bool kvm_vcpu_id_is_valid(int vcpu_id)
{
    KVMState *s = KVM_STATE(current_accel());
    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}

bool kvm_dirty_ring_enabled(void)
{
    return kvm_state->kvm_dirty_ring_size ? true : false;
}

static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp);
static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);

uint32_t kvm_dirty_ring_size(void)
{
    return kvm_state->kvm_dirty_ring_size;
}

static int kvm_init(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    const struct {
        const char *name;
        int num;
    } num_cpus[] = {
        { "SMP", ms->smp.cpus },
        { "hotpluggable", ms->smp.max_cpus },
        { /* end of list */ }
    }, *nc = num_cpus;
    int soft_vcpus_limit, hard_vcpus_limit;
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int type;
    uint64_t dirty_log_manual_caps;

    qemu_mutex_init(&kml_slots_lock);

    s = KVM_STATE(ms->accelerator);

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());

    s->sigmask_len = 8;
    accel_blocker_init();

#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    QLIST_INIT(&s->kvm_parked_vcpus);
    s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);

    /* If unspecified, use the default value */
    if (!s->nr_slots) {
        s->nr_slots = 32;
    }

    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
    if (s->nr_as <= 1) {
        s->nr_as = 1;
    }
    s->as = g_new0(struct KVMAs, s->nr_as);

    if (object_property_find(OBJECT(current_machine), "kvm-type")) {
        g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
                                                            "kvm-type",
                                                            &error_abort);
        type = mc->kvm_type(ms, kvm_type);
    } else if (mc->kvm_type) {
        type = mc->kvm_type(ms, NULL);
    } else {
        type = kvm_arch_get_default_type(ms);
    }

    if (type < 0) {
        ret = -EINVAL;
        goto err;
    }
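
    /*
     * KVM_CREATE_VM can be interrupted by a signal before a VM fd has
     * been handed out, in which case it fails with -EINTR and can
     * simply be retried.
     */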
    do {
        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
    } while (ret == -EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
                strerror(-ret));

#ifdef TARGET_S390X
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "Host kernel setup problem detected. Please verify:\n");
            fprintf(stderr, "- for kernels supporting the switch_amode or"
                    " user_mode parameters, whether\n");
            fprintf(stderr,
                    "  user space is running in primary address space\n");
            fprintf(stderr,
                    "- for kernels supporting the vm.allocate_pgste sysctl, "
                    "whether it is enabled\n");
        }
#elif defined(TARGET_PPC)
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
                    (type == 2) ? "pr" : "hv");
        }
#endif
        goto err;
    }

    s->vmfd = ret;

    /* check the vcpu limits */
    soft_vcpus_limit = kvm_recommended_vcpus(s);
    hard_vcpus_limit = kvm_max_vcpus(s);

    while (nc->name) {
        if (nc->num > soft_vcpus_limit) {
            warn_report("Number of %s cpus requested (%d) exceeds "
                        "the recommended cpus supported by KVM (%d)",
                        nc->name, nc->num, soft_vcpus_limit);

            if (nc->num > hard_vcpus_limit) {
                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                        "the maximum cpus supported by KVM (%d)\n",
                        nc->name, nc->num, hard_vcpus_limit);
                exit(1);
            }
        }
        nc++;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_pio = s->coalesced_mmio &&
                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);

    /*
     * Enable KVM dirty ring if supported, otherwise fall back to
     * dirty logging mode
     */
    ret = kvm_dirty_ring_init(s);
    if (ret < 0) {
        goto err;
    }

    /*
     * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when the dirty
     * ring is enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET
     * will assume no page is wr-protected initially, which is contrary
     * to how the kvm dirty ring is used - the dirty ring requires all
     * pages to be wr-protected at the very beginning.  Enabling this
     * feature together with the dirty ring causes data corruption.
     *
     * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty
     * log, we may expect a higher stall time when starting the migration.
     * In the future we can enable KVM_CLEAR_DIRTY_LOG to work with the
     * dirty ring too: instead of clearing the dirty bit, it can be a way
     * to explicitly wr-protect guest pages.
     */
    if (!s->kvm_dirty_ring_size) {
        dirty_log_manual_caps =
            kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                  KVM_DIRTY_LOG_INITIALLY_SET);
        s->manual_dirty_log_protect = dirty_log_manual_caps;
        if (dirty_log_manual_caps) {
            ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
                                    dirty_log_manual_caps);
            if (ret) {
                warn_report("Failed to enable capability %"PRIu64" of "
                            "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. "
                            "Falling back to the legacy mode.",
                            dirty_log_manual_caps);
                s->manual_dirty_log_protect = 0;
            }
        }
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif
    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
    kvm_has_guest_debug =
        (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
#endif

    kvm_sstep_flags = 0;
    if (kvm_has_guest_debug) {
        kvm_sstep_flags = SSTEP_ENABLE;

#if defined TARGET_KVM_HAVE_GUEST_DEBUG
        int guest_debug_flags =
            kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);

        if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
            kvm_sstep_flags |= SSTEP_NOIRQ;
        }
#endif
    }

    kvm_state = s;

    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ?
                                  ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    qemu_register_reset(kvm_unpoison_all, NULL);

    if (s->kernel_irqchip_allowed) {
        kvm_irqchip_create(s);
    }

    s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
    s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;

    kvm_memory_listener_register(s, &s->memory_listener,
                                 &address_space_memory, 0, "kvm-memory");
    memory_listener_register(&kvm_io_listener,
                             &address_space_io);

    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
    if (!s->sync_mmu) {
        ret = ram_block_discard_disable(true);
        assert(!ret);
    }

    if (s->kvm_dirty_ring_size) {
        kvm_dirty_ring_reaper_init(s);
    }

    if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
        add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
                            query_stats_schemas_cb);
    }

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->as);
    g_free(s->memory_listener.slots);

    return ret;
}

void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}
static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
                          int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    int i;

    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    for (i = 0; i < run->internal.ndata; ++i) {
        fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
                i, (uint64_t)run->internal.data[i]);
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (!s || s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            if (ent->pio == 1) {
                address_space_write(&address_space_io, ent->phys_addr,
                                    MEMTXATTRS_UNSPECIFIED, ent->data,
                                    ent->len);
            } else {
                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            }
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
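
/*
 * vCPU register state is synchronized lazily: cpu->vcpu_dirty set means
 * QEMU's copy is authoritative and must be pushed back to the kernel
 * (see kvm_arch_put_registers() in kvm_cpu_exec()) before the vCPU runs
 * again; clear means the kernel's copy is current and has to be fetched
 * before QEMU may look at it.
 */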

static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
        int ret = kvm_arch_get_registers(cpu);
        if (ret) {
            error_report("Failed to get registers: %s", strerror(-ret));
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            vm_stop(RUN_STATE_INTERNAL_ERROR);
        }

        cpu->vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
    int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    if (ret) {
        error_report("Failed to put registers after reset: %s", strerror(-ret));
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    if (ret) {
        error_report("Failed to put registers after init: %s", strerror(-ret));
        exit(1);
    }

    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    if (!kvm_state->guest_state_protected) {
        /*
         * This runs before the machine_init_done notifiers, and is the last
         * opportunity to synchronize the state of confidential guests.
         */
        run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
    }
}

static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
static __thread bool have_sigbus_pending;
#endif

static void kvm_cpu_kick(CPUState *cpu)
{
    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
}

static void kvm_cpu_kick_self(void)
{
    if (kvm_immediate_exit) {
        kvm_cpu_kick(current_cpu);
    } else {
        qemu_cpu_kick_self();
    }
}

static void kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    if (kvm_immediate_exit) {
        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
        /* Write kvm_run->immediate_exit before the cpu->exit_request
         * write in kvm_cpu_exec.
         */
        smp_wmb();
        return;
    }

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI));
}
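
/*
 * Outer vCPU run loop: enters the kernel repeatedly and services exits
 * until one of them needs the main loop.  Returns EXCP_HLT when async
 * event processing left the vCPU halted, EXCP_INTERRUPT when control
 * should go back around the main loop, and a negative value after a
 * fatal error (in which case the VM has already been stopped in
 * RUN_STATE_INTERNAL_ERROR).
 */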
int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    trace_kvm_cpu_exec();

    if (kvm_arch_process_async_events(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        return EXCP_HLT;
    }

    bql_unlock();
    cpu_exec_start(cpu);

    do {
        MemTxAttrs attrs;

        if (cpu->vcpu_dirty) {
            ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            if (ret) {
                error_report("Failed to put registers: %s",
                             strerror(-ret));
                ret = -1;
                break;
            }

            cpu->vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (qatomic_read(&cpu->exit_request)) {
            trace_kvm_interrupt_exit_request();
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            kvm_cpu_kick_self();
        }

        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
         * Matching barrier in kvm_eat_signals.
         */
        smp_rmb();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

#ifdef KVM_HAVE_MCE_INJECTION
        if (unlikely(have_sigbus_pending)) {
            bql_lock();
            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                    pending_sigbus_addr);
            have_sigbus_pending = false;
            bql_unlock();
        }
#endif

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                trace_kvm_io_window_exit();
                kvm_eat_signals(cpu);
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }
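
        /*
         * Dispatch on the exit reason.  Note that the I/O and MMIO cases
         * below run without retaking the BQL (it was dropped before the
         * loop); bql_lock() is only taken again on the paths that need it.
         */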
        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_DIRTY_RING_FULL:
            /*
             * We shouldn't continue if the dirty ring of this vcpu is
             * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
             */
            trace_kvm_dirty_ring_full(cpu->cpu_index);
            bql_lock();
            /*
             * We throttle a vCPU by making it sleep once it exits from the
             * kernel due to a full dirty ring.  In the dirtylimit scenario,
             * reaping all vCPUs after a single vCPU's dirty ring gets full
             * would make that vCPU miss its sleep, so reap only the vCPU
             * whose ring is full.
             */
            if (dirtylimit_in_service()) {
                kvm_dirty_ring_reap(kvm_state, cpu);
            } else {
                kvm_dirty_ring_reap(kvm_state, NULL);
            }
            bql_unlock();
            dirtylimit_vcpu_execute(cpu);
            ret = 0;
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                kvm_cpu_synchronize_state(cpu);
                bql_lock();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
                bql_unlock();
                ret = 0;
                break;
            default:
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    bql_lock();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    qatomic_set(&cpu->exit_request, 0);
    return ret;
}

int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
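
/*
 * The VM-, vCPU- and device-level ioctl wrappers below additionally
 * bracket the ioctl with accel_ioctl_begin()/accel_ioctl_end(), so the
 * accel-blocker machinery can hold off new ioctls and wait for in-flight
 * ones, e.g. while a memslot is being updated in place.
 */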

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    accel_ioctl_begin();
    ret = ioctl(s->vmfd, type, arg);
    accel_ioctl_end();
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    accel_cpu_ioctl_begin(cpu);
    ret = ioctl(cpu->kvm_fd, type, arg);
    accel_cpu_ioctl_end(cpu);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    accel_ioctl_begin();
    ret = ioctl(fd, type, arg);
    accel_ioctl_end();
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}

int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_setg_errno(errp, -err,
                         "KVM_%s_DEVICE_ATTR failed: Group %d "
                         "attr 0x%016" PRIx64,
                         write ? "SET" : "GET", group, attr);
    }
    return err;
}
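
/*
 * A minimal sketch of how the attribute helpers combine with a device fd
 * from kvm_create_device() (defined later in this file); `group`, `attr`
 * and `val` are illustrative placeholders:
 *
 *     int fd = kvm_create_device(s, KVM_DEV_TYPE_VFIO, false);
 *
 *     if (fd >= 0 && kvm_device_check_attr(fd, group, attr)) {
 *         kvm_device_access(fd, group, attr, &val, true, &error_abort);
 *     }
 */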

bool kvm_has_sync_mmu(void)
{
    return kvm_state->sync_mmu;
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_max_nested_state_length(void)
{
    return kvm_state->max_nested_state_len;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

bool kvm_arm_supports_user_irq(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
}

#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
    struct kvm_set_guest_debug_data *dbg_data =
        (struct kvm_set_guest_debug_data *) data.host_ptr;

    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

        if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
            data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
        }
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);

    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
               RUN_ON_CPU_HOST_PTR(&data));
    return data.err;
}

bool kvm_supports_guest_debug(void)
{
    /* probed during kvm_init() */
    return kvm_has_guest_debug;
}

int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_new(struct kvm_sw_breakpoint, 1);
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}

#endif /* TARGET_KVM_HAVE_GUEST_DEBUG */
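
/*
 * The helpers above back the gdbstub: a software breakpoint is recorded
 * in kvm_sw_breakpoints and installed by the per-arch hook (typically by
 * patching a trap instruction into guest memory), after which every vCPU
 * is told via KVM_SET_GUEST_DEBUG to exit to userspace on debug events.
 */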

static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

static void kvm_ipi_signal(int sig)
{
    if (current_cpu) {
        assert(kvm_immediate_exit);
        kvm_cpu_kick(current_cpu);
    }
}

void kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = kvm_ipi_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
#if defined KVM_HAVE_MCE_INJECTION
    sigdelset(&set, SIGBUS);
    pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
    sigdelset(&set, SIG_IPI);
    if (kvm_immediate_exit) {
        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
    } else {
        r = kvm_set_signal_mask(cpu, &set);
    }
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

/* Called asynchronously in VCPU thread. */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    if (have_sigbus_pending) {
        return 1;
    }
    have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
    qatomic_set(&cpu->exit_request, 1);
    return 0;
#else
    return 1;
#endif
}

/* Called synchronously (via signalfd) in main thread. */
int kvm_on_sigbus(int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    /* An action-required MCE kills the process if SIGBUS is blocked.
     * Because that is what happens in the I/O thread, where we handle
     * MCE via signalfd, we can only get an action-optional MCE here.
     */
    assert(code != BUS_MCEERR_AR);
    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
    return 0;
#else
    return 1;
#endif
}

int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}

bool kvm_device_supported(int vmfd, uint64_t type)
{
    struct kvm_create_device create_dev = {
        .type = type,
        .fd = -1,
        .flags = KVM_CREATE_DEVICE_TEST,
    };

    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
        return false;
    }

    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
}
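
/*
 * The KVM_{SET,GET}_ONE_REG API identifies a register with a 64-bit id
 * that encodes the architecture, the register size and the register
 * number; source/target must point at a caller-supplied buffer of the
 * encoded size.
 */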

int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}

static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
                                 hwaddr start_addr, hwaddr size)
{
    KVMState *kvm = KVM_STATE(ms->accelerator);
    int i;

    for (i = 0; i < kvm->nr_as; ++i) {
        if (kvm->as[i].as == as && kvm->as[i].ml) {
            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
    }

    return false;
}

static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}

static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }

    s->kvm_shadow_mem = value;
}

static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    OnOffSplit mode;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
        return;
    }
    switch (mode) {
    case ON_OFF_SPLIT_ON:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_OFF:
        s->kernel_irqchip_allowed = false;
        s->kernel_irqchip_required = false;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_SPLIT:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
        break;
    default:
        /* The value was checked in visit_type_OnOffSplit() above.  If
         * we get here, then something is wrong in QEMU.
         */
        abort();
    }
}
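
/*
 * The setter above backs the "kernel-irqchip" accelerator property, e.g.:
 *
 *     qemu-system-x86_64 -accel kvm,kernel-irqchip=split ...
 */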

bool kvm_kernel_irqchip_allowed(void)
{
    return kvm_state->kernel_irqchip_allowed;
}

bool kvm_kernel_irqchip_required(void)
{
    return kvm_state->kernel_irqchip_required;
}

bool kvm_kernel_irqchip_split(void)
{
    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}

static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->kvm_dirty_ring_size;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value & (value - 1)) {
        error_setg(errp, "dirty-ring-size must be a power of two.");
        return;
    }

    s->kvm_dirty_ring_size = value;
}
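
/*
 * For example, "-accel kvm,dirty-ring-size=4096" asks for a 4096-entry
 * per-vCPU dirty ring (the value counts entries, not bytes, and must be
 * a power of two).
 */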

static char *kvm_get_device(Object *obj,
                            Error **errp G_GNUC_UNUSED)
{
    KVMState *s = KVM_STATE(obj);

    return g_strdup(s->device);
}

static void kvm_set_device(Object *obj,
                           const char *value,
                           Error **errp G_GNUC_UNUSED)
{
    KVMState *s = KVM_STATE(obj);

    g_free(s->device);
    s->device = g_strdup(value);
}

static void kvm_accel_instance_init(Object *obj)
{
    KVMState *s = KVM_STATE(obj);

    s->fd = -1;
    s->vmfd = -1;
    s->kvm_shadow_mem = -1;
    s->kernel_irqchip_allowed = true;
    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
    /* KVM dirty ring is by default off */
    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_with_bitmap = false;
    s->kvm_eager_split_size = 0;
    s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
    s->notify_window = 0;
    s->xen_version = 0;
    s->xen_gnttab_max_frames = 64;
    s->xen_evtchn_max_pirq = 256;
    s->device = NULL;
}

/**
 * kvm_gdbstub_sstep_flags():
 *
 * Returns: SSTEP_* flags that KVM supports for guest debug.  The
 * support is probed during kvm_init().
 */
static int kvm_gdbstub_sstep_flags(void)
{
    return kvm_sstep_flags;
}

static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;
    ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
                              NULL, kvm_set_kernel_irqchip,
                              NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
                              kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
                              NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");

    object_class_property_add(oc, "dirty-ring-size", "uint32",
                              kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
                              NULL, NULL);
    object_class_property_set_description(oc, "dirty-ring-size",
        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");

    object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
    object_class_property_set_description(oc, "device",
        "Path to the device node to use (default: /dev/kvm)");

    kvm_arch_accel_class_init(oc);
}

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);

typedef struct StatsArgs {
    union StatsResultsType {
        StatsResultList **stats;
        StatsSchemaList **schema;
    } result;
    strList *names;
    Error **errp;
} StatsArgs;

static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
                                    uint64_t *stats_data,
                                    StatsList *stats_list,
                                    Error **errp)
{
    Stats *stats;
    uint64List *val_list = NULL;

    /* Only add stats that we understand. */
    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
    case KVM_STATS_TYPE_INSTANT:
    case KVM_STATS_TYPE_PEAK:
    case KVM_STATS_TYPE_LINEAR_HIST:
    case KVM_STATS_TYPE_LOG_HIST:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
    case KVM_STATS_UNIT_BYTES:
    case KVM_STATS_UNIT_CYCLES:
    case KVM_STATS_UNIT_SECONDS:
    case KVM_STATS_UNIT_BOOLEAN:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_BASE_MASK) {
    case KVM_STATS_BASE_POW10:
    case KVM_STATS_BASE_POW2:
        break;
    default:
        return stats_list;
    }

    /* Alloc and populate data list */
    stats = g_new0(Stats, 1);
    stats->name = g_strdup(pdesc->name);
    stats->value = g_new0(StatsValue, 1);

    if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
        stats->value->u.boolean = *stats_data;
        stats->value->type = QTYPE_QBOOL;
    } else if (pdesc->size == 1) {
        stats->value->u.scalar = *stats_data;
        stats->value->type = QTYPE_QNUM;
    } else {
        int i;
        for (i = 0; i < pdesc->size; i++) {
            QAPI_LIST_PREPEND(val_list, stats_data[i]);
        }
        stats->value->u.list = val_list;
        stats->value->type = QTYPE_QLIST;
    }

    QAPI_LIST_PREPEND(stats_list, stats);
    return stats_list;
}

static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
                                                 StatsSchemaValueList *list,
                                                 Error **errp)
{
    StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
    schema_entry->value = g_new0(StatsSchemaValue, 1);

    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
        schema_entry->value->type = STATS_TYPE_CUMULATIVE;
        break;
    case KVM_STATS_TYPE_INSTANT:
        schema_entry->value->type = STATS_TYPE_INSTANT;
        break;
    case KVM_STATS_TYPE_PEAK:
        schema_entry->value->type = STATS_TYPE_PEAK;
        break;
    case KVM_STATS_TYPE_LINEAR_HIST:
        schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
        schema_entry->value->bucket_size = pdesc->bucket_size;
        schema_entry->value->has_bucket_size = true;
        break;
    case KVM_STATS_TYPE_LOG_HIST:
        schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
        break;
    default:
        goto exit;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
        break;
    case KVM_STATS_UNIT_BOOLEAN:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BOOLEAN;
        break;
    case KVM_STATS_UNIT_BYTES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BYTES;
        break;
    case KVM_STATS_UNIT_CYCLES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_CYCLES;
        break;
    case KVM_STATS_UNIT_SECONDS:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_SECONDS;
        break;
    default:
        goto exit;
    }

    schema_entry->value->exponent = pdesc->exponent;
    if (pdesc->exponent) {
        switch (pdesc->flags & KVM_STATS_BASE_MASK) {
        case KVM_STATS_BASE_POW10:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 10;
            break;
        case KVM_STATS_BASE_POW2:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 2;
            break;
        default:
            goto exit;
        }
    }

    schema_entry->value->name = g_strdup(pdesc->name);
    schema_entry->next = list;
    return schema_entry;
exit:
    g_free(schema_entry->value);
    g_free(schema_entry);
    return list;
}

/* Cached stats descriptors */
typedef struct StatsDescriptors {
    const char *ident; /* cache key, currently the StatsTarget */
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header kvm_stats_header;
    QTAILQ_ENTRY(StatsDescriptors) next;
} StatsDescriptors;

static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
    QTAILQ_HEAD_INITIALIZER(stats_descriptors);

/*
 * Return the descriptors for 'target', that either have already been read
 * or are retrieved from 'stats_fd'.
 */
static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
                                                Error **errp)
{
    StatsDescriptors *descriptors;
    const char *ident;
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    size_t size_desc;
    ssize_t ret;

    ident = StatsTarget_str(target);
    QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
        if (g_str_equal(descriptors->ident, ident)) {
            return descriptors;
        }
    }

    descriptors = g_new0(StatsDescriptors, 1);

    /* Read stats header */
    kvm_stats_header = &descriptors->kvm_stats_header;
    ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
    if (ret != sizeof(*kvm_stats_header)) {
        error_setg(errp, "KVM stats: failed to read stats header: "
                   "expected %zu actual %zu",
                   sizeof(*kvm_stats_header), ret);
        g_free(descriptors);
        return NULL;
    }
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Read stats descriptors */
    kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
    ret = pread(stats_fd, kvm_stats_desc,
                size_desc * kvm_stats_header->num_desc,
                kvm_stats_header->desc_offset);

    if (ret != size_desc * kvm_stats_header->num_desc) {
        error_setg(errp, "KVM stats: failed to read stats descriptors: "
                   "expected %zu actual %zu",
                   size_desc * kvm_stats_header->num_desc, ret);
        g_free(descriptors);
        g_free(kvm_stats_desc);
        return NULL;
    }
    descriptors->kvm_stats_desc = kvm_stats_desc;
    descriptors->ident = ident;
    QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
    return descriptors;
}
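
/*
 * The binary stats file descriptor (see KVM_GET_STATS_FD in the kernel's
 * api.rst) exposes three regions: a header, an array of per-stat
 * descriptors at desc_offset, and the raw values at data_offset; the
 * helpers below read the data block and pair each value with its cached
 * descriptor.
 */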

static void query_stats(StatsResultList **result, StatsTarget target,
                        strList *names, int stats_fd, CPUState *cpu,
                        Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    g_autofree uint64_t *stats_data = NULL;
    struct kvm_stats_desc *pdesc;
    StatsList *stats_list = NULL;
    size_t size_desc, size_data = 0;
    ssize_t ret;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size; read schema data */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        size_data += pdesc->size * sizeof(*stats_data);
    }

    stats_data = g_malloc0(size_data);
    ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);

    if (ret != size_data) {
        error_setg(errp, "KVM stats: failed to read data: "
                   "expected %zu actual %zu", size_data, ret);
        return;
    }

    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        uint64_t *stats;
        pdesc = (void *)kvm_stats_desc + i * size_desc;

        /* Add entry to the list */
        stats = (void *)stats_data + pdesc->offset;
        if (!apply_str_list_filter(pdesc->name, names)) {
            continue;
        }
        stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
    }

    if (!stats_list) {
        return;
    }

    switch (target) {
    case STATS_TARGET_VM:
        add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
        break;
    case STATS_TARGET_VCPU:
        add_stats_entry(result, STATS_PROVIDER_KVM,
                        cpu->parent_obj.canonical_path,
                        stats_list);
        break;
    default:
        g_assert_not_reached();
    }
}

static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
                               int stats_fd, Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    struct kvm_stats_desc *pdesc;
    StatsSchemaValueList *stats_list = NULL;
    size_t size_desc;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Walk the descriptors and convert each one to a schema entry */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
    }

    add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}

static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
                kvm_stats_args->names, stats_fd, cpu,
                kvm_stats_args->errp);
}

static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU,
                       stats_fd, kvm_stats_args->errp);
}
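
/*
 * Callbacks registered with add_stats_callbacks() in kvm_init(); they
 * service the QMP "query-stats" and "query-stats-schemas" commands for
 * the KVM provider.
 */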
static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp)
{
    KVMState *s = kvm_state;
    CPUState *cpu;
    int stats_fd;

    switch (target) {
    case STATS_TARGET_VM:
    {
        stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
        if (stats_fd == -1) {
            error_setg_errno(errp, errno, "KVM stats: ioctl failed");
            return;
        }
        query_stats(result, target, names, stats_fd, NULL, errp);
        close(stats_fd);
        break;
    }
    case STATS_TARGET_VCPU:
    {
        StatsArgs stats_args;
        stats_args.result.stats = result;
        stats_args.names = names;
        stats_args.errp = errp;
        CPU_FOREACH(cpu) {
            if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
                continue;
            }
            query_stats_vcpu(cpu, &stats_args);
        }
        break;
    }
    default:
        break;
    }
}

static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
{
    StatsArgs stats_args;
    KVMState *s = kvm_state;
    int stats_fd;

    stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
    if (stats_fd == -1) {
        error_setg_errno(errp, errno, "KVM stats: ioctl failed");
        return;
    }
    query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
    close(stats_fd);

    if (first_cpu) {
        stats_args.result.schema = result;
        stats_args.errp = errp;
        query_stats_schema_vcpu(first_cpu, &stats_args);
    }
}

void kvm_mark_guest_state_protected(void)
{
    kvm_state->guest_state_protected = true;
}