/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "gdbstub/enums.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "kvm-cpus.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "sysemu/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

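/*
 * A parked vCPU keeps its KVM vcpu fd open after the corresponding CPU
 * is destroyed, so that kvm_create_vcpu() can re-use the fd for a vCPU
 * with the same id instead of issuing another KVM_CREATE_VCPU (see
 * kvm_park_vcpu() and kvm_unpark_vcpu() below).
 */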
struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static uint64_t kvm_supported_memory_attributes;
static bool kvm_guest_memfd_supported;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
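    /*
     * Each address space registered via kvm_memory_listener_register()
     * has its own array of s->nr_slots memslots, so report the free
     * count of the most heavily used listener, i.e. the worst case
     * across address spaces.
     */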
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
    }
    kvm_slots_unlock();

    return s->nr_slots - used_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
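 *
 * Example (assuming 4KiB host pages): a section starting at 0x1800 with
 * size 0x2800 yields *start = 0x2000 and a returned size of 0x2000.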
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Round the start
       address up to the next page boundary and truncate the size down
       to the previous one. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region2 mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    mem.guest_memfd = slot->guest_memfd;
    mem.guest_memfd_offset = slot->guest_memfd_offset;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;

        if (kvm_guest_memfd_supported) {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
        } else {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        }
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    if (kvm_guest_memfd_supported) {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
    } else {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, mem.guest_memfd,
                              mem.guest_memfd_offset, ret);
    if (ret < 0) {
        if (kvm_guest_memfd_supported) {
            error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
                         " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
                         " guest_memfd_offset=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, mem.flags,
                         mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
                         strerror(errno));
        } else {
            error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, strerror(errno));
        }
    }
    return ret;
}

void kvm_park_vcpu(CPUState *cpu)
{
    struct KVMParkedVcpu *vcpu;

    trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
}

int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;
    int kvm_fd = -ENOENT;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            break;
        }
    }

    trace_kvm_unpark_vcpu(vcpu_id, kvm_fd > 0 ? "unparked" : "!found parked");

    return kvm_fd;
}

int kvm_create_vcpu(CPUState *cpu)
{
    unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
    KVMState *s = kvm_state;
    int kvm_fd;

    /* check if the KVM vCPU already exists but is parked */
    kvm_fd = kvm_unpark_vcpu(s, vcpu_id);
    if (kvm_fd < 0) {
        /* vCPU not parked: create a new KVM vCPU */
        kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
        if (kvm_fd < 0) {
            error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
            return kvm_fd;
        }
    }

    cpu->kvm_fd = kvm_fd;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd);

    return 0;
}

int kvm_create_and_park_vcpu(CPUState *cpu)
{
    int ret = 0;

    ret = kvm_create_vcpu(cpu);
    if (!ret) {
        kvm_park_vcpu(cpu);
    }

    return ret;
}

static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret = 0;

    trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    kvm_park_vcpu(cpu);
err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_create_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

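    /*
     * Per the KVM API, KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO) returns
     * the page offset of the coalesced MMIO ring within the vcpu mmap
     * area; that value is what s->coalesced_mmio caches, hence the
     * multiplication by PAGE_SIZE below.
     */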
    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    if (memory_region_has_guest_memfd(mr)) {
        assert(kvm_guest_memfd_supported);
        flags |= KVM_MEM_GUEST_MEMFD;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

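/* Round x up to the next multiple of y; assumes y is a power of two. */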
#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But when the kernel is 64-bit and userspace is
     * 32-bit, userspace can't align to the same bits-per-long, since
     * sizeof(long) is different between kernel and user space.  In
     * that case, userspace provides a buffer which may be 4 bytes
     * smaller than the one the kernel uses, resulting in userspace
     * memory corruption (which, in most cases, is not even detectable
     * by valgrind).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * the hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of the kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
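     *
     * Example: a 4GiB slot with 4KiB host pages covers 1M pages, so the
     * bitmap below is ALIGN(1M, 64) / 8 = 128KiB.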
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}

/*
 * Sync the dirty bitmap from the kernel to KVMSlot.dirty_bmap; return
 * true on success, false otherwise.
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Must be called with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

/*
 * Must be called with all slots_lock held for the address spaces.  It
 * returns the number of dirty pages we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but has not yet initialized the dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}

/* Must be called with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold the BQL before calling this.  We
 * can consider dropping the BQL once we're clear about all the race
 * conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protecting those dirtied pages.
     *     Otherwise we run the risk of data corruption if the page
     *     data is read in the other thread before we do the reset
     *     below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When this returns, we
 * guarantee that every vcpu has been kicked and has returned to
 * userspace at least once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be called with the BQL held, serialization is
     * guaranteed.  However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch the dirty bitmap from the kernel,
 * and then update qemu's dirty bitmap.
 *
 * NOTE: caller must hold kml->slots_lock.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
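/* With 4KiB host pages, KVM_CLEAR_LOG_ALIGN below works out to 256KiB. */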
929 #define KVM_CLEAR_LOG_SHIFT 6
930 #define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
931 #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
932
kvm_log_clear_one_slot(KVMSlot * mem,int as_id,uint64_t start,uint64_t size)933 static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
934 uint64_t size)
935 {
936 KVMState *s = kvm_state;
937 uint64_t end, bmap_start, start_delta, bmap_npages;
938 struct kvm_clear_dirty_log d;
939 unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
940 int ret;
941
942 /*
943 * We need to extend either the start or the size or both to
944 * satisfy the KVM interface requirement. Firstly, do the start
945 * page alignment on 64 host pages
946 */
947 bmap_start = start & KVM_CLEAR_LOG_MASK;
948 start_delta = start - bmap_start;
949 bmap_start /= psize;
950
951 /*
952 * The kernel interface has restriction on the size too, that either:
953 *
954 * (1) the size is 64 host pages aligned (just like the start), or
955 * (2) the size fills up until the end of the KVM memslot.
956 */
957 bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
958 << KVM_CLEAR_LOG_SHIFT;
959 end = mem->memory_size / psize;
960 if (bmap_npages > end - bmap_start) {
961 bmap_npages = end - bmap_start;
962 }
963 start_delta /= psize;
964
965 /*
966 * Prepare the bitmap to clear dirty bits. Here we must guarantee
967 * that we won't clear any unknown dirty bits otherwise we might
968 * accidentally clear some set bits which are not yet synced from
969 * the kernel into QEMU's bitmap, then we'll lose track of the
970 * guest modifications upon those pages (which can directly lead
971 * to guest data loss or panic after migration).
972 *
973 * Layout of the KVMSlot.dirty_bmap:
974 *
975 * |<-------- bmap_npages -----------..>|
976 * [1]
977 * start_delta size
978 * |----------------|-------------|------------------|------------|
979 * ^ ^ ^ ^
980 * | | | |
981 * start bmap_start (start) end
982 * of memslot of memslot
983 *
984 * [1] bmap_npages can be aligned to either 64 pages or the end of slot
985 */
986
987 assert(bmap_start % BITS_PER_LONG == 0);
988 /* We should never do log_clear before log_sync */
989 assert(mem->dirty_bmap);
990 if (start_delta || bmap_npages - size / psize) {
991 /* Slow path - we need to manipulate a temp bitmap */
992 bmap_clear = bitmap_new(bmap_npages);
993 bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
994 bmap_start, start_delta + size / psize);
995 /*
996 * We need to fill the holes at start because that was not
997 * specified by the caller and we extended the bitmap only for
998 * 64 pages alignment
999 */
1000 bitmap_clear(bmap_clear, 0, start_delta);
1001 d.dirty_bitmap = bmap_clear;
1002 } else {
1003 /*
1004 * Fast path - both start and size align well with BITS_PER_LONG
1005 * (or the end of memory slot)
1006 */
1007 d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
1008 }
1009
1010 d.first_page = bmap_start;
1011 /* It should never overflow. If it happens, say something */
1012 assert(bmap_npages <= UINT32_MAX);
1013 d.num_pages = bmap_npages;
1014 d.slot = mem->slot | (as_id << 16);
1015
1016 ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
1017 if (ret < 0 && ret != -ENOENT) {
1018 error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
1019 "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
1020 __func__, d.slot, (uint64_t)d.first_page,
1021 (uint32_t)d.num_pages, ret);
1022 } else {
1023 ret = 0;
1024 trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
1025 }
1026
1027 /*
1028 * After we have updated the remote dirty bitmap, we update the
1029 * cached bitmap as well for the memslot, then if another user
1030 * clears the same region we know we shouldn't clear it again on
1031 * the remote otherwise it's data loss as well.
1032 */
1033 bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
1034 size / psize);
1035 /* This handles the NULL case well */
1036 g_free(bmap_clear);
1037 return ret;
1038 }
1039
1040
1041 /**
1042 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
1043 *
1044 * NOTE: this will be a no-op if we haven't enabled manual dirty log
1045 * protection in the host kernel because in that case this operation
1046 * will be done within log_sync().
1047 *
1048 * @kml: the kvm memory listener
1049 * @section: the memory range to clear dirty bitmap
1050 */
kvm_physical_log_clear(KVMMemoryListener * kml,MemoryRegionSection * section)1051 static int kvm_physical_log_clear(KVMMemoryListener *kml,
1052 MemoryRegionSection *section)
1053 {
1054 KVMState *s = kvm_state;
1055 uint64_t start, size, offset, count;
1056 KVMSlot *mem;
1057 int ret = 0, i;
1058
1059 if (!s->manual_dirty_log_protect) {
1060 /* No need to do explicit clear */
1061 return ret;
1062 }
1063
1064 start = section->offset_within_address_space;
1065 size = int128_get64(section->size);
1066
1067 if (!size) {
1068 /* Nothing more we can do... */
1069 return ret;
1070 }
1071
1072 kvm_slots_lock();
1073
1074 for (i = 0; i < s->nr_slots; i++) {
1075 mem = &kml->slots[i];
1076 /* Discard slots that are empty or do not overlap the section */
1077 if (!mem->memory_size ||
1078 mem->start_addr > start + size - 1 ||
1079 start > mem->start_addr + mem->memory_size - 1) {
1080 continue;
1081 }
1082
1083 if (start >= mem->start_addr) {
1084 /* The slot starts before section or is aligned to it. */
1085 offset = start - mem->start_addr;
1086 count = MIN(mem->memory_size - offset, size);
1087 } else {
1088 /* The slot starts after section. */
1089 offset = 0;
1090 count = MIN(mem->memory_size, size - (mem->start_addr - start));
1091 }
1092 ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
1093 if (ret < 0) {
1094 break;
1095 }
1096 }
1097
1098 kvm_slots_unlock();
1099
1100 return ret;
1101 }
1102
kvm_coalesce_mmio_region(MemoryListener * listener,MemoryRegionSection * secion,hwaddr start,hwaddr size)1103 static void kvm_coalesce_mmio_region(MemoryListener *listener,
1104 MemoryRegionSection *secion,
1105 hwaddr start, hwaddr size)
1106 {
1107 KVMState *s = kvm_state;
1108
1109 if (s->coalesced_mmio) {
1110 struct kvm_coalesced_mmio_zone zone;
1111
1112 zone.addr = start;
1113 zone.size = size;
1114 zone.pad = 0;
1115
1116 (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1117 }
1118 }
1119
kvm_uncoalesce_mmio_region(MemoryListener * listener,MemoryRegionSection * secion,hwaddr start,hwaddr size)1120 static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
1121 MemoryRegionSection *secion,
1122 hwaddr start, hwaddr size)
1123 {
1124 KVMState *s = kvm_state;
1125
1126 if (s->coalesced_mmio) {
1127 struct kvm_coalesced_mmio_zone zone;
1128
1129 zone.addr = start;
1130 zone.size = size;
1131 zone.pad = 0;
1132
1133 (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1134 }
1135 }
1136
kvm_coalesce_pio_add(MemoryListener * listener,MemoryRegionSection * section,hwaddr start,hwaddr size)1137 static void kvm_coalesce_pio_add(MemoryListener *listener,
1138 MemoryRegionSection *section,
1139 hwaddr start, hwaddr size)
1140 {
1141 KVMState *s = kvm_state;
1142
1143 if (s->coalesced_pio) {
1144 struct kvm_coalesced_mmio_zone zone;
1145
1146 zone.addr = start;
1147 zone.size = size;
1148 zone.pio = 1;
1149
1150 (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1151 }
1152 }
1153
kvm_coalesce_pio_del(MemoryListener * listener,MemoryRegionSection * section,hwaddr start,hwaddr size)1154 static void kvm_coalesce_pio_del(MemoryListener *listener,
1155 MemoryRegionSection *section,
1156 hwaddr start, hwaddr size)
1157 {
1158 KVMState *s = kvm_state;
1159
1160 if (s->coalesced_pio) {
1161 struct kvm_coalesced_mmio_zone zone;
1162
1163 zone.addr = start;
1164 zone.size = size;
1165 zone.pio = 1;
1166
1167 (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1168 }
1169 }
1170
kvm_check_extension(KVMState * s,unsigned int extension)1171 int kvm_check_extension(KVMState *s, unsigned int extension)
1172 {
1173 int ret;
1174
1175 ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1176 if (ret < 0) {
1177 ret = 0;
1178 }
1179
1180 return ret;
1181 }
1182
kvm_vm_check_extension(KVMState * s,unsigned int extension)1183 int kvm_vm_check_extension(KVMState *s, unsigned int extension)
1184 {
1185 int ret;
1186
1187 ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1188 if (ret < 0) {
1189 /* VM wide version not implemented, use global one instead */
1190 ret = kvm_check_extension(s, extension);
1191 }
1192
1193 return ret;
1194 }
1195
1196 /*
1197 * We track the poisoned pages to be able to:
1198 * - replace them on VM reset
1199 * - block a migration for a VM with a poisoned page
1200 */
1201 typedef struct HWPoisonPage {
1202 ram_addr_t ram_addr;
1203 QLIST_ENTRY(HWPoisonPage) list;
1204 } HWPoisonPage;
1205
1206 static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
1207 QLIST_HEAD_INITIALIZER(hwpoison_page_list);
1208
kvm_unpoison_all(void * param)1209 static void kvm_unpoison_all(void *param)
1210 {
1211 HWPoisonPage *page, *next_page;
1212
1213 QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
1214 QLIST_REMOVE(page, list);
1215 qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
1216 g_free(page);
1217 }
1218 }
1219
kvm_hwpoison_page_add(ram_addr_t ram_addr)1220 void kvm_hwpoison_page_add(ram_addr_t ram_addr)
1221 {
1222 HWPoisonPage *page;
1223
1224 QLIST_FOREACH(page, &hwpoison_page_list, list) {
1225 if (page->ram_addr == ram_addr) {
1226 return;
1227 }
1228 }
1229 page = g_new(HWPoisonPage, 1);
1230 page->ram_addr = ram_addr;
1231 QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
1232 }
1233
kvm_hwpoisoned_mem(void)1234 bool kvm_hwpoisoned_mem(void)
1235 {
1236 return !QLIST_EMPTY(&hwpoison_page_list);
1237 }
1238
adjust_ioeventfd_endianness(uint32_t val,uint32_t size)1239 static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
1240 {
1241 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
1242 /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
1243 * endianness, but the memory core hands them in target endianness.
1244 * For example, PPC is always treated as big-endian even if running
1245 * on KVM and on PPC64LE. Correct here.
1246 */
1247 switch (size) {
1248 case 2:
1249 val = bswap16(val);
1250 break;
1251 case 4:
1252 val = bswap32(val);
1253 break;
1254 }
1255 #endif
1256 return val;
1257 }
1258
kvm_set_ioeventfd_mmio(int fd,hwaddr addr,uint32_t val,bool assign,uint32_t size,bool datamatch)1259 static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
1260 bool assign, uint32_t size, bool datamatch)
1261 {
1262 int ret;
1263 struct kvm_ioeventfd iofd = {
1264 .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1265 .addr = addr,
1266 .len = size,
1267 .flags = 0,
1268 .fd = fd,
1269 };
1270
1271 trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
1272 datamatch);
1273 if (!kvm_enabled()) {
1274 return -ENOSYS;
1275 }
1276
1277 if (datamatch) {
1278 iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1279 }
1280 if (!assign) {
1281 iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1282 }
1283
1284 ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
1285
1286 if (ret < 0) {
1287 return -errno;
1288 }
1289
1290 return 0;
1291 }
1292
kvm_set_ioeventfd_pio(int fd,uint16_t addr,uint16_t val,bool assign,uint32_t size,bool datamatch)1293 static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
1294 bool assign, uint32_t size, bool datamatch)
1295 {
1296 struct kvm_ioeventfd kick = {
1297 .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1298 .addr = addr,
1299 .flags = KVM_IOEVENTFD_FLAG_PIO,
1300 .len = size,
1301 .fd = fd,
1302 };
1303 int r;
1304 trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
1305 if (!kvm_enabled()) {
1306 return -ENOSYS;
1307 }
1308 if (datamatch) {
1309 kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1310 }
1311 if (!assign) {
1312 kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1313 }
1314 r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1315 if (r < 0) {
1316 return r;
1317 }
1318 return 0;
1319 }
1320
1321
1322 static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState * s,const KVMCapabilityInfo * list)1323 kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
1324 {
1325 while (list->name) {
1326 if (!kvm_check_extension(s, list->value)) {
1327 return list;
1328 }
1329 list++;
1330 }
1331 return NULL;
1332 }
1333
kvm_set_max_memslot_size(hwaddr max_slot_size)1334 void kvm_set_max_memslot_size(hwaddr max_slot_size)
1335 {
1336 g_assert(
1337 ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
1338 );
1339 kvm_max_slot_size = max_slot_size;
1340 }
1341
kvm_set_memory_attributes(hwaddr start,uint64_t size,uint64_t attr)1342 static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
1343 {
1344 struct kvm_memory_attributes attrs;
1345 int r;
1346
1347 assert((attr & kvm_supported_memory_attributes) == attr);
1348 attrs.attributes = attr;
1349 attrs.address = start;
1350 attrs.size = size;
1351 attrs.flags = 0;
1352
1353 r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
1354 if (r) {
1355 error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
1356 "with attr 0x%" PRIx64 " error '%s'",
1357 start, size, attr, strerror(errno));
1358 }
1359 return r;
1360 }
1361
kvm_set_memory_attributes_private(hwaddr start,uint64_t size)1362 int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
1363 {
1364 return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
1365 }
1366
kvm_set_memory_attributes_shared(hwaddr start,uint64_t size)1367 int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
1368 {
1369 return kvm_set_memory_attributes(start, size, 0);
1370 }
1371
1372 /* Called with KVMMemoryListener.slots_lock held */
kvm_set_phys_mem(KVMMemoryListener * kml,MemoryRegionSection * section,bool add)1373 static void kvm_set_phys_mem(KVMMemoryListener *kml,
1374 MemoryRegionSection *section, bool add)
1375 {
1376 KVMSlot *mem;
1377 int err;
1378 MemoryRegion *mr = section->mr;
1379 bool writable = !mr->readonly && !mr->rom_device;
1380 hwaddr start_addr, size, slot_size, mr_offset;
1381 ram_addr_t ram_start_offset;
1382 void *ram;
1383
1384 if (!memory_region_is_ram(mr)) {
1385 if (writable || !kvm_readonly_mem_allowed) {
1386 return;
1387 } else if (!mr->romd_mode) {
1388 /* If the memory device is not in romd_mode, then we actually want
1389 * to remove the kvm memory slot so all accesses will trap. */
1390 add = false;
1391 }
1392 }
1393
1394 size = kvm_align_section(section, &start_addr);
1395 if (!size) {
1396 return;
1397 }
1398
1399 /* The offset of the kvmslot within the memory region */
1400 mr_offset = section->offset_within_region + start_addr -
1401 section->offset_within_address_space;
1402
1403 /* use aligned delta to align the ram address and offset */
1404 ram = memory_region_get_ram_ptr(mr) + mr_offset;
1405 ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
1406
1407 if (!add) {
1408 do {
1409 slot_size = MIN(kvm_max_slot_size, size);
1410 mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1411 if (!mem) {
1412 return;
1413 }
1414 if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1415 /*
1416 * NOTE: We should be aware of the fact that here we're only
1417 * doing a best effort to sync dirty bits. No matter whether
1418 * we're using dirty log or dirty ring, we ignored two facts:
1419 *
1420 * (1) dirty bits can reside in hardware buffers (PML)
1421 *
1422 * (2) after we collected dirty bits here, pages can be dirtied
1423 * again before we do the final KVM_SET_USER_MEMORY_REGION to
1424 * remove the slot.
1425 *
1426 * Not easy. Let's cross the fingers until it's fixed.
1427 */
1428 if (kvm_state->kvm_dirty_ring_size) {
1429 kvm_dirty_ring_reap_locked(kvm_state, NULL);
1430 if (kvm_state->kvm_dirty_ring_with_bitmap) {
1431 kvm_slot_sync_dirty_pages(mem);
1432 kvm_slot_get_dirty_log(kvm_state, mem);
1433 }
1434 } else {
1435 kvm_slot_get_dirty_log(kvm_state, mem);
1436 }
1437 kvm_slot_sync_dirty_pages(mem);
1438 }
1439
1440 /* unregister the slot */
1441 g_free(mem->dirty_bmap);
1442 mem->dirty_bmap = NULL;
1443 mem->memory_size = 0;
1444 mem->flags = 0;
1445 err = kvm_set_user_memory_region(kml, mem, false);
1446 if (err) {
1447 fprintf(stderr, "%s: error unregistering slot: %s\n",
1448 __func__, strerror(-err));
1449 abort();
1450 }
1451 start_addr += slot_size;
1452 size -= slot_size;
1453 kml->nr_used_slots--;
1454 } while (size);
1455 return;
1456 }
1457
1458 /* register the new slot */
1459 do {
1460 slot_size = MIN(kvm_max_slot_size, size);
1461 mem = kvm_alloc_slot(kml);
1462 mem->as_id = kml->as_id;
1463 mem->memory_size = slot_size;
1464 mem->start_addr = start_addr;
1465 mem->ram_start_offset = ram_start_offset;
1466 mem->ram = ram;
1467 mem->flags = kvm_mem_flags(mr);
1468 mem->guest_memfd = mr->ram_block->guest_memfd;
1469 mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host;
1470
1471 kvm_slot_init_dirty_bitmap(mem);
1472 err = kvm_set_user_memory_region(kml, mem, true);
1473 if (err) {
1474 fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1475 strerror(-err));
1476 abort();
1477 }
1478
1479 if (memory_region_has_guest_memfd(mr)) {
1480 err = kvm_set_memory_attributes_private(start_addr, slot_size);
1481 if (err) {
1482 error_report("%s: failed to set memory attribute private: %s",
1483 __func__, strerror(-err));
1484 exit(1);
1485 }
1486 }
1487
1488 start_addr += slot_size;
1489 ram_start_offset += slot_size;
1490 ram += slot_size;
1491 size -= slot_size;
1492 kml->nr_used_slots++;
1493 } while (size);
1494 }
1495
kvm_dirty_ring_reaper_thread(void * data)1496 static void *kvm_dirty_ring_reaper_thread(void *data)
1497 {
1498 KVMState *s = data;
1499 struct KVMDirtyRingReaper *r = &s->reaper;
1500
1501 rcu_register_thread();
1502
1503 trace_kvm_dirty_ring_reaper("init");
1504
1505 while (true) {
1506 r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
1507 trace_kvm_dirty_ring_reaper("wait");
1508 /*
1509 * TODO: provide a smarter timeout rather than a constant?
1510 */
1511 sleep(1);
1512
1513 /* keep sleeping so that dirtylimit not be interfered by reaper */
1514 if (dirtylimit_in_service()) {
1515 continue;
1516 }
1517
1518 trace_kvm_dirty_ring_reaper("wakeup");
1519 r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
1520
1521 bql_lock();
1522 kvm_dirty_ring_reap(s, NULL);
1523 bql_unlock();
1524
1525 r->reaper_iteration++;
1526 }
1527
1528 g_assert_not_reached();
1529 }
1530
kvm_dirty_ring_reaper_init(KVMState * s)1531 static void kvm_dirty_ring_reaper_init(KVMState *s)
1532 {
1533 struct KVMDirtyRingReaper *r = &s->reaper;
1534
1535 qemu_thread_create(&r->reaper_thr, "kvm-reaper",
1536 kvm_dirty_ring_reaper_thread,
1537 s, QEMU_THREAD_JOINABLE);
1538 }
1539
kvm_dirty_ring_init(KVMState * s)1540 static int kvm_dirty_ring_init(KVMState *s)
1541 {
1542 uint32_t ring_size = s->kvm_dirty_ring_size;
1543 uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
1544 unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
1545 int ret;
1546
1547 s->kvm_dirty_ring_size = 0;
1548 s->kvm_dirty_ring_bytes = 0;
1549
1550 /* Bail if the dirty ring size isn't specified */
1551 if (!ring_size) {
1552 return 0;
1553 }
1554
1555 /*
1556 * Read the max supported pages. Fall back to dirty logging mode
1557 * if the dirty ring isn't supported.
1558 */
1559 ret = kvm_vm_check_extension(s, capability);
1560 if (ret <= 0) {
1561 capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
1562 ret = kvm_vm_check_extension(s, capability);
1563 }
1564
1565 if (ret <= 0) {
1566 warn_report("KVM dirty ring not available, using bitmap method");
1567 return 0;
1568 }
1569
1570 if (ring_bytes > ret) {
1571 error_report("KVM dirty ring size %" PRIu32 " too big "
1572 "(maximum is %ld). Please use a smaller value.",
1573 ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
1574 return -EINVAL;
1575 }
1576
1577 ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
1578 if (ret) {
1579 error_report("Enabling of KVM dirty ring failed: %s. "
1580 "Suggested minimum value is 1024.", strerror(-ret));
1581 return -EIO;
1582 }
1583
1584 /* Enable the backup bitmap if it is supported */
1585 ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
1586 if (ret > 0) {
1587 ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
1588 if (ret) {
1589 error_report("Enabling of KVM dirty ring's backup bitmap failed: "
1590 "%s. ", strerror(-ret));
1591 return -EIO;
1592 }
1593
1594 s->kvm_dirty_ring_with_bitmap = true;
1595 }
1596
1597 s->kvm_dirty_ring_size = ring_size;
1598 s->kvm_dirty_ring_bytes = ring_bytes;
1599
1600 return 0;
1601 }
1602
kvm_region_add(MemoryListener * listener,MemoryRegionSection * section)1603 static void kvm_region_add(MemoryListener *listener,
1604 MemoryRegionSection *section)
1605 {
1606 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1607 KVMMemoryUpdate *update;
1608
1609 update = g_new0(KVMMemoryUpdate, 1);
1610 update->section = *section;
1611
1612 QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
1613 }
1614
kvm_region_del(MemoryListener * listener,MemoryRegionSection * section)1615 static void kvm_region_del(MemoryListener *listener,
1616 MemoryRegionSection *section)
1617 {
1618 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1619 KVMMemoryUpdate *update;
1620
1621 update = g_new0(KVMMemoryUpdate, 1);
1622 update->section = *section;
1623
1624 QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
1625 }
1626
kvm_region_commit(MemoryListener * listener)1627 static void kvm_region_commit(MemoryListener *listener)
1628 {
1629 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
1630 listener);
1631 KVMMemoryUpdate *u1, *u2;
1632 bool need_inhibit = false;
1633
1634 if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
1635 QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1636 return;
1637 }
1638
1639 /*
1640 * We have to be careful when regions to add overlap with ranges to remove.
1641 * We have to simulate atomic KVM memslot updates by making sure no ioctl()
1642 * is currently active.
1643 *
1644 * The lists are order by addresses, so it's easy to find overlaps.
1645 */
1646 u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1647 u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
1648 while (u1 && u2) {
1649 Range r1, r2;
1650
1651 range_init_nofail(&r1, u1->section.offset_within_address_space,
1652 int128_get64(u1->section.size));
1653 range_init_nofail(&r2, u2->section.offset_within_address_space,
1654 int128_get64(u2->section.size));
1655
1656 if (range_overlaps_range(&r1, &r2)) {
1657 need_inhibit = true;
1658 break;
1659 }
1660 if (range_lob(&r1) < range_lob(&r2)) {
1661 u1 = QSIMPLEQ_NEXT(u1, next);
1662 } else {
1663 u2 = QSIMPLEQ_NEXT(u2, next);
1664 }
1665 }
1666
1667 kvm_slots_lock();
1668 if (need_inhibit) {
1669 accel_ioctl_inhibit_begin();
1670 }
1671
1672 /* Remove all memslots before adding the new ones. */
1673 while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1674 u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1675 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);
1676
1677 kvm_set_phys_mem(kml, &u1->section, false);
1678 memory_region_unref(u1->section.mr);
1679
1680 g_free(u1);
1681 }
1682 while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
1683 u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
1684 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);
1685
1686 memory_region_ref(u1->section.mr);
1687 kvm_set_phys_mem(kml, &u1->section, true);
1688
1689 g_free(u1);
1690 }
1691
1692 if (need_inhibit) {
1693 accel_ioctl_inhibit_end();
1694 }
1695 kvm_slots_unlock();
1696 }
1697
kvm_log_sync(MemoryListener * listener,MemoryRegionSection * section)1698 static void kvm_log_sync(MemoryListener *listener,
1699 MemoryRegionSection *section)
1700 {
1701 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1702
1703 kvm_slots_lock();
1704 kvm_physical_sync_dirty_bitmap(kml, section);
1705 kvm_slots_unlock();
1706 }
1707
kvm_log_sync_global(MemoryListener * l,bool last_stage)1708 static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
1709 {
1710 KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
1711 KVMState *s = kvm_state;
1712 KVMSlot *mem;
1713 int i;
1714
1715 /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
1716 kvm_dirty_ring_flush();
1717
1718 /*
1719 * TODO: make this faster when nr_slots is big while there are
1720 * only a few used slots (small VMs).
1721 */
1722 kvm_slots_lock();
1723 for (i = 0; i < s->nr_slots; i++) {
1724 mem = &kml->slots[i];
1725 if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1726 kvm_slot_sync_dirty_pages(mem);
1727
1728 if (s->kvm_dirty_ring_with_bitmap && last_stage &&
1729 kvm_slot_get_dirty_log(s, mem)) {
1730 kvm_slot_sync_dirty_pages(mem);
1731 }
1732
1733 /*
1734 * This is not needed by KVM_GET_DIRTY_LOG because the
1735 * ioctl will unconditionally overwrite the whole region.
1736 * However kvm dirty ring has no such side effect.
1737 */
1738 kvm_slot_reset_dirty_pages(mem);
1739 }
1740 }
1741 kvm_slots_unlock();
1742 }
1743
kvm_log_clear(MemoryListener * listener,MemoryRegionSection * section)1744 static void kvm_log_clear(MemoryListener *listener,
1745 MemoryRegionSection *section)
1746 {
1747 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1748 int r;
1749
1750 r = kvm_physical_log_clear(kml, section);
1751 if (r < 0) {
1752 error_report_once("%s: kvm log clear failed: mr=%s "
1753 "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1754 section->mr->name, section->offset_within_region,
1755 int128_get64(section->size));
1756 abort();
1757 }
1758 }
1759
kvm_mem_ioeventfd_add(MemoryListener * listener,MemoryRegionSection * section,bool match_data,uint64_t data,EventNotifier * e)1760 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1761 MemoryRegionSection *section,
1762 bool match_data, uint64_t data,
1763 EventNotifier *e)
1764 {
1765 int fd = event_notifier_get_fd(e);
1766 int r;
1767
1768 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1769 data, true, int128_get64(section->size),
1770 match_data);
1771 if (r < 0) {
1772 fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1773 __func__, strerror(-r), -r);
1774 abort();
1775 }
1776 }
1777
kvm_mem_ioeventfd_del(MemoryListener * listener,MemoryRegionSection * section,bool match_data,uint64_t data,EventNotifier * e)1778 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1779 MemoryRegionSection *section,
1780 bool match_data, uint64_t data,
1781 EventNotifier *e)
1782 {
1783 int fd = event_notifier_get_fd(e);
1784 int r;
1785
1786 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1787 data, false, int128_get64(section->size),
1788 match_data);
1789 if (r < 0) {
1790 fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1791 __func__, strerror(-r), -r);
1792 abort();
1793 }
1794 }
1795
kvm_io_ioeventfd_add(MemoryListener * listener,MemoryRegionSection * section,bool match_data,uint64_t data,EventNotifier * e)1796 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1797 MemoryRegionSection *section,
1798 bool match_data, uint64_t data,
1799 EventNotifier *e)
1800 {
1801 int fd = event_notifier_get_fd(e);
1802 int r;
1803
1804 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1805 data, true, int128_get64(section->size),
1806 match_data);
1807 if (r < 0) {
1808 fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1809 __func__, strerror(-r), -r);
1810 abort();
1811 }
1812 }
1813
kvm_io_ioeventfd_del(MemoryListener * listener,MemoryRegionSection * section,bool match_data,uint64_t data,EventNotifier * e)1814 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1815 MemoryRegionSection *section,
1816 bool match_data, uint64_t data,
1817 EventNotifier *e)
1818
1819 {
1820 int fd = event_notifier_get_fd(e);
1821 int r;
1822
1823 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1824 data, false, int128_get64(section->size),
1825 match_data);
1826 if (r < 0) {
1827 fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1828 __func__, strerror(-r), -r);
1829 abort();
1830 }
1831 }
1832
kvm_memory_listener_register(KVMState * s,KVMMemoryListener * kml,AddressSpace * as,int as_id,const char * name)1833 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1834 AddressSpace *as, int as_id, const char *name)
1835 {
1836 int i;
1837
1838 kml->slots = g_new0(KVMSlot, s->nr_slots);
1839 kml->as_id = as_id;
1840
1841 for (i = 0; i < s->nr_slots; i++) {
1842 kml->slots[i].slot = i;
1843 }
1844
1845 QSIMPLEQ_INIT(&kml->transaction_add);
1846 QSIMPLEQ_INIT(&kml->transaction_del);
1847
1848 kml->listener.region_add = kvm_region_add;
1849 kml->listener.region_del = kvm_region_del;
1850 kml->listener.commit = kvm_region_commit;
1851 kml->listener.log_start = kvm_log_start;
1852 kml->listener.log_stop = kvm_log_stop;
1853 kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
1854 kml->listener.name = name;
1855
1856 if (s->kvm_dirty_ring_size) {
1857 kml->listener.log_sync_global = kvm_log_sync_global;
1858 } else {
1859 kml->listener.log_sync = kvm_log_sync;
1860 kml->listener.log_clear = kvm_log_clear;
1861 }
1862
1863 memory_listener_register(&kml->listener, as);
1864
1865 for (i = 0; i < s->nr_as; ++i) {
1866 if (!s->as[i].as) {
1867 s->as[i].as = as;
1868 s->as[i].ml = kml;
1869 break;
1870 }
1871 }
1872 }
1873
1874 static MemoryListener kvm_io_listener = {
1875 .name = "kvm-io",
1876 .coalesced_io_add = kvm_coalesce_pio_add,
1877 .coalesced_io_del = kvm_coalesce_pio_del,
1878 .eventfd_add = kvm_io_ioeventfd_add,
1879 .eventfd_del = kvm_io_ioeventfd_del,
1880 .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
1881 };
1882
kvm_set_irq(KVMState * s,int irq,int level)1883 int kvm_set_irq(KVMState *s, int irq, int level)
1884 {
1885 struct kvm_irq_level event;
1886 int ret;
1887
1888 assert(kvm_async_interrupts_enabled());
1889
1890 event.level = level;
1891 event.irq = irq;
1892 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1893 if (ret < 0) {
1894 perror("kvm_set_irq");
1895 abort();
1896 }
1897
1898 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1899 }
1900
1901 #ifdef KVM_CAP_IRQ_ROUTING
1902 typedef struct KVMMSIRoute {
1903 struct kvm_irq_routing_entry kroute;
1904 QTAILQ_ENTRY(KVMMSIRoute) entry;
1905 } KVMMSIRoute;
1906
1907 static void set_gsi(KVMState *s, unsigned int gsi)
1908 {
1909 set_bit(gsi, s->used_gsi_bitmap);
1910 }
1911
1912 static void clear_gsi(KVMState *s, unsigned int gsi)
1913 {
1914 clear_bit(gsi, s->used_gsi_bitmap);
1915 }
1916
1917 void kvm_init_irq_routing(KVMState *s)
1918 {
1919 int gsi_count;
1920
1921 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1922 if (gsi_count > 0) {
1923 /* Round up so we can search ints using ffs */
1924 s->used_gsi_bitmap = bitmap_new(gsi_count);
1925 s->gsi_count = gsi_count;
1926 }
1927
1928 s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1929 s->nr_allocated_irq_routes = 0;
1930
1931 kvm_arch_init_irq_routing(s);
1932 }
1933
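/*
 * Push the userspace routing table to the kernel in a single
 * KVM_SET_GSI_ROUTING call. This is a no-op when GSIs map directly to
 * IRQ lines or when GSI routing is unavailable; callers batch route
 * updates and commit once.
 */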
1934 void kvm_irqchip_commit_routes(KVMState *s)
1935 {
1936 int ret;
1937
1938 if (kvm_gsi_direct_mapping()) {
1939 return;
1940 }
1941
1942 if (!kvm_gsi_routing_enabled()) {
1943 return;
1944 }
1945
1946 s->irq_routes->flags = 0;
1947 trace_kvm_irqchip_commit_routes();
1948 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1949 assert(ret == 0);
1950 }
1951
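/*
 * Append an entry to the userspace routing table, growing the flexible
 * array geometrically (doubling, with a floor of 64 entries) so repeated
 * additions stay cheap, and mark the GSI as used. The kernel only sees
 * the table once kvm_irqchip_commit_routes() runs.
 */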
1952 void kvm_add_routing_entry(KVMState *s,
1953 struct kvm_irq_routing_entry *entry)
1954 {
1955 struct kvm_irq_routing_entry *new;
1956 int n, size;
1957
1958 if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1959 n = s->nr_allocated_irq_routes * 2;
1960 if (n < 64) {
1961 n = 64;
1962 }
1963 size = sizeof(struct kvm_irq_routing);
1964 size += n * sizeof(*new);
1965 s->irq_routes = g_realloc(s->irq_routes, size);
1966 s->nr_allocated_irq_routes = n;
1967 }
1968 n = s->irq_routes->nr++;
1969 new = &s->irq_routes->entries[n];
1970
1971 *new = *entry;
1972
1973 set_gsi(s, entry->gsi);
1974 }
1975
1976 static int kvm_update_routing_entry(KVMState *s,
1977 struct kvm_irq_routing_entry *new_entry)
1978 {
1979 struct kvm_irq_routing_entry *entry;
1980 int n;
1981
1982 for (n = 0; n < s->irq_routes->nr; n++) {
1983 entry = &s->irq_routes->entries[n];
1984 if (entry->gsi != new_entry->gsi) {
1985 continue;
1986 }
1987
1988 if (!memcmp(entry, new_entry, sizeof(*entry))) {
1989 return 0;
1990 }
1991
1992 *entry = *new_entry;
1993
1994 return 0;
1995 }
1996
1997 return -ESRCH;
1998 }
1999
2000 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
2001 {
2002 struct kvm_irq_routing_entry e = {};
2003
2004 assert(pin < s->gsi_count);
2005
2006 e.gsi = irq;
2007 e.type = KVM_IRQ_ROUTING_IRQCHIP;
2008 e.flags = 0;
2009 e.u.irqchip.irqchip = irqchip;
2010 e.u.irqchip.pin = pin;
2011 kvm_add_routing_entry(s, &e);
2012 }
2013
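/*
 * Release a virq: compact the routing table by overwriting any matching
 * entry with the last one, return the GSI to the allocator bitmap, and
 * let arch code drop whatever it attached to the route.
 */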
2014 void kvm_irqchip_release_virq(KVMState *s, int virq)
2015 {
2016 struct kvm_irq_routing_entry *e;
2017 int i;
2018
2019 if (kvm_gsi_direct_mapping()) {
2020 return;
2021 }
2022
2023 for (i = 0; i < s->irq_routes->nr; i++) {
2024 e = &s->irq_routes->entries[i];
2025 if (e->gsi == virq) {
2026 s->irq_routes->nr--;
2027 *e = s->irq_routes->entries[s->irq_routes->nr];
2028 }
2029 }
2030 clear_gsi(s, virq);
2031 kvm_arch_release_virq_post(virq);
2032 trace_kvm_irqchip_release_virq(virq);
2033 }
2034
2035 void kvm_irqchip_add_change_notifier(Notifier *n)
2036 {
2037 notifier_list_add(&kvm_irqchip_change_notifiers, n);
2038 }
2039
2040 void kvm_irqchip_remove_change_notifier(Notifier *n)
2041 {
2042 notifier_remove(n);
2043 }
2044
2045 void kvm_irqchip_change_notify(void)
2046 {
2047 notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
2048 }
2049
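/* GSI allocation is a first-fit scan of the used-GSI bitmap. */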
2050 int kvm_irqchip_get_virq(KVMState *s)
2051 {
2052 int next_virq;
2053
2054 /* Return the lowest unused GSI in the bitmap */
2055 next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
2056 if (next_virq >= s->gsi_count) {
2057 return -ENOSPC;
2058 } else {
2059 return next_virq;
2060 }
2061 }
2062
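/*
 * Inject a single MSI directly via KVM_SIGNAL_MSI. Unlike the irqfd
 * paths below, this consumes no routing entry or GSI, so it suits
 * infrequent, dynamically computed messages.
 */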
2063 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2064 {
2065 struct kvm_msi msi;
2066
2067 msi.address_lo = (uint32_t)msg.address;
2068 msi.address_hi = msg.address >> 32;
2069 msi.data = le32_to_cpu(msg.data);
2070 msi.flags = 0;
2071 memset(msi.pad, 0, sizeof(msi.pad));
2072
2073 return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
2074 }
2075
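/*
 * Allocate a GSI and stage an MSI route for it: read the vector's MSI
 * message from the device (when PCI is available), build the routing
 * entry, give the arch a chance to fix it up, and queue it on the
 * pending KVMRouteChange. A minimal usage sketch, assuming the
 * begin/commit helpers from sysemu/kvm.h:
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, dev);
 *     if (virq >= 0) {
 *         kvm_irqchip_commit_route_changes(&c);
 *         kvm_irqchip_add_irqfd_notifier_gsi(s, &notifier, NULL, virq);
 *     }
 */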
2076 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2077 {
2078 struct kvm_irq_routing_entry kroute = {};
2079 int virq;
2080 KVMState *s = c->s;
2081 MSIMessage msg = {0, 0};
2082
2083 if (pci_available && dev) {
2084 msg = pci_get_msi_message(dev, vector);
2085 }
2086
2087 if (kvm_gsi_direct_mapping()) {
2088 return kvm_arch_msi_data_to_gsi(msg.data);
2089 }
2090
2091 if (!kvm_gsi_routing_enabled()) {
2092 return -ENOSYS;
2093 }
2094
2095 virq = kvm_irqchip_get_virq(s);
2096 if (virq < 0) {
2097 return virq;
2098 }
2099
2100 kroute.gsi = virq;
2101 kroute.type = KVM_IRQ_ROUTING_MSI;
2102 kroute.flags = 0;
2103 kroute.u.msi.address_lo = (uint32_t)msg.address;
2104 kroute.u.msi.address_hi = msg.address >> 32;
2105 kroute.u.msi.data = le32_to_cpu(msg.data);
2106 if (pci_available && kvm_msi_devid_required()) {
2107 kroute.flags = KVM_MSI_VALID_DEVID;
2108 kroute.u.msi.devid = pci_requester_id(dev);
2109 }
2110 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2111 kvm_irqchip_release_virq(s, virq);
2112 return -EINVAL;
2113 }
2114
2115 if (s->irq_routes->nr < s->gsi_count) {
2116 trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2117 vector, virq);
2118
2119 kvm_add_routing_entry(s, &kroute);
2120 kvm_arch_add_msi_route_post(&kroute, vector, dev);
2121 c->changes++;
2122 } else {
2123 kvm_irqchip_release_virq(s, virq);
2124 return -ENOSPC;
2125 }
2126
2127 return virq;
2128 }
2129
2130 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2131 PCIDevice *dev)
2132 {
2133 struct kvm_irq_routing_entry kroute = {};
2134
2135 if (kvm_gsi_direct_mapping()) {
2136 return 0;
2137 }
2138
2139 if (!kvm_irqchip_in_kernel()) {
2140 return -ENOSYS;
2141 }
2142
2143 kroute.gsi = virq;
2144 kroute.type = KVM_IRQ_ROUTING_MSI;
2145 kroute.flags = 0;
2146 kroute.u.msi.address_lo = (uint32_t)msg.address;
2147 kroute.u.msi.address_hi = msg.address >> 32;
2148 kroute.u.msi.data = le32_to_cpu(msg.data);
2149 if (pci_available && kvm_msi_devid_required()) {
2150 kroute.flags = KVM_MSI_VALID_DEVID;
2151 kroute.u.msi.devid = pci_requester_id(dev);
2152 }
2153 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2154 return -EINVAL;
2155 }
2156
2157 trace_kvm_irqchip_update_msi_route(virq);
2158
2159 return kvm_update_routing_entry(s, &kroute);
2160 }
2161
2162 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2163 EventNotifier *resample, int virq,
2164 bool assign)
2165 {
2166 int fd = event_notifier_get_fd(event);
2167 int rfd = resample ? event_notifier_get_fd(resample) : -1;
2168
2169 struct kvm_irqfd irqfd = {
2170 .fd = fd,
2171 .gsi = virq,
2172 .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2173 };
2174
2175 if (rfd != -1) {
2176 assert(assign);
2177 if (kvm_irqchip_is_split()) {
2178 /*
2179 * When the slow irqchip (e.g. the IOAPIC) lives in
2180 * userspace, the in-kernel KVM resamplefd cannot work:
2181 * the EOI of the interrupt is delivered to userspace
2182 * instead, so the kernel's resamplefd kick is skipped.
2183 * Userspace therefore mimics what the kernel provides
2184 * with resamplefd: remember the resamplefd here and
2185 * kick it when we receive the EOI for this IRQ.
2186 *
2187 * This is hackery, because the IOAPIC is mostly bypassed
2188 * (except for EOI broadcasts) when irqfd is used. However,
2189 * it brings back much of the performance for split irqchip
2190 * with INTx IRQs (for VFIO, this gives 93% of the full
2191 * fast path performance, a 46% boost compared to the
2192 * INTx slow path).
2193 */
2194 kvm_resample_fd_insert(virq, resample);
2195 } else {
2196 irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2197 irqfd.resamplefd = rfd;
2198 }
2199 } else if (!assign) {
2200 if (kvm_irqchip_is_split()) {
2201 kvm_resample_fd_remove(virq);
2202 }
2203 }
2204
2205 return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2206 }
2207
2208 #else /* !KVM_CAP_IRQ_ROUTING */
2209
2210 void kvm_init_irq_routing(KVMState *s)
2211 {
2212 }
2213
2214 void kvm_irqchip_release_virq(KVMState *s, int virq)
2215 {
2216 }
2217
2218 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2219 {
2220 abort();
2221 }
2222
2223 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2224 {
2225 return -ENOSYS;
2226 }
2227
2228 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2229 {
2230 return -ENOSYS;
2231 }
2232
2233 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2234 {
2235 return -ENOSYS;
2236 }
2237
2238 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2239 EventNotifier *resample, int virq,
2240 bool assign)
2241 {
2242 abort();
2243 }
2244
2245 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
2246 {
2247 return -ENOSYS;
2248 }
2249 #endif /* !KVM_CAP_IRQ_ROUTING */
2250
2251 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2252 EventNotifier *rn, int virq)
2253 {
2254 return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2255 }
2256
2257 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2258 int virq)
2259 {
2260 return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2261 }
2262
2263 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2264 EventNotifier *rn, qemu_irq irq)
2265 {
2266 gpointer key, gsi;
2267 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2268
2269 if (!found) {
2270 return -ENXIO;
2271 }
2272 return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2273 }
2274
2275 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2276 qemu_irq irq)
2277 {
2278 gpointer key, gsi;
2279 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2280
2281 if (!found) {
2282 return -ENXIO;
2283 }
2284 return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2285 }
2286
2287 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2288 {
2289 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2290 }
2291
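/*
 * Create the in-kernel irqchip. On most targets KVM_CAP_IRQCHIP is a
 * static capability; s390 instead enables it dynamically through
 * KVM_CAP_S390_IRQCHIP. An arch hook gets the first chance to create
 * the chip (this is how split irqchip is set up); only if it declines
 * do we fall back to the generic KVM_CREATE_IRQCHIP ioctl.
 */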
2292 static void kvm_irqchip_create(KVMState *s)
2293 {
2294 int ret;
2295
2296 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2297 if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2298 ;
2299 } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2300 ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2301 if (ret < 0) {
2302 fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2303 exit(1);
2304 }
2305 } else {
2306 return;
2307 }
2308
2309 if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
2310 fprintf(stderr, "kvm: irqfd not implemented\n");
2311 exit(1);
2312 }
2313
2314 /* First probe and see if there's an arch-specific hook to create the
2315 * in-kernel irqchip for us */
2316 ret = kvm_arch_irqchip_create(s);
2317 if (ret == 0) {
2318 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2319 error_report("Split IRQ chip mode not supported.");
2320 exit(1);
2321 } else {
2322 ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2323 }
2324 }
2325 if (ret < 0) {
2326 fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2327 exit(1);
2328 }
2329
2330 kvm_kernel_irqchip = true;
2331 /* If we have an in-kernel IRQ chip then we must have asynchronous
2332 * interrupt delivery (though the reverse is not necessarily true)
2333 */
2334 kvm_async_interrupts_allowed = true;
2335 kvm_halt_in_kernel_allowed = true;
2336
2337 kvm_init_irq_routing(s);
2338
2339 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2340 }
2341
2342 /* Find the number of supported CPUs using the recommended
2343 * procedure from the kernel API documentation to cope with
2344 * older kernels that may be missing capabilities.
2345 */
2346 static int kvm_recommended_vcpus(KVMState *s)
2347 {
2348 int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2349 return (ret) ? ret : 4;
2350 }
2351
2352 static int kvm_max_vcpus(KVMState *s)
2353 {
2354 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
2355 return (ret) ? ret : kvm_recommended_vcpus(s);
2356 }
2357
2358 static int kvm_max_vcpu_id(KVMState *s)
2359 {
2360 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2361 return (ret) ? ret : kvm_max_vcpus(s);
2362 }
2363
2364 bool kvm_vcpu_id_is_valid(int vcpu_id)
2365 {
2366 KVMState *s = KVM_STATE(current_accel());
2367 return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2368 }
2369
2370 bool kvm_dirty_ring_enabled(void)
2371 {
2372 return kvm_state && kvm_state->kvm_dirty_ring_size;
2373 }
2374
2375 static void query_stats_cb(StatsResultList **result, StatsTarget target,
2376 strList *names, strList *targets, Error **errp);
2377 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2378
2379 uint32_t kvm_dirty_ring_size(void)
2380 {
2381 return kvm_state->kvm_dirty_ring_size;
2382 }
2383
2384 static int kvm_init(MachineState *ms)
2385 {
2386 MachineClass *mc = MACHINE_GET_CLASS(ms);
2387 static const char upgrade_note[] =
2388 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2389 "(see http://sourceforge.net/projects/kvm).\n";
2390 const struct {
2391 const char *name;
2392 int num;
2393 } num_cpus[] = {
2394 { "SMP", ms->smp.cpus },
2395 { "hotpluggable", ms->smp.max_cpus },
2396 { /* end of list */ }
2397 }, *nc = num_cpus;
2398 int soft_vcpus_limit, hard_vcpus_limit;
2399 KVMState *s;
2400 const KVMCapabilityInfo *missing_cap;
2401 int ret;
2402 int type;
2403 uint64_t dirty_log_manual_caps;
2404
2405 qemu_mutex_init(&kml_slots_lock);
2406
2407 s = KVM_STATE(ms->accelerator);
2408
2409 /*
2410 * On systems where the kernel can support different base page
2411 * sizes, host page size may be different from TARGET_PAGE_SIZE,
2412 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
2413 * page size for the system though.
2414 */
2415 assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2416
2417 s->sigmask_len = 8;
2418 accel_blocker_init();
2419
2420 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2421 QTAILQ_INIT(&s->kvm_sw_breakpoints);
2422 #endif
2423 QLIST_INIT(&s->kvm_parked_vcpus);
2424 s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
2425 if (s->fd == -1) {
2426 fprintf(stderr, "Could not access KVM kernel module: %m\n");
2427 ret = -errno;
2428 goto err;
2429 }
2430
2431 ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2432 if (ret < KVM_API_VERSION) {
2433 if (ret >= 0) {
2434 ret = -EINVAL;
2435 }
2436 fprintf(stderr, "kvm version too old\n");
2437 goto err;
2438 }
2439
2440 if (ret > KVM_API_VERSION) {
2441 ret = -EINVAL;
2442 fprintf(stderr, "kvm version not supported\n");
2443 goto err;
2444 }
2445
2446 kvm_supported_memory_attributes = kvm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
2447 kvm_guest_memfd_supported =
2448 kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
2449 kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
2450 (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
2451
2452 kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2453 s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2454
2455 /* If unspecified, use the default value */
2456 if (!s->nr_slots) {
2457 s->nr_slots = 32;
2458 }
2459
2460 s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2461 if (s->nr_as <= 1) {
2462 s->nr_as = 1;
2463 }
2464 s->as = g_new0(struct KVMAs, s->nr_as);
2465
2466 if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2467 g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
2468 "kvm-type",
2469 &error_abort);
2470 type = mc->kvm_type(ms, kvm_type);
2471 } else if (mc->kvm_type) {
2472 type = mc->kvm_type(ms, NULL);
2473 } else {
2474 type = kvm_arch_get_default_type(ms);
2475 }
2476
2477 if (type < 0) {
2478 ret = -EINVAL;
2479 goto err;
2480 }
2481
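/*
 * KVM_CREATE_VM can take a while and may be interrupted by a signal,
 * in which case the kernel returns -EINTR and the call is safe to
 * retry.
 */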
2482 do {
2483 ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2484 } while (ret == -EINTR);
2485
2486 if (ret < 0) {
2487 fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
2488 strerror(-ret));
2489
2490 #ifdef TARGET_S390X
2491 if (ret == -EINVAL) {
2492 fprintf(stderr,
2493 "Host kernel setup problem detected. Please verify:\n");
2494 fprintf(stderr, "- for kernels supporting the switch_amode or"
2495 " user_mode parameters, whether\n");
2496 fprintf(stderr,
2497 " user space is running in primary address space\n");
2498 fprintf(stderr,
2499 "- for kernels supporting the vm.allocate_pgste sysctl, "
2500 "whether it is enabled\n");
2501 }
2502 #elif defined(TARGET_PPC)
2503 if (ret == -EINVAL) {
2504 fprintf(stderr,
2505 "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2506 (type == 2) ? "pr" : "hv");
2507 }
2508 #endif
2509 goto err;
2510 }
2511
2512 s->vmfd = ret;
2513
2514 /* check the vcpu limits */
2515 soft_vcpus_limit = kvm_recommended_vcpus(s);
2516 hard_vcpus_limit = kvm_max_vcpus(s);
2517
2518 while (nc->name) {
2519 if (nc->num > soft_vcpus_limit) {
2520 warn_report("Number of %s cpus requested (%d) exceeds "
2521 "the recommended cpus supported by KVM (%d)",
2522 nc->name, nc->num, soft_vcpus_limit);
2523
2524 if (nc->num > hard_vcpus_limit) {
2525 fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
2526 "the maximum cpus supported by KVM (%d)\n",
2527 nc->name, nc->num, hard_vcpus_limit);
2528 exit(1);
2529 }
2530 }
2531 nc++;
2532 }
2533
2534 missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2535 if (!missing_cap) {
2536 missing_cap =
2537 kvm_check_extension_list(s, kvm_arch_required_capabilities);
2538 }
2539 if (missing_cap) {
2540 ret = -EINVAL;
2541 fprintf(stderr, "kvm does not support %s\n%s",
2542 missing_cap->name, upgrade_note);
2543 goto err;
2544 }
2545
2546 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2547 s->coalesced_pio = s->coalesced_mmio &&
2548 kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2549
2550 /*
2551 * Enable KVM dirty ring if supported, otherwise fall back to
2552 * dirty logging mode
2553 */
2554 ret = kvm_dirty_ring_init(s);
2555 if (ret < 0) {
2556 goto err;
2557 }
2558
2559 /*
2560 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
2561 * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
2562 * page is wr-protected initially, which is against how the kvm dirty ring
2563 * is used - the dirty ring requires all pages to be wr-protected at the
2564 * very beginning. Enabling this feature for the dirty ring causes data corruption.
2565 *
2566 * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2567 * we may expect a higher stall time when starting the migration. In the
2568 * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
2569 * instead of clearing the dirty bit, it can be a way to explicitly wr-protect
2570 * guest pages.
2571 */
2572 if (!s->kvm_dirty_ring_size) {
2573 dirty_log_manual_caps =
2574 kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2575 dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2576 KVM_DIRTY_LOG_INITIALLY_SET);
2577 s->manual_dirty_log_protect = dirty_log_manual_caps;
2578 if (dirty_log_manual_caps) {
2579 ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2580 dirty_log_manual_caps);
2581 if (ret) {
2582 warn_report("Trying to enable capability %"PRIu64" of "
2583 "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
2584 "Falling back to the legacy mode. ",
2585 dirty_log_manual_caps);
2586 s->manual_dirty_log_protect = 0;
2587 }
2588 }
2589 }
2590
2591 #ifdef KVM_CAP_VCPU_EVENTS
2592 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2593 #endif
2594 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2595
2596 s->irq_set_ioctl = KVM_IRQ_LINE;
2597 if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2598 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2599 }
2600
2601 kvm_readonly_mem_allowed =
2602 (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2603
2604 kvm_resamplefds_allowed =
2605 (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2606
2607 kvm_vm_attributes_allowed =
2608 (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2609
2610 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2611 kvm_has_guest_debug =
2612 (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2613 #endif
2614
2615 kvm_sstep_flags = 0;
2616 if (kvm_has_guest_debug) {
2617 kvm_sstep_flags = SSTEP_ENABLE;
2618
2619 #if defined TARGET_KVM_HAVE_GUEST_DEBUG
2620 int guest_debug_flags =
2621 kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2622
2623 if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2624 kvm_sstep_flags |= SSTEP_NOIRQ;
2625 }
2626 #endif
2627 }
2628
2629 kvm_state = s;
2630
2631 ret = kvm_arch_init(ms, s);
2632 if (ret < 0) {
2633 goto err;
2634 }
2635
2636 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2637 s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2638 }
2639
2640 qemu_register_reset(kvm_unpoison_all, NULL);
2641
2642 if (s->kernel_irqchip_allowed) {
2643 kvm_irqchip_create(s);
2644 }
2645
2646 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2647 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2648 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2649 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2650
2651 kvm_memory_listener_register(s, &s->memory_listener,
2652 &address_space_memory, 0, "kvm-memory");
2653 memory_listener_register(&kvm_io_listener,
2654 &address_space_io);
2655
2656 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2657 if (!s->sync_mmu) {
2658 ret = ram_block_discard_disable(true);
2659 assert(!ret);
2660 }
2661
2662 if (s->kvm_dirty_ring_size) {
2663 kvm_dirty_ring_reaper_init(s);
2664 }
2665
2666 if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2667 add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2668 query_stats_schemas_cb);
2669 }
2670
2671 return 0;
2672
2673 err:
2674 assert(ret < 0);
2675 if (s->vmfd >= 0) {
2676 close(s->vmfd);
2677 }
2678 if (s->fd != -1) {
2679 close(s->fd);
2680 }
2681 g_free(s->as);
2682 g_free(s->memory_listener.slots);
2683
2684 return ret;
2685 }
2686
2687 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2688 {
2689 s->sigmask_len = sigmask_len;
2690 }
2691
2692 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2693 int size, uint32_t count)
2694 {
2695 int i;
2696 uint8_t *ptr = data;
2697
2698 for (i = 0; i < count; i++) {
2699 address_space_rw(&address_space_io, port, attrs,
2700 ptr, size,
2701 direction == KVM_EXIT_IO_OUT);
2702 ptr += size;
2703 }
2704 }
2705
2706 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2707 {
2708 int i;
2709
2710 fprintf(stderr, "KVM internal error. Suberror: %d\n",
2711 run->internal.suberror);
2712
2713 for (i = 0; i < run->internal.ndata; ++i) {
2714 fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2715 i, (uint64_t)run->internal.data[i]);
2716 }
2717 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2718 fprintf(stderr, "emulation failure\n");
2719 if (!kvm_arch_stop_on_emulation_error(cpu)) {
2720 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2721 return EXCP_INTERRUPT;
2722 }
2723 }
2724 /* FIXME: Should trigger a qmp message to let management know
2725 * something went wrong.
2726 */
2727 return -1;
2728 }
2729
2730 void kvm_flush_coalesced_mmio_buffer(void)
2731 {
2732 KVMState *s = kvm_state;
2733
2734 if (!s || s->coalesced_flush_in_progress) {
2735 return;
2736 }
2737
2738 s->coalesced_flush_in_progress = true;
2739
2740 if (s->coalesced_mmio_ring) {
2741 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2742 while (ring->first != ring->last) {
2743 struct kvm_coalesced_mmio *ent;
2744
2745 ent = &ring->coalesced_mmio[ring->first];
2746
2747 if (ent->pio == 1) {
2748 address_space_write(&address_space_io, ent->phys_addr,
2749 MEMTXATTRS_UNSPECIFIED, ent->data,
2750 ent->len);
2751 } else {
2752 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2753 }
2754 smp_wmb();
2755 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2756 }
2757 }
2758
2759 s->coalesced_flush_in_progress = false;
2760 }
2761
2762 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2763 {
2764 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2765 int ret = kvm_arch_get_registers(cpu);
2766 if (ret) {
2767 error_report("Failed to get registers: %s", strerror(-ret));
2768 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2769 vm_stop(RUN_STATE_INTERNAL_ERROR);
2770 }
2771
2772 cpu->vcpu_dirty = true;
2773 }
2774 }
2775
2776 void kvm_cpu_synchronize_state(CPUState *cpu)
2777 {
2778 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2779 run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2780 }
2781 }
2782
2783 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2784 {
2785 int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2786 if (ret) {
2787 error_report("Failed to put registers after reset: %s", strerror(-ret));
2788 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2789 vm_stop(RUN_STATE_INTERNAL_ERROR);
2790 }
2791
2792 cpu->vcpu_dirty = false;
2793 }
2794
2795 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2796 {
2797 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2798 }
2799
2800 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2801 {
2802 int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2803 if (ret) {
2804 error_report("Failed to put registers after init: %s", strerror(-ret));
2805 exit(1);
2806 }
2807
2808 cpu->vcpu_dirty = false;
2809 }
2810
2811 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2812 {
2813 if (!kvm_state->guest_state_protected) {
2814 /*
2815 * This runs before the machine_init_done notifiers, and is the last
2816 * opportunity to synchronize the state of confidential guests.
2817 */
2818 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2819 }
2820 }
2821
2822 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2823 {
2824 cpu->vcpu_dirty = true;
2825 }
2826
2827 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2828 {
2829 run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2830 }
2831
2832 #ifdef KVM_HAVE_MCE_INJECTION
2833 static __thread void *pending_sigbus_addr;
2834 static __thread int pending_sigbus_code;
2835 static __thread bool have_sigbus_pending;
2836 #endif
2837
2838 static void kvm_cpu_kick(CPUState *cpu)
2839 {
2840 qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2841 }
2842
2843 static void kvm_cpu_kick_self(void)
2844 {
2845 if (kvm_immediate_exit) {
2846 kvm_cpu_kick(current_cpu);
2847 } else {
2848 qemu_cpu_kick_self();
2849 }
2850 }
2851
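/*
 * Consume a pending kick. With KVM_CAP_IMMEDIATE_EXIT there is no
 * signal to eat: just clear kvm_run->immediate_exit. On older kernels
 * the kick arrives as SIG_IPI, so drain every queued instance before
 * re-entering KVM_RUN.
 */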
2852 static void kvm_eat_signals(CPUState *cpu)
2853 {
2854 struct timespec ts = { 0, 0 };
2855 siginfo_t siginfo;
2856 sigset_t waitset;
2857 sigset_t chkset;
2858 int r;
2859
2860 if (kvm_immediate_exit) {
2861 qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2862 /* Write kvm_run->immediate_exit before the cpu->exit_request
2863 * write in kvm_cpu_exec.
2864 */
2865 smp_wmb();
2866 return;
2867 }
2868
2869 sigemptyset(&waitset);
2870 sigaddset(&waitset, SIG_IPI);
2871
2872 do {
2873 r = sigtimedwait(&waitset, &siginfo, &ts);
2874 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2875 perror("sigtimedwait");
2876 exit(1);
2877 }
2878
2879 r = sigpending(&chkset);
2880 if (r == -1) {
2881 perror("sigpending");
2882 exit(1);
2883 }
2884 } while (sigismember(&chkset, SIG_IPI));
2885 }
2886
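/*
 * Flip a range of guest_memfd-backed memory between private and shared
 * (for confidential guests such as TDX): update the KVM memory
 * attributes, then discard the backing of the side that is no longer in
 * use (with an exception for hugetlb-backed shared memory, which is
 * expected to stay pre-allocated).
 */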
2887 int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
2888 {
2889 MemoryRegionSection section;
2890 ram_addr_t offset;
2891 MemoryRegion *mr;
2892 RAMBlock *rb;
2893 void *addr;
2894 int ret = -1;
2895
2896 trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
2897
2898 if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
2899 !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
2900 return -1;
2901 }
2902
2903 if (!size) {
2904 return -1;
2905 }
2906
2907 section = memory_region_find(get_system_memory(), start, size);
2908 mr = section.mr;
2909 if (!mr) {
2910 /*
2911 * Ignore converting non-assigned region to shared.
2912 *
2913 * TDX requires vMMIO region to be shared to inject #VE to guest.
2914 * OVMF conservatively issues MapGPA(shared) on the 32bit PCI MMIO
2915 * region and on the vIO-APIC 0xFEC00000 4K page.
2916 * OVMF assigns the 32bit PCI MMIO region to
2917 * [top of low memory (typically 2GB = 0x80000000 or 3GB = 0xC0000000), 0xFC000000).
2918 */
2919 if (!to_private) {
2920 return 0;
2921 }
2922 return -1;
2923 }
2924
2925 if (!memory_region_has_guest_memfd(mr)) {
2926 /*
2927 * Because the vMMIO region must be shared, a guest TD may convert the
2928 * vMMIO region to shared explicitly. Don't complain in that case. See
2929 * memory_region_type() for how the MMIO region check is done.
2930 */
2931 if (!to_private &&
2932 !memory_region_is_ram(mr) &&
2933 !memory_region_is_ram_device(mr) &&
2934 !memory_region_is_rom(mr) &&
2935 !memory_region_is_romd(mr)) {
2936 ret = 0;
2937 } else {
2938 error_report("Convert non guest_memfd backed memory region "
2939 "(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
2940 start, size, to_private ? "private" : "shared");
2941 }
2942 goto out_unref;
2943 }
2944
2945 if (to_private) {
2946 ret = kvm_set_memory_attributes_private(start, size);
2947 } else {
2948 ret = kvm_set_memory_attributes_shared(start, size);
2949 }
2950 if (ret) {
2951 goto out_unref;
2952 }
2953
2954 addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
2955 rb = qemu_ram_block_from_host(addr, false, &offset);
2956
2957 if (to_private) {
2958 if (rb->page_size != qemu_real_host_page_size()) {
2959 /*
2960 * shared memory is backed by hugetlb, which is supposed to be
2961 * pre-allocated and doesn't need to be discarded
2962 */
2963 goto out_unref;
2964 }
2965 ret = ram_block_discard_range(rb, offset, size);
2966 } else {
2967 ret = ram_block_discard_guest_memfd_range(rb, offset, size);
2968 }
2969
2970 out_unref:
2971 memory_region_unref(mr);
2972 return ret;
2973 }
2974
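/*
 * The per-vCPU run loop: KVM_RUN is entered with the BQL released, and
 * the lock is only reacquired for exits that need it. Returns an EXCP_*
 * code to the caller, or a negative value on a fatal error (in which
 * case the VM is also stopped).
 */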
2975 int kvm_cpu_exec(CPUState *cpu)
2976 {
2977 struct kvm_run *run = cpu->kvm_run;
2978 int ret, run_ret;
2979
2980 trace_kvm_cpu_exec();
2981
2982 if (kvm_arch_process_async_events(cpu)) {
2983 qatomic_set(&cpu->exit_request, 0);
2984 return EXCP_HLT;
2985 }
2986
2987 bql_unlock();
2988 cpu_exec_start(cpu);
2989
2990 do {
2991 MemTxAttrs attrs;
2992
2993 if (cpu->vcpu_dirty) {
2994 ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2995 if (ret) {
2996 error_report("Failed to put registers after init: %s",
2997 strerror(-ret));
2998 ret = -1;
2999 break;
3000 }
3001
3002 cpu->vcpu_dirty = false;
3003 }
3004
3005 kvm_arch_pre_run(cpu, run);
3006 if (qatomic_read(&cpu->exit_request)) {
3007 trace_kvm_interrupt_exit_request();
3008 /*
3009 * KVM requires us to reenter the kernel after IO exits to complete
3010 * instruction emulation. This self-signal will ensure that we
3011 * leave ASAP again.
3012 */
3013 kvm_cpu_kick_self();
3014 }
3015
3016 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
3017 * Matching barrier in kvm_eat_signals.
3018 */
3019 smp_rmb();
3020
3021 run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
3022
3023 attrs = kvm_arch_post_run(cpu, run);
3024
3025 #ifdef KVM_HAVE_MCE_INJECTION
3026 if (unlikely(have_sigbus_pending)) {
3027 bql_lock();
3028 kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
3029 pending_sigbus_addr);
3030 have_sigbus_pending = false;
3031 bql_unlock();
3032 }
3033 #endif
3034
3035 if (run_ret < 0) {
3036 if (run_ret == -EINTR || run_ret == -EAGAIN) {
3037 trace_kvm_io_window_exit();
3038 kvm_eat_signals(cpu);
3039 ret = EXCP_INTERRUPT;
3040 break;
3041 }
3042 if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
3043 fprintf(stderr, "error: kvm run failed %s\n",
3044 strerror(-run_ret));
3045 #ifdef TARGET_PPC
3046 if (run_ret == -EBUSY) {
3047 fprintf(stderr,
3048 "This is probably because your SMT is enabled.\n"
3049 "VCPU can only run on primary threads with all "
3050 "secondary threads offline.\n");
3051 }
3052 #endif
3053 ret = -1;
3054 break;
3055 }
3056 }
3057
3058 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
3059 switch (run->exit_reason) {
3060 case KVM_EXIT_IO:
3061 /* Called outside BQL */
3062 kvm_handle_io(run->io.port, attrs,
3063 (uint8_t *)run + run->io.data_offset,
3064 run->io.direction,
3065 run->io.size,
3066 run->io.count);
3067 ret = 0;
3068 break;
3069 case KVM_EXIT_MMIO:
3070 /* Called outside BQL */
3071 address_space_rw(&address_space_memory,
3072 run->mmio.phys_addr, attrs,
3073 run->mmio.data,
3074 run->mmio.len,
3075 run->mmio.is_write);
3076 ret = 0;
3077 break;
3078 case KVM_EXIT_IRQ_WINDOW_OPEN:
3079 ret = EXCP_INTERRUPT;
3080 break;
3081 case KVM_EXIT_SHUTDOWN:
3082 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3083 ret = EXCP_INTERRUPT;
3084 break;
3085 case KVM_EXIT_UNKNOWN:
3086 fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
3087 (uint64_t)run->hw.hardware_exit_reason);
3088 ret = -1;
3089 break;
3090 case KVM_EXIT_INTERNAL_ERROR:
3091 ret = kvm_handle_internal_error(cpu, run);
3092 break;
3093 case KVM_EXIT_DIRTY_RING_FULL:
3094 /*
3095 * We shouldn't continue if the dirty ring of this vcpu is
3096 * still full. Got kicked by KVM_RESET_DIRTY_RINGS.
3097 */
3098 trace_kvm_dirty_ring_full(cpu->cpu_index);
3099 bql_lock();
3100 /*
3101 * We throttle the vCPU by making it sleep once it exits the kernel
3102 * because its dirty ring is full. In the dirtylimit scenario,
3103 * reaping all vCPUs after a single vCPU's ring fills up would make
3104 * that sleep be missed, so only reap the vCPU whose ring is full.
3105 */
3106 if (dirtylimit_in_service()) {
3107 kvm_dirty_ring_reap(kvm_state, cpu);
3108 } else {
3109 kvm_dirty_ring_reap(kvm_state, NULL);
3110 }
3111 bql_unlock();
3112 dirtylimit_vcpu_execute(cpu);
3113 ret = 0;
3114 break;
3115 case KVM_EXIT_SYSTEM_EVENT:
3116 trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
3117 switch (run->system_event.type) {
3118 case KVM_SYSTEM_EVENT_SHUTDOWN:
3119 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3120 ret = EXCP_INTERRUPT;
3121 break;
3122 case KVM_SYSTEM_EVENT_RESET:
3123 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3124 ret = EXCP_INTERRUPT;
3125 break;
3126 case KVM_SYSTEM_EVENT_CRASH:
3127 kvm_cpu_synchronize_state(cpu);
3128 bql_lock();
3129 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3130 bql_unlock();
3131 ret = 0;
3132 break;
3133 default:
3134 ret = kvm_arch_handle_exit(cpu, run);
3135 break;
3136 }
3137 break;
3138 case KVM_EXIT_MEMORY_FAULT:
3139 trace_kvm_memory_fault(run->memory_fault.gpa,
3140 run->memory_fault.size,
3141 run->memory_fault.flags);
3142 if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
3143 error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
3144 (uint64_t)run->memory_fault.flags);
3145 ret = -1;
3146 break;
3147 }
3148 ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
3149 run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
3150 break;
3151 default:
3152 ret = kvm_arch_handle_exit(cpu, run);
3153 break;
3154 }
3155 } while (ret == 0);
3156
3157 cpu_exec_end(cpu);
3158 bql_lock();
3159
3160 if (ret < 0) {
3161 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3162 vm_stop(RUN_STATE_INTERNAL_ERROR);
3163 }
3164
3165 qatomic_set(&cpu->exit_request, 0);
3166 return ret;
3167 }
3168
3169 int kvm_ioctl(KVMState *s, unsigned long type, ...)
3170 {
3171 int ret;
3172 void *arg;
3173 va_list ap;
3174
3175 va_start(ap, type);
3176 arg = va_arg(ap, void *);
3177 va_end(ap);
3178
3179 trace_kvm_ioctl(type, arg);
3180 ret = ioctl(s->fd, type, arg);
3181 if (ret == -1) {
3182 ret = -errno;
3183 }
3184 return ret;
3185 }
3186
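/*
 * The VM- and vCPU-scoped ioctl wrappers below are bracketed with the
 * accel-blocker begin/end helpers so that they can be held off while
 * memslots are being updated in a transaction (see
 * sysemu/accel-blocker.h).
 */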
3187 int kvm_vm_ioctl(KVMState *s, unsigned long type, ...)
3188 {
3189 int ret;
3190 void *arg;
3191 va_list ap;
3192
3193 va_start(ap, type);
3194 arg = va_arg(ap, void *);
3195 va_end(ap);
3196
3197 trace_kvm_vm_ioctl(type, arg);
3198 accel_ioctl_begin();
3199 ret = ioctl(s->vmfd, type, arg);
3200 accel_ioctl_end();
3201 if (ret == -1) {
3202 ret = -errno;
3203 }
3204 return ret;
3205 }
3206
3207 int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...)
3208 {
3209 int ret;
3210 void *arg;
3211 va_list ap;
3212
3213 va_start(ap, type);
3214 arg = va_arg(ap, void *);
3215 va_end(ap);
3216
3217 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3218 accel_cpu_ioctl_begin(cpu);
3219 ret = ioctl(cpu->kvm_fd, type, arg);
3220 accel_cpu_ioctl_end(cpu);
3221 if (ret == -1) {
3222 ret = -errno;
3223 }
3224 return ret;
3225 }
3226
3227 int kvm_device_ioctl(int fd, unsigned long type, ...)
3228 {
3229 int ret;
3230 void *arg;
3231 va_list ap;
3232
3233 va_start(ap, type);
3234 arg = va_arg(ap, void *);
3235 va_end(ap);
3236
3237 trace_kvm_device_ioctl(fd, type, arg);
3238 accel_ioctl_begin();
3239 ret = ioctl(fd, type, arg);
3240 accel_ioctl_end();
3241 if (ret == -1) {
3242 ret = -errno;
3243 }
3244 return ret;
3245 }
3246
3247 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3248 {
3249 int ret;
3250 struct kvm_device_attr attribute = {
3251 .group = group,
3252 .attr = attr,
3253 };
3254
3255 if (!kvm_vm_attributes_allowed) {
3256 return 0;
3257 }
3258
3259 ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3260 /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3261 return ret ? 0 : 1;
3262 }
3263
3264 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3265 {
3266 struct kvm_device_attr attribute = {
3267 .group = group,
3268 .attr = attr,
3269 .flags = 0,
3270 };
3271
3272 return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3273 }
3274
3275 int kvm_device_access(int fd, int group, uint64_t attr,
3276 void *val, bool write, Error **errp)
3277 {
3278 struct kvm_device_attr kvmattr;
3279 int err;
3280
3281 kvmattr.flags = 0;
3282 kvmattr.group = group;
3283 kvmattr.attr = attr;
3284 kvmattr.addr = (uintptr_t)val;
3285
3286 err = kvm_device_ioctl(fd,
3287 write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3288 &kvmattr);
3289 if (err < 0) {
3290 error_setg_errno(errp, -err,
3291 "KVM_%s_DEVICE_ATTR failed: Group %d "
3292 "attr 0x%016" PRIx64,
3293 write ? "SET" : "GET", group, attr);
3294 }
3295 return err;
3296 }
3297
3298 bool kvm_has_sync_mmu(void)
3299 {
3300 return kvm_state->sync_mmu;
3301 }
3302
3303 int kvm_has_vcpu_events(void)
3304 {
3305 return kvm_state->vcpu_events;
3306 }
3307
3308 int kvm_max_nested_state_length(void)
3309 {
3310 return kvm_state->max_nested_state_len;
3311 }
3312
3313 int kvm_has_gsi_routing(void)
3314 {
3315 #ifdef KVM_CAP_IRQ_ROUTING
3316 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3317 #else
3318 return false;
3319 #endif
3320 }
3321
3322 bool kvm_arm_supports_user_irq(void)
3323 {
3324 return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3325 }
3326
3327 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
3328 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
3329 {
3330 struct kvm_sw_breakpoint *bp;
3331
3332 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3333 if (bp->pc == pc) {
3334 return bp;
3335 }
3336 }
3337 return NULL;
3338 }
3339
3340 int kvm_sw_breakpoints_active(CPUState *cpu)
3341 {
3342 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3343 }
3344
3345 struct kvm_set_guest_debug_data {
3346 struct kvm_guest_debug dbg;
3347 int err;
3348 };
3349
3350 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3351 {
3352 struct kvm_set_guest_debug_data *dbg_data =
3353 (struct kvm_set_guest_debug_data *) data.host_ptr;
3354
3355 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3356 &dbg_data->dbg);
3357 }
3358
3359 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3360 {
3361 struct kvm_set_guest_debug_data data;
3362
3363 data.dbg.control = reinject_trap;
3364
3365 if (cpu->singlestep_enabled) {
3366 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3367
3368 if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3369 data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3370 }
3371 }
3372 kvm_arch_update_guest_debug(cpu, &data.dbg);
3373
3374 run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3375 RUN_ON_CPU_HOST_PTR(&data));
3376 return data.err;
3377 }
3378
3379 bool kvm_supports_guest_debug(void)
3380 {
3381 /* probed during kvm_init() */
3382 return kvm_has_guest_debug;
3383 }
3384
3385 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3386 {
3387 struct kvm_sw_breakpoint *bp;
3388 int err;
3389
3390 if (type == GDB_BREAKPOINT_SW) {
3391 bp = kvm_find_sw_breakpoint(cpu, addr);
3392 if (bp) {
3393 bp->use_count++;
3394 return 0;
3395 }
3396
3397 bp = g_new(struct kvm_sw_breakpoint, 1);
3398 bp->pc = addr;
3399 bp->use_count = 1;
3400 err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3401 if (err) {
3402 g_free(bp);
3403 return err;
3404 }
3405
3406 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3407 } else {
3408 err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3409 if (err) {
3410 return err;
3411 }
3412 }
3413
3414 CPU_FOREACH(cpu) {
3415 err = kvm_update_guest_debug(cpu, 0);
3416 if (err) {
3417 return err;
3418 }
3419 }
3420 return 0;
3421 }
3422
3423 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3424 {
3425 struct kvm_sw_breakpoint *bp;
3426 int err;
3427
3428 if (type == GDB_BREAKPOINT_SW) {
3429 bp = kvm_find_sw_breakpoint(cpu, addr);
3430 if (!bp) {
3431 return -ENOENT;
3432 }
3433
3434 if (bp->use_count > 1) {
3435 bp->use_count--;
3436 return 0;
3437 }
3438
3439 err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3440 if (err) {
3441 return err;
3442 }
3443
3444 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3445 g_free(bp);
3446 } else {
3447 err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3448 if (err) {
3449 return err;
3450 }
3451 }
3452
3453 CPU_FOREACH(cpu) {
3454 err = kvm_update_guest_debug(cpu, 0);
3455 if (err) {
3456 return err;
3457 }
3458 }
3459 return 0;
3460 }
3461
3462 void kvm_remove_all_breakpoints(CPUState *cpu)
3463 {
3464 struct kvm_sw_breakpoint *bp, *next;
3465 KVMState *s = cpu->kvm_state;
3466 CPUState *tmpcpu;
3467
3468 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3469 if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3470 /* Try harder to find a CPU that currently sees the breakpoint. */
3471 CPU_FOREACH(tmpcpu) {
3472 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3473 break;
3474 }
3475 }
3476 }
3477 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3478 g_free(bp);
3479 }
3480 kvm_arch_remove_all_hw_breakpoints();
3481
3482 CPU_FOREACH(cpu) {
3483 kvm_update_guest_debug(cpu, 0);
3484 }
3485 }
3486
3487 #endif /* TARGET_KVM_HAVE_GUEST_DEBUG */
3488
3489 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3490 {
3491 KVMState *s = kvm_state;
3492 struct kvm_signal_mask *sigmask;
3493 int r;
3494
3495 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3496
3497 sigmask->len = s->sigmask_len;
3498 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3499 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3500 g_free(sigmask);
3501
3502 return r;
3503 }
3504
3505 static void kvm_ipi_signal(int sig)
3506 {
3507 if (current_cpu) {
3508 assert(kvm_immediate_exit);
3509 kvm_cpu_kick(current_cpu);
3510 }
3511 }
3512
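/*
 * Per-vCPU signal setup. With immediate_exit support, SIG_IPI is left
 * unblocked in the thread and its handler simply sets
 * kvm_run->immediate_exit; on older kernels SIG_IPI stays blocked in
 * the thread and is unblocked only inside KVM_RUN via
 * KVM_SET_SIGNAL_MASK, so the kick interrupts the ioctl itself.
 */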
3513 void kvm_init_cpu_signals(CPUState *cpu)
3514 {
3515 int r;
3516 sigset_t set;
3517 struct sigaction sigact;
3518
3519 memset(&sigact, 0, sizeof(sigact));
3520 sigact.sa_handler = kvm_ipi_signal;
3521 sigaction(SIG_IPI, &sigact, NULL);
3522
3523 pthread_sigmask(SIG_BLOCK, NULL, &set);
3524 #if defined KVM_HAVE_MCE_INJECTION
3525 sigdelset(&set, SIGBUS);
3526 pthread_sigmask(SIG_SETMASK, &set, NULL);
3527 #endif
3528 sigdelset(&set, SIG_IPI);
3529 if (kvm_immediate_exit) {
3530 r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3531 } else {
3532 r = kvm_set_signal_mask(cpu, &set);
3533 }
3534 if (r) {
3535 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3536 exit(1);
3537 }
3538 }
3539
3540 /* Called asynchronously in VCPU thread. */
3541 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3542 {
3543 #ifdef KVM_HAVE_MCE_INJECTION
3544 if (have_sigbus_pending) {
3545 return 1;
3546 }
3547 have_sigbus_pending = true;
3548 pending_sigbus_addr = addr;
3549 pending_sigbus_code = code;
3550 qatomic_set(&cpu->exit_request, 1);
3551 return 0;
3552 #else
3553 return 1;
3554 #endif
3555 }
3556
3557 /* Called synchronously (via signalfd) in main thread. */
3558 int kvm_on_sigbus(int code, void *addr)
3559 {
3560 #ifdef KVM_HAVE_MCE_INJECTION
3561 /* An action-required MCE kills the process if SIGBUS is blocked, and
3562 * SIGBUS is blocked in the I/O thread, where we handle MCE via signalfd.
3563 * Hence only action-optional MCEs can reach this point.
3564 */
3565 assert(code != BUS_MCEERR_AR);
3566 kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3567 return 0;
3568 #else
3569 return 1;
3570 #endif
3571 }
3572
3573 int kvm_create_device(KVMState *s, uint64_t type, bool test)
3574 {
3575 int ret;
3576 struct kvm_create_device create_dev;
3577
3578 create_dev.type = type;
3579 create_dev.fd = -1;
3580 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3581
3582 if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3583 return -ENOTSUP;
3584 }
3585
3586 ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3587 if (ret) {
3588 return ret;
3589 }
3590
3591 return test ? 0 : create_dev.fd;
3592 }
3593
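/*
 * Probe device support without side effects: KVM_CREATE_DEVICE_TEST
 * asks the kernel to validate the type only, so no device fd is
 * created.
 */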
3594 bool kvm_device_supported(int vmfd, uint64_t type)
3595 {
3596 struct kvm_create_device create_dev = {
3597 .type = type,
3598 .fd = -1,
3599 .flags = KVM_CREATE_DEVICE_TEST,
3600 };
3601
3602 if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3603 return false;
3604 }
3605
3606 return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3607 }
3608
3609 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3610 {
3611 struct kvm_one_reg reg;
3612 int r;
3613
3614 reg.id = id;
3615 reg.addr = (uintptr_t) source;
3616 r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3617 if (r) {
3618 trace_kvm_failed_reg_set(id, strerror(-r));
3619 }
3620 return r;
3621 }
3622
3623 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3624 {
3625 struct kvm_one_reg reg;
3626 int r;
3627
3628 reg.id = id;
3629 reg.addr = (uintptr_t) target;
3630 r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3631 if (r) {
3632 trace_kvm_failed_reg_get(id, strerror(-r));
3633 }
3634 return r;
3635 }
3636
3637 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3638 hwaddr start_addr, hwaddr size)
3639 {
3640 KVMState *kvm = KVM_STATE(ms->accelerator);
3641 int i;
3642
3643 for (i = 0; i < kvm->nr_as; ++i) {
3644 if (kvm->as[i].as == as && kvm->as[i].ml) {
3645 size = MIN(kvm_max_slot_size, size);
3646 return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3647 start_addr, size);
3648 }
3649 }
3650
3651 return false;
3652 }
3653
3654 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3655 const char *name, void *opaque,
3656 Error **errp)
3657 {
3658 KVMState *s = KVM_STATE(obj);
3659 int64_t value = s->kvm_shadow_mem;
3660
3661 visit_type_int(v, name, &value, errp);
3662 }
3663
3664 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3665 const char *name, void *opaque,
3666 Error **errp)
3667 {
3668 KVMState *s = KVM_STATE(obj);
3669 int64_t value;
3670
3671 if (s->fd != -1) {
3672 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3673 return;
3674 }
3675
3676 if (!visit_type_int(v, name, &value, errp)) {
3677 return;
3678 }
3679
3680 s->kvm_shadow_mem = value;
3681 }
3682
3683 static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3684 const char *name, void *opaque,
3685 Error **errp)
3686 {
3687 KVMState *s = KVM_STATE(obj);
3688 OnOffSplit mode;
3689
3690 if (s->fd != -1) {
3691 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3692 return;
3693 }
3694
3695 if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3696 return;
3697 }
3698 switch (mode) {
3699 case ON_OFF_SPLIT_ON:
3700 s->kernel_irqchip_allowed = true;
3701 s->kernel_irqchip_required = true;
3702 s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3703 break;
3704 case ON_OFF_SPLIT_OFF:
3705 s->kernel_irqchip_allowed = false;
3706 s->kernel_irqchip_required = false;
3707 s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3708 break;
3709 case ON_OFF_SPLIT_SPLIT:
3710 s->kernel_irqchip_allowed = true;
3711 s->kernel_irqchip_required = true;
3712 s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3713 break;
3714 default:
3715 /* The value was checked in visit_type_OnOffSplit() above. If
3716 * we get here, then something is wrong in QEMU.
3717 */
3718 abort();
3719 }
3720 }
3721
3722 bool kvm_kernel_irqchip_allowed(void)
3723 {
3724 return kvm_state->kernel_irqchip_allowed;
3725 }
3726
3727 bool kvm_kernel_irqchip_required(void)
3728 {
3729 return kvm_state->kernel_irqchip_required;
3730 }
3731
3732 bool kvm_kernel_irqchip_split(void)
3733 {
3734 return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3735 }
3736
3737 static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3738 const char *name, void *opaque,
3739 Error **errp)
3740 {
3741 KVMState *s = KVM_STATE(obj);
3742 uint32_t value = s->kvm_dirty_ring_size;
3743
3744 visit_type_uint32(v, name, &value, errp);
3745 }
3746
3747 static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3748 const char *name, void *opaque,
3749 Error **errp)
3750 {
3751 KVMState *s = KVM_STATE(obj);
3752 uint32_t value;
3753
3754 if (s->fd != -1) {
3755 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3756 return;
3757 }
3758
3759 if (!visit_type_uint32(v, name, &value, errp)) {
3760 return;
3761 }
3762 if (value & (value - 1)) {
3763 error_setg(errp, "dirty-ring-size must be a power of two");
3764 return;
3765 }
3766
3767 s->kvm_dirty_ring_size = value;
3768 }
3769
3770 static char *kvm_get_device(Object *obj,
3771 Error **errp G_GNUC_UNUSED)
3772 {
3773 KVMState *s = KVM_STATE(obj);
3774
3775 return g_strdup(s->device);
3776 }
3777
3778 static void kvm_set_device(Object *obj,
3779 const char *value,
3780 Error **errp G_GNUC_UNUSED)
3781 {
3782 KVMState *s = KVM_STATE(obj);
3783
3784 g_free(s->device);
3785 s->device = g_strdup(value);
3786 }
3787
3788 static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp)
3789 {
3790 KVMState *s = KVM_STATE(obj);
3791 s->msr_energy.enable = value;
3792 }
3793
3794 static void kvm_set_kvm_rapl_socket_path(Object *obj,
3795 const char *str,
3796 Error **errp)
3797 {
3798 KVMState *s = KVM_STATE(obj);
3799 g_free(s->msr_energy.socket_path);
3800 s->msr_energy.socket_path = g_strdup(str);
3801 }
3802
3803 static void kvm_accel_instance_init(Object *obj)
3804 {
3805 KVMState *s = KVM_STATE(obj);
3806
3807 s->fd = -1;
3808 s->vmfd = -1;
3809 s->kvm_shadow_mem = -1;
3810 s->kernel_irqchip_allowed = true;
3811 s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3812 /* KVM dirty ring is by default off */
3813 s->kvm_dirty_ring_size = 0;
3814 s->kvm_dirty_ring_with_bitmap = false;
3815 s->kvm_eager_split_size = 0;
3816 s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
3817 s->notify_window = 0;
3818 s->xen_version = 0;
3819 s->xen_gnttab_max_frames = 64;
3820 s->xen_evtchn_max_pirq = 256;
3821 s->device = NULL;
3822 s->msr_energy.enable = false;
3823 }

/**
 * kvm_gdbstub_sstep_flags():
 *
 * Returns: SSTEP_* flags that KVM supports for guest debug. The
 * support is probed during kvm_init().
 */
static int kvm_gdbstub_sstep_flags(void)
{
    return kvm_sstep_flags;
}

static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;
    ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
                              NULL, kvm_set_kernel_irqchip,
                              NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
                              kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
                              NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");

    object_class_property_add(oc, "dirty-ring-size", "uint32",
                              kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
                              NULL, NULL);
    object_class_property_set_description(oc, "dirty-ring-size",
        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");

    object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
    object_class_property_set_description(oc, "device",
        "Path to the device node to use (default: /dev/kvm)");

    object_class_property_add_bool(oc, "rapl",
                                   NULL,
                                   kvm_set_kvm_rapl);
    object_class_property_set_description(oc, "rapl",
        "Allow energy related MSRs for RAPL interface in Guest");

    object_class_property_add_str(oc, "rapl-helper-socket", NULL,
                                  kvm_set_kvm_rapl_socket_path);
    object_class_property_set_description(oc, "rapl-helper-socket",
        "Socket path for communicating with the virtual MSR helper daemon");

    kvm_arch_accel_class_init(oc);
}
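
/*
 * Putting the properties registered above together, a command line
 * might look like this (values are illustrative, not recommendations):
 *
 *   qemu-system-x86_64 \
 *       -accel kvm,kernel-irqchip=split,dirty-ring-size=4096,device=/dev/kvm
 */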

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);

typedef struct StatsArgs {
    union StatsResultsType {
        StatsResultList **stats;
        StatsSchemaList **schema;
    } result;
    strList *names;
    Error **errp;
} StatsArgs;
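
/*
 * StatsArgs.result is a union because the same per-vCPU walkers below
 * (query_stats_vcpu() and query_stats_schema_vcpu()) serve both the
 * data query and the schema query; only one of the two pointers is
 * meaningful for any given call.
 */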

static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
                                    uint64_t *stats_data,
                                    StatsList *stats_list,
                                    Error **errp)
{
    Stats *stats;
    uint64List *val_list = NULL;

    /* Only add stats that we understand. */
    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
    case KVM_STATS_TYPE_INSTANT:
    case KVM_STATS_TYPE_PEAK:
    case KVM_STATS_TYPE_LINEAR_HIST:
    case KVM_STATS_TYPE_LOG_HIST:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
    case KVM_STATS_UNIT_BYTES:
    case KVM_STATS_UNIT_CYCLES:
    case KVM_STATS_UNIT_SECONDS:
    case KVM_STATS_UNIT_BOOLEAN:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_BASE_MASK) {
    case KVM_STATS_BASE_POW10:
    case KVM_STATS_BASE_POW2:
        break;
    default:
        return stats_list;
    }

    /* Alloc and populate data list */
    stats = g_new0(Stats, 1);
    stats->name = g_strdup(pdesc->name);
    stats->value = g_new0(StatsValue, 1);

    if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
        stats->value->u.boolean = *stats_data;
        stats->value->type = QTYPE_QBOOL;
    } else if (pdesc->size == 1) {
        stats->value->u.scalar = *stats_data;
        stats->value->type = QTYPE_QNUM;
    } else {
        int i;
        for (i = 0; i < pdesc->size; i++) {
            QAPI_LIST_PREPEND(val_list, stats_data[i]);
        }
        stats->value->u.list = val_list;
        stats->value->type = QTYPE_QLIST;
    }

    QAPI_LIST_PREPEND(stats_list, stats);
    return stats_list;
}
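
/*
 * To make the mapping above concrete: a BOOLEAN stat becomes a
 * QTYPE_QBOOL, a scalar (pdesc->size == 1) becomes a QTYPE_QNUM, and a
 * histogram with pdesc->size == 4 becomes a QTYPE_QLIST holding four
 * uint64 bucket values.
 */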

static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
                                                 StatsSchemaValueList *list,
                                                 Error **errp)
{
    StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
    schema_entry->value = g_new0(StatsSchemaValue, 1);

    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
        schema_entry->value->type = STATS_TYPE_CUMULATIVE;
        break;
    case KVM_STATS_TYPE_INSTANT:
        schema_entry->value->type = STATS_TYPE_INSTANT;
        break;
    case KVM_STATS_TYPE_PEAK:
        schema_entry->value->type = STATS_TYPE_PEAK;
        break;
    case KVM_STATS_TYPE_LINEAR_HIST:
        schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
        schema_entry->value->bucket_size = pdesc->bucket_size;
        schema_entry->value->has_bucket_size = true;
        break;
    case KVM_STATS_TYPE_LOG_HIST:
        schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
        break;
    default:
        goto exit;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
        break;
    case KVM_STATS_UNIT_BOOLEAN:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BOOLEAN;
        break;
    case KVM_STATS_UNIT_BYTES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BYTES;
        break;
    case KVM_STATS_UNIT_CYCLES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_CYCLES;
        break;
    case KVM_STATS_UNIT_SECONDS:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_SECONDS;
        break;
    default:
        goto exit;
    }

    schema_entry->value->exponent = pdesc->exponent;
    if (pdesc->exponent) {
        switch (pdesc->flags & KVM_STATS_BASE_MASK) {
        case KVM_STATS_BASE_POW10:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 10;
            break;
        case KVM_STATS_BASE_POW2:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 2;
            break;
        default:
            goto exit;
        }
    }

    schema_entry->value->name = g_strdup(pdesc->name);
    schema_entry->next = list;
    return schema_entry;
exit:
    g_free(schema_entry->value);
    g_free(schema_entry);
    return list;
}
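
/*
 * For illustration, a typical kernel descriptor such as
 * "halt_poll_success_ns" (cumulative, seconds, base 10, exponent -9)
 * would surface in the QMP schema roughly as:
 *
 *   { "name": "halt_poll_success_ns", "type": "cumulative",
 *     "unit": "seconds", "base": 10, "exponent": -9 }
 *
 * The exact set of stats depends on the host kernel.
 */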

/* Cached stats descriptors */
typedef struct StatsDescriptors {
    const char *ident;        /* cache key, currently the StatsTarget */
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header kvm_stats_header;
    QTAILQ_ENTRY(StatsDescriptors) next;
} StatsDescriptors;

static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
    QTAILQ_HEAD_INITIALIZER(stats_descriptors);

/*
 * Return the descriptors for 'target', either already cached or
 * freshly read from 'stats_fd'.
 */
static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
                                                Error **errp)
{
    StatsDescriptors *descriptors;
    const char *ident;
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    size_t size_desc;
    ssize_t ret;

    ident = StatsTarget_str(target);
    QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
        if (g_str_equal(descriptors->ident, ident)) {
            return descriptors;
        }
    }

    descriptors = g_new0(StatsDescriptors, 1);

    /* Read stats header */
    kvm_stats_header = &descriptors->kvm_stats_header;
    ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
    if (ret != sizeof(*kvm_stats_header)) {
        error_setg(errp, "KVM stats: failed to read stats header: "
                   "expected %zu actual %zd",
                   sizeof(*kvm_stats_header), ret);
        g_free(descriptors);
        return NULL;
    }
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Read stats descriptors */
    kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
    ret = pread(stats_fd, kvm_stats_desc,
                size_desc * kvm_stats_header->num_desc,
                kvm_stats_header->desc_offset);

    if (ret != size_desc * kvm_stats_header->num_desc) {
        error_setg(errp, "KVM stats: failed to read stats descriptors: "
                   "expected %zu actual %zd",
                   size_desc * kvm_stats_header->num_desc, ret);
        g_free(descriptors);
        g_free(kvm_stats_desc);
        return NULL;
    }
    descriptors->kvm_stats_desc = kvm_stats_desc;
    descriptors->ident = ident;
    QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
    return descriptors;
}
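
/*
 * For reference, the binary stats file descriptor returned by
 * KVM_GET_STATS_FD is laid out roughly as follows (see the kernel's
 * KVM API documentation for the authoritative description):
 *
 *   offset 0        struct kvm_stats_header (flags, name_size,
 *                   num_desc, id_offset, desc_offset, data_offset)
 *   id_offset       identifier string
 *   desc_offset     num_desc descriptors, each occupying
 *                   sizeof(struct kvm_stats_desc) + name_size bytes
 *   data_offset     the uint64_t stat values, located per descriptor
 *                   via pdesc->offset
 */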

static void query_stats(StatsResultList **result, StatsTarget target,
                        strList *names, int stats_fd, CPUState *cpu,
                        Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    g_autofree uint64_t *stats_data = NULL;
    struct kvm_stats_desc *pdesc;
    StatsList *stats_list = NULL;
    size_t size_desc, size_data = 0;
    ssize_t ret;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        size_data += pdesc->size * sizeof(*stats_data);
    }

    stats_data = g_malloc0(size_data);
    ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);

    if (ret != size_data) {
        error_setg(errp, "KVM stats: failed to read data: "
                   "expected %zu actual %zd", size_data, ret);
        return;
    }

    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        uint64_t *stats;
        pdesc = (void *)kvm_stats_desc + i * size_desc;

        /* Add entry to the list */
        stats = (void *)stats_data + pdesc->offset;
        if (!apply_str_list_filter(pdesc->name, names)) {
            continue;
        }
        stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
    }

    if (!stats_list) {
        return;
    }

    switch (target) {
    case STATS_TARGET_VM:
        add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
        break;
    case STATS_TARGET_VCPU:
        add_stats_entry(result, STATS_PROVIDER_KVM,
                        cpu->parent_obj.canonical_path,
                        stats_list);
        break;
    default:
        g_assert_not_reached();
    }
}

static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
                               int stats_fd, Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    struct kvm_stats_desc *pdesc;
    StatsSchemaValueList *stats_list = NULL;
    size_t size_desc;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Walk the descriptors and build the schema entry list */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
    }

    add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}

static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
                kvm_stats_args->names, stats_fd, cpu,
                kvm_stats_args->errp);
}

static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
                       kvm_stats_args->errp);
}

static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp)
{
    KVMState *s = kvm_state;
    CPUState *cpu;
    int stats_fd;

    switch (target) {
    case STATS_TARGET_VM:
    {
        stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
        if (stats_fd == -1) {
            error_setg_errno(errp, errno, "KVM stats: ioctl failed");
            return;
        }
        query_stats(result, target, names, stats_fd, NULL, errp);
        close(stats_fd);
        break;
    }
    case STATS_TARGET_VCPU:
    {
        StatsArgs stats_args;
        stats_args.result.stats = result;
        stats_args.names = names;
        stats_args.errp = errp;
        CPU_FOREACH(cpu) {
            if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
                continue;
            }
            query_stats_vcpu(cpu, &stats_args);
        }
        break;
    }
    default:
        break;
    }
}
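
/*
 * query_stats_cb() backs the QMP "query-stats" command; an
 * illustrative invocation (see qapi/stats.json for the authoritative
 * schema):
 *
 *   { "execute": "query-stats",
 *     "arguments": { "target": "vm",
 *                    "providers": [ { "provider": "kvm" } ] } }
 */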

void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
{
    StatsArgs stats_args;
    KVMState *s = kvm_state;
    int stats_fd;

    stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
    if (stats_fd == -1) {
        error_setg_errno(errp, errno, "KVM stats: ioctl failed");
        return;
    }
    query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
    close(stats_fd);

    if (first_cpu) {
        stats_args.result.schema = result;
        stats_args.errp = errp;
        query_stats_schema_vcpu(first_cpu, &stats_args);
    }
}
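
/*
 * Likewise for the QMP "query-stats-schemas" command, e.g.:
 *
 *   { "execute": "query-stats-schemas",
 *     "arguments": { "provider": "kvm" } }
 */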

void kvm_mark_guest_state_protected(void)
{
    kvm_state->guest_state_protected = true;
}

int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
{
    int fd;
    struct kvm_create_guest_memfd guest_memfd = {
        .size = size,
        .flags = flags,
    };

    if (!kvm_guest_memfd_supported) {
        error_setg(errp, "KVM does not support guest_memfd");
        return -1;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
    if (fd < 0) {
        error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
        return -1;
    }

    return fd;
}
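/*
 * Minimal caller sketch (hypothetical; "ram_size" is a placeholder):
 *
 *   Error *local_err = NULL;
 *   int fd = kvm_create_guest_memfd(ram_size, 0, &local_err);
 *   if (fd < 0) {
 *       error_report_err(local_err);
 *   }
 *
 * The returned fd backs private guest memory and is later attached to
 * a KVM memslot via the guest_memfd fields of the memory region.
 */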