/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "kvm-cpus.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "sysemu/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

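/*
 * A KVM vCPU cannot be destroyed individually by the kernel, so on CPU
 * hot-unplug QEMU "parks" the vcpu fd here and reuses it if a CPU with
 * the same vcpu_id is plugged back in later (see kvm_get_vcpu()).
 */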
struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static uint64_t kvm_supported_memory_attributes;
static bool kvm_guest_memfd_supported;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

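/*
 * Serializes access to the KVMSlot arrays of all KVMMemoryListeners
 * (one listener per KVM address space); taken via the helpers below.
 */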
static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
    }
    kvm_slots_unlock();

    return s->nr_slots - used_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Round the start
       address up to the next page boundary and truncate the size down
       to the previous one. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region2 mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    mem.guest_memfd = slot->guest_memfd;
    mem.guest_memfd_offset = slot->guest_memfd_offset;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;

        if (kvm_guest_memfd_supported) {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
        } else {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        }
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    if (kvm_guest_memfd_supported) {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
    } else {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, mem.guest_memfd,
                              mem.guest_memfd_offset, ret);
    if (ret < 0) {
        if (kvm_guest_memfd_supported) {
            error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
                         " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
                         " guest_memfd_offset=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, mem.flags,
                         mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
                         strerror(errno));
        } else {
            error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, strerror(errno));
        }
    }
    return ret;
}

static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    trace_kvm_destroy_vcpu();

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

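/*
 * Obtain a vCPU fd for @vcpu_id: reuse a parked fd left behind by a
 * previously destroyed vCPU if one exists, otherwise ask the kernel to
 * create a fresh one.
 */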
static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}

int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}

/*
 * dirty pages logging control
 */

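/*
 * Translate a MemoryRegion's properties into the KVM_MEM_* flag bits
 * that a memslot covering it should carry.
 */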
static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    if (memory_region_has_guest_memfd(mr)) {
        assert(kvm_guest_memfd_supported);
        flags |= KVM_MEM_GUEST_MEMFD;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

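/* Round x up to the next multiple of y (y must be a power of two). */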
#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But for case when the kernel is 64bits and
     * the userspace is 32bits, userspace can't align to the same
     * bits-per-long, since sizeof(long) is different between kernel
     * and user space.  This way, userspace will provide buffer which
     * may be 4 bytes less than the kernel will use, resulting in
     * userspace memory corruption (which is not detectable by valgrind
     * too, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * a hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}

/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Must be called with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

/*
 * Must be called with all slots_lock held for the address spaces.  It
 * returns the number of dirty pages we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but has not yet initialized the dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}

/* Must be called with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We could
 * consider dropping the BQL once we're clear about all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protecting those dirtied pages.
     *     Otherwise we can have potential risk of data corruption if
     *     the page data is read in the other thread before we do the
     *     reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When this returns, we
 * guarantee that every vcpu has been kicked and has returned to
 * userspace at least once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function is
     * always called with the BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch the dirty bitmap from the kernel,
 * and then update qemu's dirty bitmap.
 *
 * NOTE: caller must hold kml->slots_lock.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has a restriction on the size too, namely either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                             of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}


/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

/*
 * We track the poisoned pages to be able to:
 * - replace them on VM reset
 * - block a migration for a VM with a poisoned page
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

bool kvm_hwpoisoned_mem(void)
{
    return !QLIST_EMPTY(&hwpoison_page_list);
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}

static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

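/*
 * Walk @list and return the first capability the host kernel does not
 * advertise, or NULL if all of them are present.
 */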
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}

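/*
 * Apply @attr to the guest physical range [start, start + size).  @attr
 * must be within kvm_supported_memory_attributes, which is presumably
 * populated from KVM_CAP_MEMORY_ATTRIBUTES during accelerator setup.
 */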
static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
{
    struct kvm_memory_attributes attrs;
    int r;

    assert((attr & kvm_supported_memory_attributes) == attr);
    attrs.attributes = attr;
    attrs.address = start;
    attrs.size = size;
    attrs.flags = 0;

    r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
    if (r) {
        error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
                     "with attr 0x%" PRIx64 " error '%s'",
                     start, size, attr, strerror(errno));
    }
    return r;
}

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, 0);
}

/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: We should be aware of the fact that here we're only
                 * doing a best effort to sync dirty bits.  No matter whether
                 * we're using dirty log or dirty ring, we ignored two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy.  Let's cross our fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_used_slots--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        mem->guest_memfd = mr->ram_block->guest_memfd;
        mem->guest_memfd_offset = (uint8_t *)ram - mr->ram_block->host;

        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }

        if (memory_region_has_guest_memfd(mr)) {
            err = kvm_set_memory_attributes_private(start_addr, slot_size);
            if (err) {
                error_report("%s: failed to set memory attribute private: %s",
                             __func__, strerror(-err));
                exit(1);
            }
        }

        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_used_slots++;
    } while (size);
}

static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that dirtylimit is not disturbed by the reaper */
        if (dirtylimit_in_service()) {
            continue;
        }

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        bql_lock();
        kvm_dirty_ring_reap(s, NULL);
        bql_unlock();

        r->reaper_iteration++;
    }

    trace_kvm_dirty_ring_reaper("exit");

    rcu_unregister_thread();

    return NULL;
}

static void kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);
}

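/*
 * Enable the KVM dirty ring if a ring size was requested.  Falls back
 * to the dirty-log bitmap (returning 0) when the kernel lacks dirty
 * ring support; returns a negative errno only on real failures.
 */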
static int kvm_dirty_ring_init(KVMState *s)
{
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
    int ret;

    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_bytes = 0;

    /* Bail if the dirty ring size isn't specified */
    if (!ring_size) {
        return 0;
    }

    /*
     * Read the max supported pages. Fall back to dirty logging mode
     * if the dirty ring isn't supported.
     */
    ret = kvm_vm_check_extension(s, capability);
    if (ret <= 0) {
        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
        ret = kvm_vm_check_extension(s, capability);
    }

    if (ret <= 0) {
        warn_report("KVM dirty ring not available, using bitmap method");
        return 0;
    }

    if (ring_bytes > ret) {
        error_report("KVM dirty ring size %" PRIu32 " too big "
                     "(maximum is %ld). Please use a smaller value.",
                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
        return -EINVAL;
    }

    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
    if (ret) {
        error_report("Enabling of KVM dirty ring failed: %s. "
                     "Suggested minimum value is 1024.", strerror(-ret));
        return -EIO;
    }

    /* Enable the backup bitmap if it is supported */
    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
    if (ret > 0) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
        if (ret) {
            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
                         "%s. ", strerror(-ret));
            return -EIO;
        }

        s->kvm_dirty_ring_with_bitmap = true;
    }

    s->kvm_dirty_ring_size = ring_size;
    s->kvm_dirty_ring_bytes = ring_bytes;

    return 0;
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
}

static void kvm_region_commit(MemoryListener *listener)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
                                          listener);
    KVMMemoryUpdate *u1, *u2;
    bool need_inhibit = false;

    if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
        QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        return;
    }

    /*
     * We have to be careful when regions to add overlap with ranges to remove.
     * We have to simulate atomic KVM memslot updates by making sure no ioctl()
     * is currently active.
     *
     * The lists are ordered by address, so it's easy to find overlaps.
     */
    u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
    u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
    while (u1 && u2) {
        Range r1, r2;

        range_init_nofail(&r1, u1->section.offset_within_address_space,
                          int128_get64(u1->section.size));
        range_init_nofail(&r2, u2->section.offset_within_address_space,
                          int128_get64(u2->section.size));

        if (range_overlaps_range(&r1, &r2)) {
            need_inhibit = true;
            break;
        }
        if (range_lob(&r1) < range_lob(&r2)) {
            u1 = QSIMPLEQ_NEXT(u1, next);
        } else {
            u2 = QSIMPLEQ_NEXT(u2, next);
        }
    }

    kvm_slots_lock();
    if (need_inhibit) {
        accel_ioctl_inhibit_begin();
    }

    /* Remove all memslots before adding the new ones. */
    while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);

        kvm_set_phys_mem(kml, &u1->section, false);
        memory_region_unref(u1->section.mr);

        g_free(u1);
    }
    while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);

        memory_region_ref(u1->section.mr);
        kvm_set_phys_mem(kml, &u1->section, true);

        g_free(u1);
    }

    if (need_inhibit) {
        accel_ioctl_inhibit_end();
    }
    kvm_slots_unlock();
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_slots_lock();
    kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock();
}

static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i;

    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
    kvm_dirty_ring_flush();

    /*
     * TODO: make this faster when nr_slots is big while there are
     * only a few used slots (small VMs).
     */
    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_slot_sync_dirty_pages(mem);

            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
                kvm_slot_get_dirty_log(s, mem)) {
                kvm_slot_sync_dirty_pages(mem);
            }

            /*
             * This is not needed by KVM_GET_DIRTY_LOG because the
             * ioctl will unconditionally overwrite the whole region.
             * However kvm dirty ring has no such side effect.
             */
            kvm_slot_reset_dirty_pages(mem);
        }
    }
    kvm_slots_unlock();
}

static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)

{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id, const char *name)
{
    int i;

    kml->slots = g_new0(KVMSlot, s->nr_slots);
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    QSIMPLEQ_INIT(&kml->transaction_add);
    QSIMPLEQ_INIT(&kml->transaction_del);

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.commit = kvm_region_commit;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
    kml->listener.name = name;

    if (s->kvm_dirty_ring_size) {
        kml->listener.log_sync_global = kvm_log_sync_global;
    } else {
        kml->listener.log_sync = kvm_log_sync;
        kml->listener.log_clear = kvm_log_clear;
    }

    memory_listener_register(&kml->listener, as);

    for (i = 0; i < s->nr_as; ++i) {
        if (!s->as[i].as) {
            s->as[i].as = as;
            s->as[i].ml = kml;
            break;
        }
    }
}

static MemoryListener kvm_io_listener = {
    .name = "kvm-io",
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
};

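/*
 * Set the level of an in-kernel irqchip input pin.  Returns 1 when the
 * ioctl cannot report delivery status (KVM_IRQ_LINE), otherwise the
 * status reported by KVM_IRQ_LINE_STATUS.
 */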
kvm_set_irq(KVMState * s,int irq,int level)1843 int kvm_set_irq(KVMState *s, int irq, int level)
1844 {
1845 struct kvm_irq_level event;
1846 int ret;
1847
1848 assert(kvm_async_interrupts_enabled());
1849
1850 event.level = level;
1851 event.irq = irq;
1852 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1853 if (ret < 0) {
1854 perror("kvm_set_irq");
1855 abort();
1856 }
1857
1858 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1859 }
1860
1861 #ifdef KVM_CAP_IRQ_ROUTING
1862 typedef struct KVMMSIRoute {
1863 struct kvm_irq_routing_entry kroute;
1864 QTAILQ_ENTRY(KVMMSIRoute) entry;
1865 } KVMMSIRoute;
1866
set_gsi(KVMState * s,unsigned int gsi)1867 static void set_gsi(KVMState *s, unsigned int gsi)
1868 {
1869 set_bit(gsi, s->used_gsi_bitmap);
1870 }
1871
clear_gsi(KVMState * s,unsigned int gsi)1872 static void clear_gsi(KVMState *s, unsigned int gsi)
1873 {
1874 clear_bit(gsi, s->used_gsi_bitmap);
1875 }
1876
kvm_init_irq_routing(KVMState * s)1877 void kvm_init_irq_routing(KVMState *s)
1878 {
1879 int gsi_count;
1880
1881 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1882 if (gsi_count > 0) {
1883 /* Round up so we can search ints using ffs */
1884 s->used_gsi_bitmap = bitmap_new(gsi_count);
1885 s->gsi_count = gsi_count;
1886 }
1887
1888 s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1889 s->nr_allocated_irq_routes = 0;
1890
1891 kvm_arch_init_irq_routing(s);
1892 }
1893
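/*
 * Push the accumulated GSI routing table to the kernel in a single
 * KVM_SET_GSI_ROUTING ioctl. This is a no-op when GSIs map directly to
 * IRQ lines or when GSI routing is not in use, so callers can batch
 * kvm_add_routing_entry()/kvm_update_routing_entry() calls and commit
 * once.
 */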
1894 void kvm_irqchip_commit_routes(KVMState *s)
1895 {
1896 int ret;
1897
1898 if (kvm_gsi_direct_mapping()) {
1899 return;
1900 }
1901
1902 if (!kvm_gsi_routing_enabled()) {
1903 return;
1904 }
1905
1906 s->irq_routes->flags = 0;
1907 trace_kvm_irqchip_commit_routes();
1908 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1909 assert(ret == 0);
1910 }
1911
1912 static void kvm_add_routing_entry(KVMState *s,
1913 struct kvm_irq_routing_entry *entry)
1914 {
1915 struct kvm_irq_routing_entry *new;
1916 int n, size;
1917
1918 if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1919 n = s->nr_allocated_irq_routes * 2;
1920 if (n < 64) {
1921 n = 64;
1922 }
1923 size = sizeof(struct kvm_irq_routing);
1924 size += n * sizeof(*new);
1925 s->irq_routes = g_realloc(s->irq_routes, size);
1926 s->nr_allocated_irq_routes = n;
1927 }
1928 n = s->irq_routes->nr++;
1929 new = &s->irq_routes->entries[n];
1930
1931 *new = *entry;
1932
1933 set_gsi(s, entry->gsi);
1934 }
1935
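/*
 * Update the cached routing entry whose GSI matches @new_entry in place.
 * Returns 0 on success (including when the entry is already identical)
 * and -ESRCH if no entry with that GSI exists. The change only reaches
 * the kernel on the next kvm_irqchip_commit_routes().
 */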
1936 static int kvm_update_routing_entry(KVMState *s,
1937 struct kvm_irq_routing_entry *new_entry)
1938 {
1939 struct kvm_irq_routing_entry *entry;
1940 int n;
1941
1942 for (n = 0; n < s->irq_routes->nr; n++) {
1943 entry = &s->irq_routes->entries[n];
1944 if (entry->gsi != new_entry->gsi) {
1945 continue;
1946 }
1947
1948 if (!memcmp(entry, new_entry, sizeof *entry)) {
1949 return 0;
1950 }
1951
1952 *entry = *new_entry;
1953
1954 return 0;
1955 }
1956
1957 return -ESRCH;
1958 }
1959
1960 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1961 {
1962 struct kvm_irq_routing_entry e = {};
1963
1964 assert(pin < s->gsi_count);
1965
1966 e.gsi = irq;
1967 e.type = KVM_IRQ_ROUTING_IRQCHIP;
1968 e.flags = 0;
1969 e.u.irqchip.irqchip = irqchip;
1970 e.u.irqchip.pin = pin;
1971 kvm_add_routing_entry(s, &e);
1972 }
1973
1974 void kvm_irqchip_release_virq(KVMState *s, int virq)
1975 {
1976 struct kvm_irq_routing_entry *e;
1977 int i;
1978
1979 if (kvm_gsi_direct_mapping()) {
1980 return;
1981 }
1982
1983 for (i = 0; i < s->irq_routes->nr; i++) {
1984 e = &s->irq_routes->entries[i];
1985 if (e->gsi == virq) {
1986 s->irq_routes->nr--;
1987 *e = s->irq_routes->entries[s->irq_routes->nr];
1988 }
1989 }
1990 clear_gsi(s, virq);
1991 kvm_arch_release_virq_post(virq);
1992 trace_kvm_irqchip_release_virq(virq);
1993 }
1994
1995 void kvm_irqchip_add_change_notifier(Notifier *n)
1996 {
1997 notifier_list_add(&kvm_irqchip_change_notifiers, n);
1998 }
1999
2000 void kvm_irqchip_remove_change_notifier(Notifier *n)
2001 {
2002 notifier_remove(n);
2003 }
2004
2005 void kvm_irqchip_change_notify(void)
2006 {
2007 notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
2008 }
2009
2010 static int kvm_irqchip_get_virq(KVMState *s)
2011 {
2012 int next_virq;
2013
2014 /* Return the lowest unused GSI in the bitmap */
2015 next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
2016 if (next_virq >= s->gsi_count) {
2017 return -ENOSPC;
2018 } else {
2019 return next_virq;
2020 }
2021 }
2022
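/*
 * Inject a single MSI directly via KVM_SIGNAL_MSI without allocating a
 * virq or routing entry, which suits one-off injections where no
 * persistent irqfd binding is needed.
 */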
2023 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2024 {
2025 struct kvm_msi msi;
2026
2027 msi.address_lo = (uint32_t)msg.address;
2028 msi.address_hi = msg.address >> 32;
2029 msi.data = le32_to_cpu(msg.data);
2030 msi.flags = 0;
2031 memset(msi.pad, 0, sizeof(msi.pad));
2032
2033 return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
2034 }
2035
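/*
 * Allocate a virq and stage an MSI routing entry for @vector of @dev as
 * part of the route transaction @c; the route only reaches the kernel
 * when the transaction is committed. An illustrative caller sequence,
 * assuming the KVMRouteChange begin/commit helpers declared in
 * sysemu/kvm.h:
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, dev);
 *     if (virq >= 0) {
 *         kvm_irqchip_commit_route_changes(&c);
 *     }
 */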
2036 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2037 {
2038 struct kvm_irq_routing_entry kroute = {};
2039 int virq;
2040 KVMState *s = c->s;
2041 MSIMessage msg = {0, 0};
2042
2043 if (pci_available && dev) {
2044 msg = pci_get_msi_message(dev, vector);
2045 }
2046
2047 if (kvm_gsi_direct_mapping()) {
2048 return kvm_arch_msi_data_to_gsi(msg.data);
2049 }
2050
2051 if (!kvm_gsi_routing_enabled()) {
2052 return -ENOSYS;
2053 }
2054
2055 virq = kvm_irqchip_get_virq(s);
2056 if (virq < 0) {
2057 return virq;
2058 }
2059
2060 kroute.gsi = virq;
2061 kroute.type = KVM_IRQ_ROUTING_MSI;
2062 kroute.flags = 0;
2063 kroute.u.msi.address_lo = (uint32_t)msg.address;
2064 kroute.u.msi.address_hi = msg.address >> 32;
2065 kroute.u.msi.data = le32_to_cpu(msg.data);
2066 if (pci_available && kvm_msi_devid_required()) {
2067 kroute.flags = KVM_MSI_VALID_DEVID;
2068 kroute.u.msi.devid = pci_requester_id(dev);
2069 }
2070 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2071 kvm_irqchip_release_virq(s, virq);
2072 return -EINVAL;
2073 }
2074
2075 if (s->irq_routes->nr < s->gsi_count) {
2076 trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2077 vector, virq);
2078
2079 kvm_add_routing_entry(s, &kroute);
2080 kvm_arch_add_msi_route_post(&kroute, vector, dev);
2081 c->changes++;
2082 } else {
2083 kvm_irqchip_release_virq(s, virq);
2084 return -ENOSPC;
2085 }
2086
2087 return virq;
2088 }
2089
2090 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2091 PCIDevice *dev)
2092 {
2093 struct kvm_irq_routing_entry kroute = {};
2094
2095 if (kvm_gsi_direct_mapping()) {
2096 return 0;
2097 }
2098
2099 if (!kvm_irqchip_in_kernel()) {
2100 return -ENOSYS;
2101 }
2102
2103 kroute.gsi = virq;
2104 kroute.type = KVM_IRQ_ROUTING_MSI;
2105 kroute.flags = 0;
2106 kroute.u.msi.address_lo = (uint32_t)msg.address;
2107 kroute.u.msi.address_hi = msg.address >> 32;
2108 kroute.u.msi.data = le32_to_cpu(msg.data);
2109 if (pci_available && kvm_msi_devid_required()) {
2110 kroute.flags = KVM_MSI_VALID_DEVID;
2111 kroute.u.msi.devid = pci_requester_id(dev);
2112 }
2113 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2114 return -EINVAL;
2115 }
2116
2117 trace_kvm_irqchip_update_msi_route(virq);
2118
2119 return kvm_update_routing_entry(s, &kroute);
2120 }
2121
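/*
 * Bind (or unbind, when @assign is false) an eventfd to @virq via
 * KVM_IRQFD so the kernel can inject the interrupt without a trip
 * through userspace. For level-triggered sources a resample notifier
 * may be supplied; see the split-irqchip handling below for why the
 * resample kick sometimes has to be emulated in userspace.
 */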
2122 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2123 EventNotifier *resample, int virq,
2124 bool assign)
2125 {
2126 int fd = event_notifier_get_fd(event);
2127 int rfd = resample ? event_notifier_get_fd(resample) : -1;
2128
2129 struct kvm_irqfd irqfd = {
2130 .fd = fd,
2131 .gsi = virq,
2132 .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2133 };
2134
2135 if (rfd != -1) {
2136 assert(assign);
2137 if (kvm_irqchip_is_split()) {
2138 /*
2139 * When the slow irqchip (e.g. IOAPIC) lives in userspace,
2140 * the KVM kernel resamplefd will not work because the EOI
2141 * of the interrupt is delivered to userspace instead, so
2142 * the kernel resamplefd kick is skipped. Here userspace
2143 * mimics what the kernel provides with resamplefd:
2144 * remember the resamplefd and kick it when we receive the
2145 * EOI of this IRQ.
2146 *
2147 * This is hackery because the IOAPIC is mostly bypassed
2148 * (except for EOI broadcasts) when irqfd is used. However,
2149 * it brings much performance back for split irqchip with
2150 * INTx IRQs (for VFIO, this gives 93% of the perf of the
2151 * full fast path, which is a 46% perf boost compared to
2152 * the INTx slow path).
2153 */
2154 kvm_resample_fd_insert(virq, resample);
2155 } else {
2156 irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2157 irqfd.resamplefd = rfd;
2158 }
2159 } else if (!assign) {
2160 if (kvm_irqchip_is_split()) {
2161 kvm_resample_fd_remove(virq);
2162 }
2163 }
2164
2165 return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2166 }
2167
2168 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2169 {
2170 struct kvm_irq_routing_entry kroute = {};
2171 int virq;
2172
2173 if (!kvm_gsi_routing_enabled()) {
2174 return -ENOSYS;
2175 }
2176
2177 virq = kvm_irqchip_get_virq(s);
2178 if (virq < 0) {
2179 return virq;
2180 }
2181
2182 kroute.gsi = virq;
2183 kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
2184 kroute.flags = 0;
2185 kroute.u.adapter.summary_addr = adapter->summary_addr;
2186 kroute.u.adapter.ind_addr = adapter->ind_addr;
2187 kroute.u.adapter.summary_offset = adapter->summary_offset;
2188 kroute.u.adapter.ind_offset = adapter->ind_offset;
2189 kroute.u.adapter.adapter_id = adapter->adapter_id;
2190
2191 kvm_add_routing_entry(s, &kroute);
2192
2193 return virq;
2194 }
2195
2196 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2197 {
2198 struct kvm_irq_routing_entry kroute = {};
2199 int virq;
2200
2201 if (!kvm_gsi_routing_enabled()) {
2202 return -ENOSYS;
2203 }
2204 if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
2205 return -ENOSYS;
2206 }
2207 virq = kvm_irqchip_get_virq(s);
2208 if (virq < 0) {
2209 return virq;
2210 }
2211
2212 kroute.gsi = virq;
2213 kroute.type = KVM_IRQ_ROUTING_HV_SINT;
2214 kroute.flags = 0;
2215 kroute.u.hv_sint.vcpu = vcpu;
2216 kroute.u.hv_sint.sint = sint;
2217
2218 kvm_add_routing_entry(s, &kroute);
2219 kvm_irqchip_commit_routes(s);
2220
2221 return virq;
2222 }
2223
2224 #else /* !KVM_CAP_IRQ_ROUTING */
2225
2226 void kvm_init_irq_routing(KVMState *s)
2227 {
2228 }
2229
2230 void kvm_irqchip_release_virq(KVMState *s, int virq)
2231 {
2232 }
2233
2234 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2235 {
2236 abort();
2237 }
2238
2239 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2240 {
2241 return -ENOSYS;
2242 }
2243
2244 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2245 {
2246 return -ENOSYS;
2247 }
2248
2249 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2250 {
2251 return -ENOSYS;
2252 }
2253
2254 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2255 EventNotifier *resample, int virq,
2256 bool assign)
2257 {
2258 abort();
2259 }
2260
2261 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
2262 {
2263 return -ENOSYS;
2264 }
2265 #endif /* !KVM_CAP_IRQ_ROUTING */
2266
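/*
 * Thin wrappers around kvm_irqchip_assign_irqfd(). The *_gsi variants
 * take a virq directly; the qemu_irq variants look the GSI up in the
 * s->gsimap hash table populated by kvm_irqchip_set_qemuirq_gsi().
 */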
2267 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2268 EventNotifier *rn, int virq)
2269 {
2270 return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2271 }
2272
2273 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2274 int virq)
2275 {
2276 return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2277 }
2278
2279 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2280 EventNotifier *rn, qemu_irq irq)
2281 {
2282 gpointer key, gsi;
2283 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2284
2285 if (!found) {
2286 return -ENXIO;
2287 }
2288 return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2289 }
2290
2291 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2292 qemu_irq irq)
2293 {
2294 gpointer key, gsi;
2295 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2296
2297 if (!found) {
2298 return -ENXIO;
2299 }
2300 return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2301 }
2302
2303 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2304 {
2305 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2306 }
2307
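/*
 * Create the in-kernel irqchip: probe KVM_CAP_IRQCHIP (or
 * KVM_CAP_S390_IRQCHIP, which must be enabled explicitly), require irqfd
 * support, then give the arch-specific hook the first shot before falling
 * back to the generic KVM_CREATE_IRQCHIP ioctl. If neither capability is
 * present this quietly returns; later failures are fatal since a kernel
 * irqchip was requested.
 */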
2308 static void kvm_irqchip_create(KVMState *s)
2309 {
2310 int ret;
2311
2312 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2313 if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2314 ;
2315 } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2316 ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2317 if (ret < 0) {
2318 fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2319 exit(1);
2320 }
2321 } else {
2322 return;
2323 }
2324
2325 if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
2326 fprintf(stderr, "kvm: irqfd not implemented\n");
2327 exit(1);
2328 }
2329
2330 /* First probe and see if there's an arch-specific hook to create the
2331 * in-kernel irqchip for us */
2332 ret = kvm_arch_irqchip_create(s);
2333 if (ret == 0) {
2334 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2335 error_report("Split IRQ chip mode not supported.");
2336 exit(1);
2337 } else {
2338 ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2339 }
2340 }
2341 if (ret < 0) {
2342 fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2343 exit(1);
2344 }
2345
2346 kvm_kernel_irqchip = true;
2347 /* If we have an in-kernel IRQ chip then we must have asynchronous
2348 * interrupt delivery (though the reverse is not necessarily true)
2349 */
2350 kvm_async_interrupts_allowed = true;
2351 kvm_halt_in_kernel_allowed = true;
2352
2353 kvm_init_irq_routing(s);
2354
2355 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2356 }
2357
2358 /* Find number of supported CPUs using the recommended
2359 * procedure from the kernel API documentation to cope with
2360 * older kernels that may be missing capabilities.
2361 */
2362 static int kvm_recommended_vcpus(KVMState *s)
2363 {
2364 int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2365 return (ret) ? ret : 4;
2366 }
2367
2368 static int kvm_max_vcpus(KVMState *s)
2369 {
2370 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
2371 return (ret) ? ret : kvm_recommended_vcpus(s);
2372 }
2373
2374 static int kvm_max_vcpu_id(KVMState *s)
2375 {
2376 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2377 return (ret) ? ret : kvm_max_vcpus(s);
2378 }
2379
2380 bool kvm_vcpu_id_is_valid(int vcpu_id)
2381 {
2382 KVMState *s = KVM_STATE(current_accel());
2383 return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2384 }
2385
2386 bool kvm_dirty_ring_enabled(void)
2387 {
2388 return kvm_state->kvm_dirty_ring_size ? true : false;
2389 }
2390
2391 static void query_stats_cb(StatsResultList **result, StatsTarget target,
2392 strList *names, strList *targets, Error **errp);
2393 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2394
2395 uint32_t kvm_dirty_ring_size(void)
2396 {
2397 return kvm_state->kvm_dirty_ring_size;
2398 }
2399
2400 static int kvm_init(MachineState *ms)
2401 {
2402 MachineClass *mc = MACHINE_GET_CLASS(ms);
2403 static const char upgrade_note[] =
2404 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2405 "(see http://sourceforge.net/projects/kvm).\n";
2406 const struct {
2407 const char *name;
2408 int num;
2409 } num_cpus[] = {
2410 { "SMP", ms->smp.cpus },
2411 { "hotpluggable", ms->smp.max_cpus },
2412 { /* end of list */ }
2413 }, *nc = num_cpus;
2414 int soft_vcpus_limit, hard_vcpus_limit;
2415 KVMState *s;
2416 const KVMCapabilityInfo *missing_cap;
2417 int ret;
2418 int type;
2419 uint64_t dirty_log_manual_caps;
2420
2421 qemu_mutex_init(&kml_slots_lock);
2422
2423 s = KVM_STATE(ms->accelerator);
2424
2425 /*
2426 * On systems where the kernel can support different base page
2427 * sizes, host page size may be different from TARGET_PAGE_SIZE,
2428 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
2429 * page size for the system though.
2430 */
2431 assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2432
2433 s->sigmask_len = 8;
2434 accel_blocker_init();
2435
2436 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2437 QTAILQ_INIT(&s->kvm_sw_breakpoints);
2438 #endif
2439 QLIST_INIT(&s->kvm_parked_vcpus);
2440 s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
2441 if (s->fd == -1) {
2442 fprintf(stderr, "Could not access KVM kernel module: %m\n");
2443 ret = -errno;
2444 goto err;
2445 }
2446
2447 ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2448 if (ret < KVM_API_VERSION) {
2449 if (ret >= 0) {
2450 ret = -EINVAL;
2451 }
2452 fprintf(stderr, "kvm version too old\n");
2453 goto err;
2454 }
2455
2456 if (ret > KVM_API_VERSION) {
2457 ret = -EINVAL;
2458 fprintf(stderr, "kvm version not supported\n");
2459 goto err;
2460 }
2461
2462 kvm_supported_memory_attributes = kvm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
2463 kvm_guest_memfd_supported =
2464 kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
2465 kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
2466 (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
2467
2468 kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2469 s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2470
2471 /* If unspecified, use the default value */
2472 if (!s->nr_slots) {
2473 s->nr_slots = 32;
2474 }
2475
2476 s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2477 if (s->nr_as <= 1) {
2478 s->nr_as = 1;
2479 }
2480 s->as = g_new0(struct KVMAs, s->nr_as);
2481
2482 if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2483 g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
2484 "kvm-type",
2485 &error_abort);
2486 type = mc->kvm_type(ms, kvm_type);
2487 } else if (mc->kvm_type) {
2488 type = mc->kvm_type(ms, NULL);
2489 } else {
2490 type = kvm_arch_get_default_type(ms);
2491 }
2492
2493 if (type < 0) {
2494 ret = -EINVAL;
2495 goto err;
2496 }
2497
2498 do {
2499 ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2500 } while (ret == -EINTR);
2501
2502 if (ret < 0) {
2503 fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
2504 strerror(-ret));
2505
2506 #ifdef TARGET_S390X
2507 if (ret == -EINVAL) {
2508 fprintf(stderr,
2509 "Host kernel setup problem detected. Please verify:\n");
2510 fprintf(stderr, "- for kernels supporting the switch_amode or"
2511 " user_mode parameters, whether\n");
2512 fprintf(stderr,
2513 " user space is running in primary address space\n");
2514 fprintf(stderr,
2515 "- for kernels supporting the vm.allocate_pgste sysctl, "
2516 "whether it is enabled\n");
2517 }
2518 #elif defined(TARGET_PPC)
2519 if (ret == -EINVAL) {
2520 fprintf(stderr,
2521 "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2522 (type == 2) ? "pr" : "hv");
2523 }
2524 #endif
2525 goto err;
2526 }
2527
2528 s->vmfd = ret;
2529
2530 /* check the vcpu limits */
2531 soft_vcpus_limit = kvm_recommended_vcpus(s);
2532 hard_vcpus_limit = kvm_max_vcpus(s);
2533
2534 while (nc->name) {
2535 if (nc->num > soft_vcpus_limit) {
2536 warn_report("Number of %s cpus requested (%d) exceeds "
2537 "the recommended cpus supported by KVM (%d)",
2538 nc->name, nc->num, soft_vcpus_limit);
2539
2540 if (nc->num > hard_vcpus_limit) {
2541 fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
2542 "the maximum cpus supported by KVM (%d)\n",
2543 nc->name, nc->num, hard_vcpus_limit);
2544 exit(1);
2545 }
2546 }
2547 nc++;
2548 }
2549
2550 missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2551 if (!missing_cap) {
2552 missing_cap =
2553 kvm_check_extension_list(s, kvm_arch_required_capabilities);
2554 }
2555 if (missing_cap) {
2556 ret = -EINVAL;
2557 fprintf(stderr, "kvm does not support %s\n%s",
2558 missing_cap->name, upgrade_note);
2559 goto err;
2560 }
2561
2562 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2563 s->coalesced_pio = s->coalesced_mmio &&
2564 kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2565
2566 /*
2567 * Enable KVM dirty ring if supported, otherwise fall back to
2568 * dirty logging mode
2569 */
2570 ret = kvm_dirty_ring_init(s);
2571 if (ret < 0) {
2572 goto err;
2573 }
2574
2575 /*
2576 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
2577 * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
2578 * page is wr-protected initially, which is against how the kvm dirty
2579 * ring is used - the dirty ring requires all pages to be wr-protected at
2580 * the very beginning. Enabling this feature for the dirty ring causes data corruption.
2581 *
2582 * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2583 * we may expect a higher stall time when starting the migration. In the
2584 * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
2585 * instead of clearing dirty bit, it can be a way to explicitly wr-protect
2586 * guest pages.
2587 */
2588 if (!s->kvm_dirty_ring_size) {
2589 dirty_log_manual_caps =
2590 kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2591 dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2592 KVM_DIRTY_LOG_INITIALLY_SET);
2593 s->manual_dirty_log_protect = dirty_log_manual_caps;
2594 if (dirty_log_manual_caps) {
2595 ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2596 dirty_log_manual_caps);
2597 if (ret) {
2598 warn_report("Failed to enable capability %"PRIu64" of "
2599 "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. "
2600 "Falling back to the legacy mode.",
2601 dirty_log_manual_caps);
2602 s->manual_dirty_log_protect = 0;
2603 }
2604 }
2605 }
2606
2607 #ifdef KVM_CAP_VCPU_EVENTS
2608 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2609 #endif
2610 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2611
2612 s->irq_set_ioctl = KVM_IRQ_LINE;
2613 if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2614 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2615 }
2616
2617 kvm_readonly_mem_allowed =
2618 (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2619
2620 kvm_resamplefds_allowed =
2621 (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2622
2623 kvm_vm_attributes_allowed =
2624 (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2625
2626 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2627 kvm_has_guest_debug =
2628 (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2629 #endif
2630
2631 kvm_sstep_flags = 0;
2632 if (kvm_has_guest_debug) {
2633 kvm_sstep_flags = SSTEP_ENABLE;
2634
2635 #if defined TARGET_KVM_HAVE_GUEST_DEBUG
2636 int guest_debug_flags =
2637 kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2638
2639 if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2640 kvm_sstep_flags |= SSTEP_NOIRQ;
2641 }
2642 #endif
2643 }
2644
2645 kvm_state = s;
2646
2647 ret = kvm_arch_init(ms, s);
2648 if (ret < 0) {
2649 goto err;
2650 }
2651
2652 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2653 s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2654 }
2655
2656 qemu_register_reset(kvm_unpoison_all, NULL);
2657
2658 if (s->kernel_irqchip_allowed) {
2659 kvm_irqchip_create(s);
2660 }
2661
2662 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2663 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2664 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2665 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2666
2667 kvm_memory_listener_register(s, &s->memory_listener,
2668 &address_space_memory, 0, "kvm-memory");
2669 memory_listener_register(&kvm_io_listener,
2670 &address_space_io);
2671
2672 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2673 if (!s->sync_mmu) {
2674 ret = ram_block_discard_disable(true);
2675 assert(!ret);
2676 }
2677
2678 if (s->kvm_dirty_ring_size) {
2679 kvm_dirty_ring_reaper_init(s);
2680 }
2681
2682 if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2683 add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2684 query_stats_schemas_cb);
2685 }
2686
2687 return 0;
2688
2689 err:
2690 assert(ret < 0);
2691 if (s->vmfd >= 0) {
2692 close(s->vmfd);
2693 }
2694 if (s->fd != -1) {
2695 close(s->fd);
2696 }
2697 g_free(s->as);
2698 g_free(s->memory_listener.slots);
2699
2700 return ret;
2701 }
2702
2703 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2704 {
2705 s->sigmask_len = sigmask_len;
2706 }
2707
2708 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2709 int size, uint32_t count)
2710 {
2711 int i;
2712 uint8_t *ptr = data;
2713
2714 for (i = 0; i < count; i++) {
2715 address_space_rw(&address_space_io, port, attrs,
2716 ptr, size,
2717 direction == KVM_EXIT_IO_OUT);
2718 ptr += size;
2719 }
2720 }
2721
2722 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2723 {
2724 int i;
2725
2726 fprintf(stderr, "KVM internal error. Suberror: %d\n",
2727 run->internal.suberror);
2728
2729 for (i = 0; i < run->internal.ndata; ++i) {
2730 fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2731 i, (uint64_t)run->internal.data[i]);
2732 }
2733 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2734 fprintf(stderr, "emulation failure\n");
2735 if (!kvm_arch_stop_on_emulation_error(cpu)) {
2736 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2737 return EXCP_INTERRUPT;
2738 }
2739 }
2740 /* FIXME: Should trigger a qmp message to let management know
2741 * something went wrong.
2742 */
2743 return -1;
2744 }
2745
2746 void kvm_flush_coalesced_mmio_buffer(void)
2747 {
2748 KVMState *s = kvm_state;
2749
2750 if (!s || s->coalesced_flush_in_progress) {
2751 return;
2752 }
2753
2754 s->coalesced_flush_in_progress = true;
2755
2756 if (s->coalesced_mmio_ring) {
2757 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2758 while (ring->first != ring->last) {
2759 struct kvm_coalesced_mmio *ent;
2760
2761 ent = &ring->coalesced_mmio[ring->first];
2762
2763 if (ent->pio == 1) {
2764 address_space_write(&address_space_io, ent->phys_addr,
2765 MEMTXATTRS_UNSPECIFIED, ent->data,
2766 ent->len);
2767 } else {
2768 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2769 }
2770 smp_wmb();
2771 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2772 }
2773 }
2774
2775 s->coalesced_flush_in_progress = false;
2776 }
2777
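/*
 * The kvm_cpu_synchronize_*() family keeps QEMU's CPUState and the
 * kernel's vCPU register state coherent. Registers are fetched lazily
 * and cpu->vcpu_dirty marks QEMU's copy as authoritative, so they are
 * written back with the appropriate KVM_PUT_* level before the next
 * KVM_RUN. All of these run on the target vCPU thread via run_on_cpu().
 */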
2778 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2779 {
2780 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2781 int ret = kvm_arch_get_registers(cpu);
2782 if (ret) {
2783 error_report("Failed to get registers: %s", strerror(-ret));
2784 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2785 vm_stop(RUN_STATE_INTERNAL_ERROR);
2786 }
2787
2788 cpu->vcpu_dirty = true;
2789 }
2790 }
2791
2792 void kvm_cpu_synchronize_state(CPUState *cpu)
2793 {
2794 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2795 run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2796 }
2797 }
2798
2799 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2800 {
2801 int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2802 if (ret) {
2803 error_report("Failed to put registers after reset: %s", strerror(-ret));
2804 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2805 vm_stop(RUN_STATE_INTERNAL_ERROR);
2806 }
2807
2808 cpu->vcpu_dirty = false;
2809 }
2810
2811 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2812 {
2813 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2814 }
2815
2816 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2817 {
2818 int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2819 if (ret) {
2820 error_report("Failed to put registers after init: %s", strerror(-ret));
2821 exit(1);
2822 }
2823
2824 cpu->vcpu_dirty = false;
2825 }
2826
2827 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2828 {
2829 if (!kvm_state->guest_state_protected) {
2830 /*
2831 * This runs before the machine_init_done notifiers, and is the last
2832 * opportunity to synchronize the state of confidential guests.
2833 */
2834 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2835 }
2836 }
2837
2838 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2839 {
2840 cpu->vcpu_dirty = true;
2841 }
2842
2843 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2844 {
2845 run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2846 }
2847
2848 #ifdef KVM_HAVE_MCE_INJECTION
2849 static __thread void *pending_sigbus_addr;
2850 static __thread int pending_sigbus_code;
2851 static __thread bool have_sigbus_pending;
2852 #endif
2853
2854 static void kvm_cpu_kick(CPUState *cpu)
2855 {
2856 qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2857 }
2858
2859 static void kvm_cpu_kick_self(void)
2860 {
2861 if (kvm_immediate_exit) {
2862 kvm_cpu_kick(current_cpu);
2863 } else {
2864 qemu_cpu_kick_self();
2865 }
2866 }
2867
2868 static void kvm_eat_signals(CPUState *cpu)
2869 {
2870 struct timespec ts = { 0, 0 };
2871 siginfo_t siginfo;
2872 sigset_t waitset;
2873 sigset_t chkset;
2874 int r;
2875
2876 if (kvm_immediate_exit) {
2877 qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2878 /* Write kvm_run->immediate_exit before the cpu->exit_request
2879 * write in kvm_cpu_exec.
2880 */
2881 smp_wmb();
2882 return;
2883 }
2884
2885 sigemptyset(&waitset);
2886 sigaddset(&waitset, SIG_IPI);
2887
2888 do {
2889 r = sigtimedwait(&waitset, &siginfo, &ts);
2890 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2891 perror("sigtimedwait");
2892 exit(1);
2893 }
2894
2895 r = sigpending(&chkset);
2896 if (r == -1) {
2897 perror("sigpending");
2898 exit(1);
2899 }
2900 } while (sigismember(&chkset, SIG_IPI));
2901 }
2902
2903 int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
2904 {
2905 MemoryRegionSection section;
2906 ram_addr_t offset;
2907 MemoryRegion *mr;
2908 RAMBlock *rb;
2909 void *addr;
2910 int ret = -1;
2911
2912 trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
2913
2914 if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
2915 !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
2916 return -1;
2917 }
2918
2919 if (!size) {
2920 return -1;
2921 }
2922
2923 section = memory_region_find(get_system_memory(), start, size);
2924 mr = section.mr;
2925 if (!mr) {
2926 /*
2927 * Ignore converting non-assigned region to shared.
2928 *
2929 * TDX requires the vMMIO region to be shared to inject #VE into the guest.
2930 * OVMF conservatively issues MapGPA(shared) on the 32bit PCI MMIO region
2931 * and the vIO-APIC 0xFEC00000 4K page.
2932 * OVMF assigns the 32bit PCI MMIO region to
2933 * [top of low memory: typically 2GB=0xC000000, 0xFC00000)
2934 */
2935 if (!to_private) {
2936 return 0;
2937 }
2938 return -1;
2939 }
2940
2941 if (!memory_region_has_guest_memfd(mr)) {
2942 /*
2943 * Because the vMMIO region must be shared, the guest TD may convert it
2944 * to shared explicitly. Don't complain about such cases. See
2945 * memory_region_type() for checking if the region is an MMIO region.
2946 */
2947 if (!to_private &&
2948 !memory_region_is_ram(mr) &&
2949 !memory_region_is_ram_device(mr) &&
2950 !memory_region_is_rom(mr) &&
2951 !memory_region_is_romd(mr)) {
2952 ret = 0;
2953 } else {
2954 error_report("Convert non guest_memfd backed memory region "
2955 "(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
2956 start, size, to_private ? "private" : "shared");
2957 }
2958 goto out_unref;
2959 }
2960
2961 if (to_private) {
2962 ret = kvm_set_memory_attributes_private(start, size);
2963 } else {
2964 ret = kvm_set_memory_attributes_shared(start, size);
2965 }
2966 if (ret) {
2967 goto out_unref;
2968 }
2969
2970 addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
2971 rb = qemu_ram_block_from_host(addr, false, &offset);
2972
2973 if (to_private) {
2974 if (rb->page_size != qemu_real_host_page_size()) {
2975 /*
2976 * shared memory is backed by hugetlb, which is supposed to be
2977 * pre-allocated and doesn't need to be discarded
2978 */
2979 goto out_unref;
2980 }
2981 ret = ram_block_discard_range(rb, offset, size);
2982 } else {
2983 ret = ram_block_discard_guest_memfd_range(rb, offset, size);
2984 }
2985
2986 out_unref:
2987 memory_region_unref(mr);
2988 return ret;
2989 }
2990
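/*
 * Main vCPU execution loop: flush dirty registers, enter the guest with
 * KVM_RUN outside the BQL, then dispatch on run->exit_reason until an
 * exit requires returning to the main loop. Returns EXCP_* codes for
 * recoverable exits and a negative value on fatal errors, which stops
 * the VM with RUN_STATE_INTERNAL_ERROR.
 */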
2991 int kvm_cpu_exec(CPUState *cpu)
2992 {
2993 struct kvm_run *run = cpu->kvm_run;
2994 int ret, run_ret;
2995
2996 trace_kvm_cpu_exec();
2997
2998 if (kvm_arch_process_async_events(cpu)) {
2999 qatomic_set(&cpu->exit_request, 0);
3000 return EXCP_HLT;
3001 }
3002
3003 bql_unlock();
3004 cpu_exec_start(cpu);
3005
3006 do {
3007 MemTxAttrs attrs;
3008
3009 if (cpu->vcpu_dirty) {
3010 ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
3011 if (ret) {
3012 error_report("Failed to put registers: %s",
3013 strerror(-ret));
3014 ret = -1;
3015 break;
3016 }
3017
3018 cpu->vcpu_dirty = false;
3019 }
3020
3021 kvm_arch_pre_run(cpu, run);
3022 if (qatomic_read(&cpu->exit_request)) {
3023 trace_kvm_interrupt_exit_request();
3024 /*
3025 * KVM requires us to reenter the kernel after IO exits to complete
3026 * instruction emulation. This self-signal will ensure that we
3027 * leave ASAP again.
3028 */
3029 kvm_cpu_kick_self();
3030 }
3031
3032 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
3033 * Matching barrier in kvm_eat_signals.
3034 */
3035 smp_rmb();
3036
3037 run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
3038
3039 attrs = kvm_arch_post_run(cpu, run);
3040
3041 #ifdef KVM_HAVE_MCE_INJECTION
3042 if (unlikely(have_sigbus_pending)) {
3043 bql_lock();
3044 kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
3045 pending_sigbus_addr);
3046 have_sigbus_pending = false;
3047 bql_unlock();
3048 }
3049 #endif
3050
3051 if (run_ret < 0) {
3052 if (run_ret == -EINTR || run_ret == -EAGAIN) {
3053 trace_kvm_io_window_exit();
3054 kvm_eat_signals(cpu);
3055 ret = EXCP_INTERRUPT;
3056 break;
3057 }
3058 if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
3059 fprintf(stderr, "error: kvm run failed %s\n",
3060 strerror(-run_ret));
3061 #ifdef TARGET_PPC
3062 if (run_ret == -EBUSY) {
3063 fprintf(stderr,
3064 "This is probably because your SMT is enabled.\n"
3065 "VCPU can only run on primary threads with all "
3066 "secondary threads offline.\n");
3067 }
3068 #endif
3069 ret = -1;
3070 break;
3071 }
3072 }
3073
3074 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
3075 switch (run->exit_reason) {
3076 case KVM_EXIT_IO:
3077 /* Called outside BQL */
3078 kvm_handle_io(run->io.port, attrs,
3079 (uint8_t *)run + run->io.data_offset,
3080 run->io.direction,
3081 run->io.size,
3082 run->io.count);
3083 ret = 0;
3084 break;
3085 case KVM_EXIT_MMIO:
3086 /* Called outside BQL */
3087 address_space_rw(&address_space_memory,
3088 run->mmio.phys_addr, attrs,
3089 run->mmio.data,
3090 run->mmio.len,
3091 run->mmio.is_write);
3092 ret = 0;
3093 break;
3094 case KVM_EXIT_IRQ_WINDOW_OPEN:
3095 ret = EXCP_INTERRUPT;
3096 break;
3097 case KVM_EXIT_SHUTDOWN:
3098 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3099 ret = EXCP_INTERRUPT;
3100 break;
3101 case KVM_EXIT_UNKNOWN:
3102 fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
3103 (uint64_t)run->hw.hardware_exit_reason);
3104 ret = -1;
3105 break;
3106 case KVM_EXIT_INTERNAL_ERROR:
3107 ret = kvm_handle_internal_error(cpu, run);
3108 break;
3109 case KVM_EXIT_DIRTY_RING_FULL:
3110 /*
3111 * We shouldn't continue if the dirty ring of this vcpu is
3112 * still full; we got kicked by KVM_RESET_DIRTY_RINGS.
3113 */
3114 trace_kvm_dirty_ring_full(cpu->cpu_index);
3115 bql_lock();
3116 /*
3117 * We throttle the vCPU by making it sleep once it exits from the
3118 * kernel due to a full dirty ring. In the dirtylimit scenario,
3119 * reaping all vCPUs after a single vCPU's dirty ring gets full
3120 * would miss that sleep, so only reap the vCPU whose ring is full.
3121 */
3122 if (dirtylimit_in_service()) {
3123 kvm_dirty_ring_reap(kvm_state, cpu);
3124 } else {
3125 kvm_dirty_ring_reap(kvm_state, NULL);
3126 }
3127 bql_unlock();
3128 dirtylimit_vcpu_execute(cpu);
3129 ret = 0;
3130 break;
3131 case KVM_EXIT_SYSTEM_EVENT:
3132 trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
3133 switch (run->system_event.type) {
3134 case KVM_SYSTEM_EVENT_SHUTDOWN:
3135 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3136 ret = EXCP_INTERRUPT;
3137 break;
3138 case KVM_SYSTEM_EVENT_RESET:
3139 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3140 ret = EXCP_INTERRUPT;
3141 break;
3142 case KVM_SYSTEM_EVENT_CRASH:
3143 kvm_cpu_synchronize_state(cpu);
3144 bql_lock();
3145 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3146 bql_unlock();
3147 ret = 0;
3148 break;
3149 default:
3150 ret = kvm_arch_handle_exit(cpu, run);
3151 break;
3152 }
3153 break;
3154 case KVM_EXIT_MEMORY_FAULT:
3155 trace_kvm_memory_fault(run->memory_fault.gpa,
3156 run->memory_fault.size,
3157 run->memory_fault.flags);
3158 if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
3159 error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
3160 (uint64_t)run->memory_fault.flags);
3161 ret = -1;
3162 break;
3163 }
3164 ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
3165 run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
3166 break;
3167 default:
3168 ret = kvm_arch_handle_exit(cpu, run);
3169 break;
3170 }
3171 } while (ret == 0);
3172
3173 cpu_exec_end(cpu);
3174 bql_lock();
3175
3176 if (ret < 0) {
3177 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3178 vm_stop(RUN_STATE_INTERNAL_ERROR);
3179 }
3180
3181 qatomic_set(&cpu->exit_request, 0);
3182 return ret;
3183 }
3184
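/*
 * ioctl wrappers for the system, VM, vCPU and device file descriptors.
 * They normalize failures to -errno and bracket the VM/vCPU/device
 * calls with accel_ioctl_begin()/end() so they can be inhibited while
 * memory slots are being updated.
 */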
3185 int kvm_ioctl(KVMState *s, int type, ...)
3186 {
3187 int ret;
3188 void *arg;
3189 va_list ap;
3190
3191 va_start(ap, type);
3192 arg = va_arg(ap, void *);
3193 va_end(ap);
3194
3195 trace_kvm_ioctl(type, arg);
3196 ret = ioctl(s->fd, type, arg);
3197 if (ret == -1) {
3198 ret = -errno;
3199 }
3200 return ret;
3201 }
3202
3203 int kvm_vm_ioctl(KVMState *s, int type, ...)
3204 {
3205 int ret;
3206 void *arg;
3207 va_list ap;
3208
3209 va_start(ap, type);
3210 arg = va_arg(ap, void *);
3211 va_end(ap);
3212
3213 trace_kvm_vm_ioctl(type, arg);
3214 accel_ioctl_begin();
3215 ret = ioctl(s->vmfd, type, arg);
3216 accel_ioctl_end();
3217 if (ret == -1) {
3218 ret = -errno;
3219 }
3220 return ret;
3221 }
3222
3223 int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
3224 {
3225 int ret;
3226 void *arg;
3227 va_list ap;
3228
3229 va_start(ap, type);
3230 arg = va_arg(ap, void *);
3231 va_end(ap);
3232
3233 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3234 accel_cpu_ioctl_begin(cpu);
3235 ret = ioctl(cpu->kvm_fd, type, arg);
3236 accel_cpu_ioctl_end(cpu);
3237 if (ret == -1) {
3238 ret = -errno;
3239 }
3240 return ret;
3241 }
3242
3243 int kvm_device_ioctl(int fd, int type, ...)
3244 {
3245 int ret;
3246 void *arg;
3247 va_list ap;
3248
3249 va_start(ap, type);
3250 arg = va_arg(ap, void *);
3251 va_end(ap);
3252
3253 trace_kvm_device_ioctl(fd, type, arg);
3254 accel_ioctl_begin();
3255 ret = ioctl(fd, type, arg);
3256 accel_ioctl_end();
3257 if (ret == -1) {
3258 ret = -errno;
3259 }
3260 return ret;
3261 }
3262
3263 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3264 {
3265 int ret;
3266 struct kvm_device_attr attribute = {
3267 .group = group,
3268 .attr = attr,
3269 };
3270
3271 if (!kvm_vm_attributes_allowed) {
3272 return 0;
3273 }
3274
3275 ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3276 /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3277 return ret ? 0 : 1;
3278 }
3279
3280 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3281 {
3282 struct kvm_device_attr attribute = {
3283 .group = group,
3284 .attr = attr,
3285 .flags = 0,
3286 };
3287
3288 return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3289 }
3290
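/*
 * Read or write a single device attribute via KVM_GET/SET_DEVICE_ATTR,
 * reporting failures through @errp. An illustrative call (SOME_GROUP and
 * SOME_ATTR are hypothetical placeholders, not real KVM constants):
 *
 *     uint64_t val;
 *     Error *err = NULL;
 *     if (kvm_device_access(dev_fd, SOME_GROUP, SOME_ATTR,
 *                           &val, false, &err) < 0) {
 *         error_report_err(err);
 *     }
 */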
3291 int kvm_device_access(int fd, int group, uint64_t attr,
3292 void *val, bool write, Error **errp)
3293 {
3294 struct kvm_device_attr kvmattr;
3295 int err;
3296
3297 kvmattr.flags = 0;
3298 kvmattr.group = group;
3299 kvmattr.attr = attr;
3300 kvmattr.addr = (uintptr_t)val;
3301
3302 err = kvm_device_ioctl(fd,
3303 write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3304 &kvmattr);
3305 if (err < 0) {
3306 error_setg_errno(errp, -err,
3307 "KVM_%s_DEVICE_ATTR failed: Group %d "
3308 "attr 0x%016" PRIx64,
3309 write ? "SET" : "GET", group, attr);
3310 }
3311 return err;
3312 }
3313
3314 bool kvm_has_sync_mmu(void)
3315 {
3316 return kvm_state->sync_mmu;
3317 }
3318
3319 int kvm_has_vcpu_events(void)
3320 {
3321 return kvm_state->vcpu_events;
3322 }
3323
3324 int kvm_max_nested_state_length(void)
3325 {
3326 return kvm_state->max_nested_state_len;
3327 }
3328
3329 int kvm_has_gsi_routing(void)
3330 {
3331 #ifdef KVM_CAP_IRQ_ROUTING
3332 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3333 #else
3334 return false;
3335 #endif
3336 }
3337
3338 bool kvm_arm_supports_user_irq(void)
3339 {
3340 return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3341 }
3342
3343 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
3344 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
3345 {
3346 struct kvm_sw_breakpoint *bp;
3347
3348 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3349 if (bp->pc == pc) {
3350 return bp;
3351 }
3352 }
3353 return NULL;
3354 }
3355
3356 int kvm_sw_breakpoints_active(CPUState *cpu)
3357 {
3358 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3359 }
3360
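/*
 * KVM_SET_GUEST_DEBUG must be issued from the vCPU's own thread, so
 * kvm_update_guest_debug() bundles the kvm_guest_debug argument and an
 * error slot into this struct and dispatches the ioctl via run_on_cpu().
 */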
3361 struct kvm_set_guest_debug_data {
3362 struct kvm_guest_debug dbg;
3363 int err;
3364 };
3365
3366 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3367 {
3368 struct kvm_set_guest_debug_data *dbg_data =
3369 (struct kvm_set_guest_debug_data *) data.host_ptr;
3370
3371 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3372 &dbg_data->dbg);
3373 }
3374
3375 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3376 {
3377 struct kvm_set_guest_debug_data data;
3378
3379 data.dbg.control = reinject_trap;
3380
3381 if (cpu->singlestep_enabled) {
3382 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3383
3384 if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3385 data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3386 }
3387 }
3388 kvm_arch_update_guest_debug(cpu, &data.dbg);
3389
3390 run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3391 RUN_ON_CPU_HOST_PTR(&data));
3392 return data.err;
3393 }
3394
3395 bool kvm_supports_guest_debug(void)
3396 {
3397 /* probed during kvm_init() */
3398 return kvm_has_guest_debug;
3399 }
3400
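/*
 * Software breakpoints are reference-counted per guest address and
 * planted by the architecture code (typically by patching in a trap
 * instruction); hardware breakpoints go straight to the arch hooks.
 * Either way, every vCPU's debug state is refreshed afterwards so the
 * change takes effect on the next KVM_RUN.
 */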
3401 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3402 {
3403 struct kvm_sw_breakpoint *bp;
3404 int err;
3405
3406 if (type == GDB_BREAKPOINT_SW) {
3407 bp = kvm_find_sw_breakpoint(cpu, addr);
3408 if (bp) {
3409 bp->use_count++;
3410 return 0;
3411 }
3412
3413 bp = g_new(struct kvm_sw_breakpoint, 1);
3414 bp->pc = addr;
3415 bp->use_count = 1;
3416 err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3417 if (err) {
3418 g_free(bp);
3419 return err;
3420 }
3421
3422 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3423 } else {
3424 err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3425 if (err) {
3426 return err;
3427 }
3428 }
3429
3430 CPU_FOREACH(cpu) {
3431 err = kvm_update_guest_debug(cpu, 0);
3432 if (err) {
3433 return err;
3434 }
3435 }
3436 return 0;
3437 }
3438
3439 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3440 {
3441 struct kvm_sw_breakpoint *bp;
3442 int err;
3443
3444 if (type == GDB_BREAKPOINT_SW) {
3445 bp = kvm_find_sw_breakpoint(cpu, addr);
3446 if (!bp) {
3447 return -ENOENT;
3448 }
3449
3450 if (bp->use_count > 1) {
3451 bp->use_count--;
3452 return 0;
3453 }
3454
3455 err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3456 if (err) {
3457 return err;
3458 }
3459
3460 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3461 g_free(bp);
3462 } else {
3463 err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3464 if (err) {
3465 return err;
3466 }
3467 }
3468
3469 CPU_FOREACH(cpu) {
3470 err = kvm_update_guest_debug(cpu, 0);
3471 if (err) {
3472 return err;
3473 }
3474 }
3475 return 0;
3476 }
3477
3478 void kvm_remove_all_breakpoints(CPUState *cpu)
3479 {
3480 struct kvm_sw_breakpoint *bp, *next;
3481 KVMState *s = cpu->kvm_state;
3482 CPUState *tmpcpu;
3483
3484 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3485 if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3486 /* Try harder to find a CPU that currently sees the breakpoint. */
3487 CPU_FOREACH(tmpcpu) {
3488 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3489 break;
3490 }
3491 }
3492 }
3493 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3494 g_free(bp);
3495 }
3496 kvm_arch_remove_all_hw_breakpoints();
3497
3498 CPU_FOREACH(cpu) {
3499 kvm_update_guest_debug(cpu, 0);
3500 }
3501 }
3502
3503 #endif /* TARGET_KVM_HAVE_GUEST_DEBUG */
3504
3505 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3506 {
3507 KVMState *s = kvm_state;
3508 struct kvm_signal_mask *sigmask;
3509 int r;
3510
3511 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3512
3513 sigmask->len = s->sigmask_len;
3514 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3515 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3516 g_free(sigmask);
3517
3518 return r;
3519 }
3520
3521 static void kvm_ipi_signal(int sig)
3522 {
3523 if (current_cpu) {
3524 assert(kvm_immediate_exit);
3525 kvm_cpu_kick(current_cpu);
3526 }
3527 }
3528
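/*
 * Per-vCPU signal setup: install the SIG_IPI handler and decide how
 * kicks are delivered. With KVM_CAP_IMMEDIATE_EXIT the thread unblocks
 * SIG_IPI and the handler simply sets run->immediate_exit; on older
 * kernels SIG_IPI stays blocked and is only unblocked atomically inside
 * KVM_RUN via KVM_SET_SIGNAL_MASK, forcing an -EINTR exit when pending.
 */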
3529 void kvm_init_cpu_signals(CPUState *cpu)
3530 {
3531 int r;
3532 sigset_t set;
3533 struct sigaction sigact;
3534
3535 memset(&sigact, 0, sizeof(sigact));
3536 sigact.sa_handler = kvm_ipi_signal;
3537 sigaction(SIG_IPI, &sigact, NULL);
3538
3539 pthread_sigmask(SIG_BLOCK, NULL, &set);
3540 #if defined KVM_HAVE_MCE_INJECTION
3541 sigdelset(&set, SIGBUS);
3542 pthread_sigmask(SIG_SETMASK, &set, NULL);
3543 #endif
3544 sigdelset(&set, SIG_IPI);
3545 if (kvm_immediate_exit) {
3546 r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3547 } else {
3548 r = kvm_set_signal_mask(cpu, &set);
3549 }
3550 if (r) {
3551 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3552 exit(1);
3553 }
3554 }
3555
3556 /* Called asynchronously in VCPU thread. */
3557 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3558 {
3559 #ifdef KVM_HAVE_MCE_INJECTION
3560 if (have_sigbus_pending) {
3561 return 1;
3562 }
3563 have_sigbus_pending = true;
3564 pending_sigbus_addr = addr;
3565 pending_sigbus_code = code;
3566 qatomic_set(&cpu->exit_request, 1);
3567 return 0;
3568 #else
3569 return 1;
3570 #endif
3571 }
3572
3573 /* Called synchronously (via signalfd) in main thread. */
3574 int kvm_on_sigbus(int code, void *addr)
3575 {
3576 #ifdef KVM_HAVE_MCE_INJECTION
3577 /* An Action Required MCE kills the process if SIGBUS is blocked. Because
3578 * that's what happens in the I/O thread, where we handle MCEs via signalfd,
3579 * we can only get Action Optional here.
3580 */
3581 assert(code != BUS_MCEERR_AR);
3582 kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3583 return 0;
3584 #else
3585 return 1;
3586 #endif
3587 }
3588
3589 int kvm_create_device(KVMState *s, uint64_t type, bool test)
3590 {
3591 int ret;
3592 struct kvm_create_device create_dev;
3593
3594 create_dev.type = type;
3595 create_dev.fd = -1;
3596 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3597
3598 if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3599 return -ENOTSUP;
3600 }
3601
3602 ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3603 if (ret) {
3604 return ret;
3605 }
3606
3607 return test ? 0 : create_dev.fd;
3608 }
3609
3610 bool kvm_device_supported(int vmfd, uint64_t type)
3611 {
3612 struct kvm_create_device create_dev = {
3613 .type = type,
3614 .fd = -1,
3615 .flags = KVM_CREATE_DEVICE_TEST,
3616 };
3617
3618 if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3619 return false;
3620 }
3621
3622 return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3623 }
3624
3625 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3626 {
3627 struct kvm_one_reg reg;
3628 int r;
3629
3630 reg.id = id;
3631 reg.addr = (uintptr_t) source;
3632 r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3633 if (r) {
3634 trace_kvm_failed_reg_set(id, strerror(-r));
3635 }
3636 return r;
3637 }
3638
3639 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3640 {
3641 struct kvm_one_reg reg;
3642 int r;
3643
3644 reg.id = id;
3645 reg.addr = (uintptr_t) target;
3646 r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3647 if (r) {
3648 trace_kvm_failed_reg_get(id, strerror(-r));
3649 }
3650 return r;
3651 }
3652
3653 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3654 hwaddr start_addr, hwaddr size)
3655 {
3656 KVMState *kvm = KVM_STATE(ms->accelerator);
3657 int i;
3658
3659 for (i = 0; i < kvm->nr_as; ++i) {
3660 if (kvm->as[i].as == as && kvm->as[i].ml) {
3661 size = MIN(kvm_max_slot_size, size);
3662 return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3663 start_addr, size);
3664 }
3665 }
3666
3667 return false;
3668 }
3669
3670 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3671 const char *name, void *opaque,
3672 Error **errp)
3673 {
3674 KVMState *s = KVM_STATE(obj);
3675 int64_t value = s->kvm_shadow_mem;
3676
3677 visit_type_int(v, name, &value, errp);
3678 }
3679
3680 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3681 const char *name, void *opaque,
3682 Error **errp)
3683 {
3684 KVMState *s = KVM_STATE(obj);
3685 int64_t value;
3686
3687 if (s->fd != -1) {
3688 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3689 return;
3690 }
3691
3692 if (!visit_type_int(v, name, &value, errp)) {
3693 return;
3694 }
3695
3696 s->kvm_shadow_mem = value;
3697 }
3698
3699 static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3700 const char *name, void *opaque,
3701 Error **errp)
3702 {
3703 KVMState *s = KVM_STATE(obj);
3704 OnOffSplit mode;
3705
3706 if (s->fd != -1) {
3707 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3708 return;
3709 }
3710
3711 if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3712 return;
3713 }
3714 switch (mode) {
3715 case ON_OFF_SPLIT_ON:
3716 s->kernel_irqchip_allowed = true;
3717 s->kernel_irqchip_required = true;
3718 s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3719 break;
3720 case ON_OFF_SPLIT_OFF:
3721 s->kernel_irqchip_allowed = false;
3722 s->kernel_irqchip_required = false;
3723 s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3724 break;
3725 case ON_OFF_SPLIT_SPLIT:
3726 s->kernel_irqchip_allowed = true;
3727 s->kernel_irqchip_required = true;
3728 s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3729 break;
3730 default:
3731 /* The value was checked in visit_type_OnOffSplit() above. If
3732 * we get here, then something is wrong in QEMU.
3733 */
3734 abort();
3735 }
3736 }
3737
3738 bool kvm_kernel_irqchip_allowed(void)
3739 {
3740 return kvm_state->kernel_irqchip_allowed;
3741 }
3742
3743 bool kvm_kernel_irqchip_required(void)
3744 {
3745 return kvm_state->kernel_irqchip_required;
3746 }
3747
3748 bool kvm_kernel_irqchip_split(void)
3749 {
3750 return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3751 }
3752
3753 static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3754 const char *name, void *opaque,
3755 Error **errp)
3756 {
3757 KVMState *s = KVM_STATE(obj);
3758 uint32_t value = s->kvm_dirty_ring_size;
3759
3760 visit_type_uint32(v, name, &value, errp);
3761 }
3762
3763 static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3764 const char *name, void *opaque,
3765 Error **errp)
3766 {
3767 KVMState *s = KVM_STATE(obj);
3768 uint32_t value;
3769
3770 if (s->fd != -1) {
3771 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3772 return;
3773 }
3774
3775 if (!visit_type_uint32(v, name, &value, errp)) {
3776 return;
3777 }
3778 if (value & (value - 1)) {
3779 error_setg(errp, "dirty-ring-size must be a power of two.");
3780 return;
3781 }
3782
3783 s->kvm_dirty_ring_size = value;
3784 }
3785
3786 static char *kvm_get_device(Object *obj,
3787 Error **errp G_GNUC_UNUSED)
3788 {
3789 KVMState *s = KVM_STATE(obj);
3790
3791 return g_strdup(s->device);
3792 }
3793
3794 static void kvm_set_device(Object *obj,
3795 const char *value,
3796 Error **errp G_GNUC_UNUSED)
3797 {
3798 KVMState *s = KVM_STATE(obj);
3799
3800 g_free(s->device);
3801 s->device = g_strdup(value);
3802 }
3803
3804 static void kvm_accel_instance_init(Object *obj)
3805 {
3806 KVMState *s = KVM_STATE(obj);
3807
3808 s->fd = -1;
3809 s->vmfd = -1;
3810 s->kvm_shadow_mem = -1;
3811 s->kernel_irqchip_allowed = true;
3812 s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3813 /* KVM dirty ring is by default off */
3814 s->kvm_dirty_ring_size = 0;
3815 s->kvm_dirty_ring_with_bitmap = false;
3816 s->kvm_eager_split_size = 0;
3817 s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
3818 s->notify_window = 0;
3819 s->xen_version = 0;
3820 s->xen_gnttab_max_frames = 64;
3821 s->xen_evtchn_max_pirq = 256;
3822 s->device = NULL;
3823 }
3824
3825 /**
3826 * kvm_gdbstub_sstep_flags():
3827 *
3828 * Returns: SSTEP_* flags that KVM supports for guest debug. The
3829 * support is probed during kvm_init().
3830 */
3831 static int kvm_gdbstub_sstep_flags(void)
3832 {
3833 return kvm_sstep_flags;
3834 }
3835
3836 static void kvm_accel_class_init(ObjectClass *oc, void *data)
3837 {
3838 AccelClass *ac = ACCEL_CLASS(oc);
3839 ac->name = "KVM";
3840 ac->init_machine = kvm_init;
3841 ac->has_memory = kvm_accel_has_memory;
3842 ac->allowed = &kvm_allowed;
3843 ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;
3844
3845 object_class_property_add(oc, "kernel-irqchip", "on|off|split",
3846 NULL, kvm_set_kernel_irqchip,
3847 NULL, NULL);
3848 object_class_property_set_description(oc, "kernel-irqchip",
3849 "Configure KVM in-kernel irqchip");
3850
3851 object_class_property_add(oc, "kvm-shadow-mem", "int",
3852 kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3853 NULL, NULL);
3854 object_class_property_set_description(oc, "kvm-shadow-mem",
3855 "KVM shadow MMU size");
3856
3857 object_class_property_add(oc, "dirty-ring-size", "uint32",
3858 kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
3859 NULL, NULL);
3860 object_class_property_set_description(oc, "dirty-ring-size",
3861 "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
3862
3863 object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
3864 object_class_property_set_description(oc, "device",
3865 "Path to the device node to use (default: /dev/kvm)");
3866
3867 kvm_arch_accel_class_init(oc);
3868 }
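/*
 * A sketch of how these class properties surface on the command line
 * (illustrative values, not a recommended configuration):
 *
 *   qemu-system-x86_64 -accel kvm,kernel-irqchip=split,\
 *       dirty-ring-size=4096,device=/dev/kvm ...
 *
 * Each setter runs before kvm_init() opens the device, which is why
 * they all reject changes once s->fd is no longer -1.
 */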
3869
3870 static const TypeInfo kvm_accel_type = {
3871 .name = TYPE_KVM_ACCEL,
3872 .parent = TYPE_ACCEL,
3873 .instance_init = kvm_accel_instance_init,
3874 .class_init = kvm_accel_class_init,
3875 .instance_size = sizeof(KVMState),
3876 };
3877
3878 static void kvm_type_init(void)
3879 {
3880 type_register_static(&kvm_accel_type);
3881 }
3882
3883 type_init(kvm_type_init);
3884
3885 typedef struct StatsArgs {
3886 union StatsResultsType {
3887 StatsResultList **stats;
3888 StatsSchemaList **schema;
3889 } result;
3890 strList *names;
3891 Error **errp;
3892 } StatsArgs;
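/*
 * StatsArgs lets the per-vCPU helpers below share one signature: the
 * 'result' union points at a StatsResultList for query_stats_vcpu()
 * and at a StatsSchemaList for query_stats_schema_vcpu(); only the
 * member matching the request is touched.
 */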
3893
3894 static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
3895 uint64_t *stats_data,
3896 StatsList *stats_list,
3897 Error **errp)
3898 {
3900 Stats *stats;
3901 uint64List *val_list = NULL;
3902
3903 /* Only add stats that we understand. */
3904 switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
3905 case KVM_STATS_TYPE_CUMULATIVE:
3906 case KVM_STATS_TYPE_INSTANT:
3907 case KVM_STATS_TYPE_PEAK:
3908 case KVM_STATS_TYPE_LINEAR_HIST:
3909 case KVM_STATS_TYPE_LOG_HIST:
3910 break;
3911 default:
3912 return stats_list;
3913 }
3914
3915 switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
3916 case KVM_STATS_UNIT_NONE:
3917 case KVM_STATS_UNIT_BYTES:
3918 case KVM_STATS_UNIT_CYCLES:
3919 case KVM_STATS_UNIT_SECONDS:
3920 case KVM_STATS_UNIT_BOOLEAN:
3921 break;
3922 default:
3923 return stats_list;
3924 }
3925
3926 switch (pdesc->flags & KVM_STATS_BASE_MASK) {
3927 case KVM_STATS_BASE_POW10:
3928 case KVM_STATS_BASE_POW2:
3929 break;
3930 default:
3931 return stats_list;
3932 }
3933
3934 /* Alloc and populate data list */
3935 stats = g_new0(Stats, 1);
3936 stats->name = g_strdup(pdesc->name);
3937 stats->value = g_new0(StatsValue, 1);
3938
3939 if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
3940 stats->value->u.boolean = *stats_data;
3941 stats->value->type = QTYPE_QBOOL;
3942 } else if (pdesc->size == 1) {
3943 stats->value->u.scalar = *stats_data;
3944 stats->value->type = QTYPE_QNUM;
3945 } else {
3946 int i;
3947 for (i = 0; i < pdesc->size; i++) {
3948 QAPI_LIST_PREPEND(val_list, stats_data[i]);
3949 }
3950 stats->value->u.list = val_list;
3951 stats->value->type = QTYPE_QLIST;
3952 }
3953
3954 QAPI_LIST_PREPEND(stats_list, stats);
3955 return stats_list;
3956 }
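/*
 * Sketch of the resulting QAPI values (names and numbers are made up
 * for illustration): a scalar counter comes back as
 *   { "name": "exits", "value": 123456 }
 * while a histogram (pdesc->size > 1) comes back as a list:
 *   { "name": "halt_poll_success_hist", "value": [ ... ] }
 * Note that QAPI_LIST_PREPEND builds val_list in reverse of the
 * stats_data index order.
 */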
3957
3958 static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
3959 StatsSchemaValueList *list,
3960 Error **errp)
3961 {
3962 StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
3963 schema_entry->value = g_new0(StatsSchemaValue, 1);
3964
3965 switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
3966 case KVM_STATS_TYPE_CUMULATIVE:
3967 schema_entry->value->type = STATS_TYPE_CUMULATIVE;
3968 break;
3969 case KVM_STATS_TYPE_INSTANT:
3970 schema_entry->value->type = STATS_TYPE_INSTANT;
3971 break;
3972 case KVM_STATS_TYPE_PEAK:
3973 schema_entry->value->type = STATS_TYPE_PEAK;
3974 break;
3975 case KVM_STATS_TYPE_LINEAR_HIST:
3976 schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
3977 schema_entry->value->bucket_size = pdesc->bucket_size;
3978 schema_entry->value->has_bucket_size = true;
3979 break;
3980 case KVM_STATS_TYPE_LOG_HIST:
3981 schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
3982 break;
3983 default:
3984 goto exit;
3985 }
3986
3987 switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
3988 case KVM_STATS_UNIT_NONE:
3989 break;
3990 case KVM_STATS_UNIT_BOOLEAN:
3991 schema_entry->value->has_unit = true;
3992 schema_entry->value->unit = STATS_UNIT_BOOLEAN;
3993 break;
3994 case KVM_STATS_UNIT_BYTES:
3995 schema_entry->value->has_unit = true;
3996 schema_entry->value->unit = STATS_UNIT_BYTES;
3997 break;
3998 case KVM_STATS_UNIT_CYCLES:
3999 schema_entry->value->has_unit = true;
4000 schema_entry->value->unit = STATS_UNIT_CYCLES;
4001 break;
4002 case KVM_STATS_UNIT_SECONDS:
4003 schema_entry->value->has_unit = true;
4004 schema_entry->value->unit = STATS_UNIT_SECONDS;
4005 break;
4006 default:
4007 goto exit;
4008 }
4009
4010 schema_entry->value->exponent = pdesc->exponent;
4011 if (pdesc->exponent) {
4012 switch (pdesc->flags & KVM_STATS_BASE_MASK) {
4013 case KVM_STATS_BASE_POW10:
4014 schema_entry->value->has_base = true;
4015 schema_entry->value->base = 10;
4016 break;
4017 case KVM_STATS_BASE_POW2:
4018 schema_entry->value->has_base = true;
4019 schema_entry->value->base = 2;
4020 break;
4021 default:
4022 goto exit;
4023 }
4024 }
4025
4026 schema_entry->value->name = g_strdup(pdesc->name);
4027 schema_entry->next = list;
4028 return schema_entry;
4029 exit:
4030 g_free(schema_entry->value);
4031 g_free(schema_entry);
4032 return list;
4033 }
4034
4035 /* Cached stats descriptors */
4036 typedef struct StatsDescriptors {
4037 const char *ident; /* cache key, currently the StatsTarget */
4038 struct kvm_stats_desc *kvm_stats_desc;
4039 struct kvm_stats_header kvm_stats_header;
4040 QTAILQ_ENTRY(StatsDescriptors) next;
4041 } StatsDescriptors;
4042
4043 static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
4044 QTAILQ_HEAD_INITIALIZER(stats_descriptors);
4045
4046 /*
4047 * Return the descriptors for 'target': either the cached ones, if they
4048 * have already been read, or ones freshly retrieved from 'stats_fd'.
4049 */
4050 static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
4051 Error **errp)
4052 {
4053 StatsDescriptors *descriptors;
4054 const char *ident;
4055 struct kvm_stats_desc *kvm_stats_desc;
4056 struct kvm_stats_header *kvm_stats_header;
4057 size_t size_desc;
4058 ssize_t ret;
4059
4060 ident = StatsTarget_str(target);
4061 QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
4062 if (g_str_equal(descriptors->ident, ident)) {
4063 return descriptors;
4064 }
4065 }
4066
4067 descriptors = g_new0(StatsDescriptors, 1);
4068
4069 /* Read stats header */
4070 kvm_stats_header = &descriptors->kvm_stats_header;
4071 ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
4072 if (ret != sizeof(*kvm_stats_header)) {
4073 error_setg(errp, "KVM stats: failed to read stats header: "
4074 "expected %zu actual %zu",
4075 sizeof(*kvm_stats_header), ret);
4076 g_free(descriptors);
4077 return NULL;
4078 }
4079 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4080
4081 /* Read stats descriptors */
4082 kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
4083 ret = pread(stats_fd, kvm_stats_desc,
4084 size_desc * kvm_stats_header->num_desc,
4085 kvm_stats_header->desc_offset);
4086
4087 if (ret != size_desc * kvm_stats_header->num_desc) {
4088 error_setg(errp, "KVM stats: failed to read stats descriptors: "
4089 "expected %zu actual %zu",
4090 size_desc * kvm_stats_header->num_desc, ret);
4091 g_free(descriptors);
4092 g_free(kvm_stats_desc);
4093 return NULL;
4094 }
4095 descriptors->kvm_stats_desc = kvm_stats_desc;
4096 descriptors->ident = ident;
4097 QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
4098 return descriptors;
4099 }
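/*
 * For reference, the pread() offsets above follow the layout of the
 * binary stats file descriptor as defined by the KVM ABI:
 *
 *   offset 0             struct kvm_stats_header
 *   header.desc_offset   num_desc descriptors, each occupying
 *                        sizeof(struct kvm_stats_desc) + name_size bytes
 *   header.data_offset   the values themselves, addressed per
 *                        descriptor through pdesc->offset
 */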
4100
4101 static void query_stats(StatsResultList **result, StatsTarget target,
4102 strList *names, int stats_fd, CPUState *cpu,
4103 Error **errp)
4104 {
4105 struct kvm_stats_desc *kvm_stats_desc;
4106 struct kvm_stats_header *kvm_stats_header;
4107 StatsDescriptors *descriptors;
4108 g_autofree uint64_t *stats_data = NULL;
4109 struct kvm_stats_desc *pdesc;
4110 StatsList *stats_list = NULL;
4111 size_t size_desc, size_data = 0;
4112 ssize_t ret;
4113 int i;
4114
4115 descriptors = find_stats_descriptors(target, stats_fd, errp);
4116 if (!descriptors) {
4117 return;
4118 }
4119
4120 kvm_stats_header = &descriptors->kvm_stats_header;
4121 kvm_stats_desc = descriptors->kvm_stats_desc;
4122 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4123
4124 /* Tally the total data size */
4125 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4126 pdesc = (void *)kvm_stats_desc + i * size_desc;
4127 size_data += pdesc->size * sizeof(*stats_data);
4128 }
4129
4130 stats_data = g_malloc0(size_data);
4131 ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
4132
4133 if (ret != size_data) {
4134 error_setg(errp, "KVM stats: failed to read data: "
4135 "expected %zu actual %zu", size_data, ret);
4136 return;
4137 }
4138
4139 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4140 uint64_t *stats;
4141 pdesc = (void *)kvm_stats_desc + i * size_desc;
4142
4143 /* Add entry to the list */
4144 stats = (void *)stats_data + pdesc->offset;
4145 if (!apply_str_list_filter(pdesc->name, names)) {
4146 continue;
4147 }
4148 stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
4149 }
4150
4151 if (!stats_list) {
4152 return;
4153 }
4154
4155 switch (target) {
4156 case STATS_TARGET_VM:
4157 add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
4158 break;
4159 case STATS_TARGET_VCPU:
4160 add_stats_entry(result, STATS_PROVIDER_KVM,
4161 cpu->parent_obj.canonical_path,
4162 stats_list);
4163 break;
4164 default:
4165 g_assert_not_reached();
4166 }
4167 }
4168
4169 static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
4170 int stats_fd, Error **errp)
4171 {
4172 struct kvm_stats_desc *kvm_stats_desc;
4173 struct kvm_stats_header *kvm_stats_header;
4174 StatsDescriptors *descriptors;
4175 struct kvm_stats_desc *pdesc;
4176 StatsSchemaValueList *stats_list = NULL;
4177 size_t size_desc;
4178 int i;
4179
4180 descriptors = find_stats_descriptors(target, stats_fd, errp);
4181 if (!descriptors) {
4182 return;
4183 }
4184
4185 kvm_stats_header = &descriptors->kvm_stats_header;
4186 kvm_stats_desc = descriptors->kvm_stats_desc;
4187 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4188
4189 /* Walk the descriptors and build the schema entry list */
4190 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4191 pdesc = (void *)kvm_stats_desc + i * size_desc;
4192 stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
4193 }
4194
4195 add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
4196 }
4197
4198 static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4199 {
4200 int stats_fd = cpu->kvm_vcpu_stats_fd;
4201 Error *local_err = NULL;
4202
4203 if (stats_fd == -1) {
4204 error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4205 error_propagate(kvm_stats_args->errp, local_err);
4206 return;
4207 }
4208 query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
4209 kvm_stats_args->names, stats_fd, cpu,
4210 kvm_stats_args->errp);
4211 }
4212
4213 static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4214 {
4215 int stats_fd = cpu->kvm_vcpu_stats_fd;
4216 Error *local_err = NULL;
4217
4218 if (stats_fd == -1) {
4219 error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4220 error_propagate(kvm_stats_args->errp, local_err);
4221 return;
4222 }
4223 query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
4224 kvm_stats_args->errp);
4225 }
4226
4227 static void query_stats_cb(StatsResultList **result, StatsTarget target,
4228 strList *names, strList *targets, Error **errp)
4229 {
4230 KVMState *s = kvm_state;
4231 CPUState *cpu;
4232 int stats_fd;
4233
4234 switch (target) {
4235 case STATS_TARGET_VM:
4236 {
4237 stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4238 if (stats_fd == -1) {
4239 error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4240 return;
4241 }
4242 query_stats(result, target, names, stats_fd, NULL, errp);
4243 close(stats_fd);
4244 break;
4245 }
4246 case STATS_TARGET_VCPU:
4247 {
4248 StatsArgs stats_args;
4249 stats_args.result.stats = result;
4250 stats_args.names = names;
4251 stats_args.errp = errp;
4252 CPU_FOREACH(cpu) {
4253 if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
4254 continue;
4255 }
4256 query_stats_vcpu(cpu, &stats_args);
4257 }
4258 break;
4259 }
4260 default:
4261 break;
4262 }
4263 }
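/*
 * query_stats_cb() serves the QMP 'query-stats' command; a minimal
 * request it can handle looks like this (the QOM path is illustrative):
 *
 *   { "execute": "query-stats",
 *     "arguments": { "target": "vcpu",
 *                    "vcpus": [ "/machine/unattached/device[0]" ] } }
 *
 * The 'targets' strList carries those vCPU QOM paths and 'names'
 * optionally filters individual statistics.
 */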
4264
4265 void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
4266 {
4267 StatsArgs stats_args;
4268 KVMState *s = kvm_state;
4269 int stats_fd;
4270
4271 stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4272 if (stats_fd == -1) {
4273 error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4274 return;
4275 }
4276 query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
4277 close(stats_fd);
4278
4279 if (first_cpu) {
4280 stats_args.result.schema = result;
4281 stats_args.errp = errp;
4282 query_stats_schema_vcpu(first_cpu, &stats_args);
4283 }
4284 }
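/*
 * Counterpart for the QMP 'query-stats-schemas' command, e.g.:
 *
 *   { "execute": "query-stats-schemas" }
 *
 * Only first_cpu is queried for the vCPU schema, on the assumption
 * that every vCPU exposes the same set of statistics.
 */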
4285
4286 void kvm_mark_guest_state_protected(void)
4287 {
4288 kvm_state->guest_state_protected = true;
4289 }
4290
4291 int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
4292 {
4293 int fd;
4294 struct kvm_create_guest_memfd guest_memfd = {
4295 .size = size,
4296 .flags = flags,
4297 };
4298
4299 if (!kvm_guest_memfd_supported) {
4300 error_setg(errp, "KVM does not support guest_memfd");
4301 return -1;
4302 }
4303
4304 fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
4305 if (fd < 0) {
4306 error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
4307 return -1;
4308 }
4309
4310 return fd;
4311 }
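/*
 * A hedged usage sketch (the calling context and 'ram_size' are
 * hypothetical, not code from this file):
 *
 *   Error *err = NULL;
 *   int fd = kvm_create_guest_memfd(ram_size, 0, &err);
 *   if (fd < 0) {
 *       error_report_err(err);
 *   }
 *
 * 'flags' is passed straight through to the KVM_CREATE_GUEST_MEMFD
 * ioctl; the returned fd then backs private guest memory for
 * confidential guests.
 */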
4312