/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"

#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/x86.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12
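
/*
 * Note: 0x11/0x12 above are the legacy kvmclock MSR numbers; newer
 * kernels additionally expose the same data through
 * MSR_KVM_WALL_CLOCK_NEW/MSR_KVM_SYSTEM_TIME_NEW (0x4b564d00/0x4b564d01).
 */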

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;
static int has_exception_payload;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })
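
/*
 * MEMORIZE evaluates "fn" the first time it is reached and caches the
 * outcome in "_result"; on later calls it returns the cached value
 * straight from the enclosing function.  kvm_enable_x2apic() below is
 * the only user: the KVM_CAP_X2APIC_API ioctl is issued at most once
 * and its result is kept in has_x2apic_api.
 */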

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
             has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    memset(&msr_data, 0, sizeof(msr_data));
    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}
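
/*
 * Note that cpuid_cache is never freed: the single allocation made here
 * is shared by all subsequent kvm_arch_get_supported_cpuid() lookups for
 * the lifetime of the process.
 */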

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

static bool host_tsx_blacklisted(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_blacklisted()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
        found = true;
    }

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}

uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}
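
/*
 * Worked example for the TRUE_*_CTLS transformation above: a control
 * whose bit is set in the low word ("must be one") is always also set
 * in the high word ("can be one"), so "can_be_one & ~must_be_one"
 * clears it, and the returned mask contains exactly the controls that
 * a guest may freely set to either 0 or 1.
 */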


typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void hardware_memory_error(void *host_addr)
{
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running.  An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                             "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                             addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                            "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                            addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                        "for memory used by QEMU itself instead of guest system!",
                        addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE */
}

static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}
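
/*
 * With KVM_CAP_EXCEPTION_PAYLOAD the payload (the pending DR6 or CR2
 * value) is handed to the kernel together with the pending exception
 * and applied at delivery time; without it, kvm_queue_exception() above
 * has to mimic delivery itself by writing dr[6]/cr[2] immediately.
 */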

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r;

    if (!env->tsc_khz) {
        return 0;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;
    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                       -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

static struct {
    const char *desc;
    struct {
        uint32_t fw;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE},
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE},
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_APIC_ACCESS_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE |
             HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
};
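
/*
 * The table above is consumed by hv_cpuid_check_and_set() below: for each
 * enabled feature it verifies that every .fw/.bits pair is reported by the
 * kernel, merges the bits into env->features[fw], and rejects the
 * configuration if any feature named in .dependencies is missing.
 */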

static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    int max = 7; /* 0x40000000..0x40000005, 0x4000000A */

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max)) == NULL) {
        max++;
    }
    return cpuid;
}

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r)
{
    struct kvm_cpuid_entry2 *entry;
    uint32_t func;
    int reg;

    switch (fw) {
    case FEAT_HYPERV_EAX:
        reg = R_EAX;
        func = HV_CPUID_FEATURES;
        break;
    case FEAT_HYPERV_EDX:
        reg = R_EDX;
        func = HV_CPUID_FEATURES;
        break;
    case FEAT_HV_RECOMM_EAX:
        reg = R_EAX;
        func = HV_CPUID_ENLIGHTMENT_INFO;
        break;
    default:
        return -EINVAL;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return -ENOENT;
    }

    switch (reg) {
    case R_EAX:
        *r = entry->eax;
        break;
    case R_EDX:
        *r = entry->edx;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
                                  int feature)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t r, fw, bits;
    uint64_t deps;
    int i, dep_feat;

    if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
        return 0;
    }

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            fprintf(stderr,
                    "Hyper-V %s requires Hyper-V %s\n",
                    kvm_hyperv_properties[feature].desc,
                    kvm_hyperv_properties[dep_feat].desc);
            return 1;
        }
        deps &= ~(1ull << dep_feat);
    }

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
        fw = kvm_hyperv_properties[feature].flags[i].fw;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!fw) {
            continue;
        }

        if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) {
            if (hyperv_feat_enabled(cpu, feature)) {
                fprintf(stderr,
                        "Hyper-V %s is not supported by kernel\n",
                        kvm_hyperv_properties[feature].desc);
                return 1;
            } else {
                return 0;
            }
        }

        env->features[fw] |= bits;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_features |= BIT(feature);
    }

    return 0;
}

/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent
 * in case of success, errno < 0 in case of failure and 0 when no Hyper-V
 * extensions are enabled.
 */
static int hyperv_handle_properties(CPUState *cs,
                                    struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0;
    int r;

    if (!hyperv_enabled(cpu)) {
        return 0;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ||
        cpu->hyperv_passthrough) {
        uint16_t evmcs_version;

        r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                (uintptr_t)&evmcs_version);

        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) {
            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
                    kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return -ENOSYS;
        }

        if (!r) {
            env->features[FEAT_HV_RECOMM_EAX] |=
                HV_ENLIGHTENED_VMCS_RECOMMENDED;
            env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
        }
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
        cpuid = get_supported_hv_cpuid(cs);
    } else {
        cpuid = get_supported_hv_cpuid_legacy(cs);
    }

    if (cpu->hyperv_passthrough) {
        memcpy(cpuid_ent, &cpuid->entries[0],
               cpuid->nent * sizeof(cpuid->entries[0]));

        c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
        if (c) {
            env->features[FEAT_HYPERV_EAX] = c->eax;
            env->features[FEAT_HYPERV_EBX] = c->ebx;
            env->features[FEAT_HYPERV_EDX] = c->edx;
        }
        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
        if (c) {
            env->features[FEAT_HV_RECOMM_EAX] = c->eax;

            /* hv-spinlocks may have been overridden */
            if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) {
                c->ebx = cpu->hyperv_spinlock_attempts;
            }
        }
        c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
        if (c) {
            env->features[FEAT_HV_NESTED_EAX] = c->eax;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        env->features[FEAT_HV_RECOMM_EAX] |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
        if (c) {
            env->features[FEAT_HV_RECOMM_EAX] |=
                c->eax & HV_NO_NONARCH_CORESHARING;
        }
    }

    /* Features */
    r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT);

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        fprintf(stderr, "Hyper-V %s requires Hyper-V %s\n",
                kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        r |= 1;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    if (r) {
        r = -ENOSYS;
        goto free;
    }

    if (cpu->hyperv_passthrough) {
        /* We already copied all feature words from KVM as is */
        r = cpuid->nent;
        goto free;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    if (!cpu->hyperv_vendor_id) {
        memcpy(signature, "Microsoft Hv", 12);
    } else {
        size_t len = strlen(cpu->hyperv_vendor_id);

        if (len > 12) {
            error_report("hv-vendor-id truncated to 12 characters");
            len = 12;
        }
        memset(signature, 0, 12);
        memcpy(signature, cpu->hyperv_vendor_id, len);
    }
    c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
        HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
    c->eax = signature[0];
    c->ebx = 0;
    c->ecx = 0;
    c->edx = 0;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = 0x00001bbc;
    c->ebx = 0x00060001;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = env->features[FEAT_HYPERV_EAX];
    c->ebx = env->features[FEAT_HYPERV_EBX];
    c->edx = env->features[FEAT_HYPERV_EDX];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = env->features[FEAT_HV_RECOMM_EAX];
    c->ebx = cpu->hyperv_spinlock_attempts;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = 0x40;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        __u32 function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = env->features[FEAT_HV_NESTED_EAX];
    }
    r = cpuid_i;

free:
    g_free(cpuid);

    return r;
}

static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_passthrough_mig_blocker);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_no_nonarch_cs_mig_blocker);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[1];
        } msr_data = {
            .info.nmsrs = 1,
            .entries[0].index = HV_X64_MSR_VP_INDEX,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
        if (ret < 0) {
            return ret;
        }
        assert(ret == 1);

        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
                             KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100
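/*
 * This is QEMU's own sizing of cpuid_data below; the kernel enforces a
 * separate, smaller limit (historically 80 entries), which is why the
 * loops below drop all-zero leaves before issuing KVM_SET_CPUID2.
 */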

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } cpuid_data;
    /*
     * The kernel defines these structs with padding fields so there
     * should be no extra padding in our cpuid_data struct.
     */
    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
                      sizeof(struct kvm_cpuid2) +
                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int max_nested_state_len;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /* vcpu's TSC frequency is either specified by user, or following
     * the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record in env->tsc_khz,
     * so that vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    /* Paravirtualization CPUIDs */
    r = hyperv_handle_properties(cs, cpuid_data.entries);
    if (r < 0) {
        return r;
    } else if (r > 0) {
        cpuid_i = r;
        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
        c->edx = env->features[FEAT_KVM_HINTS];
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
1549 "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (env->nr_dies < 2) {
                break;
            }
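            /* fallthrough: leaf 0x1f is enumerated the same way as 4/0xb/0xd */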
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                if (i == 0x1f && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        case 0x7:
        case 0x14: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
                        unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }

    if (!env->user_tsc_khz) {
        if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
            invtsc_mig_blocker == NULL) {
            error_setg(&invtsc_mig_blocker,
                       "State blocked by non-migratable CPU device"
                       " (invtsc flag)");
            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
            if (local_err) {
                error_report_err(local_err);
                error_free(invtsc_mig_blocker);
                return r;
            }
        }
    }

    if (cpu->vmware_cpuid_freq
        /* Guests depend on 0x40000000 to detect this feature, so only expose
         * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
        && cpu->expose_kvm
        && kvm_base == KVM_CPUID_SIGNATURE
        /* TSC clock must be stable and known for this feature. */
        && tsc_is_stable_and_known(env)) {

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | 0x10;
        c->eax = env->tsc_khz;
        /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
         * APIC_BUS_CYCLE_NS */
        c->ebx = 1000000;
        c->ecx = c->edx = 0;

        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        goto fail;
    }

    if (has_xsave) {
        env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
        memset(env->xsave_buf, 0, sizeof(struct kvm_xsave));
    }

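    /*
     * If the kernel supports VMX nested state, pre-allocate the buffer at
     * its maximum size; vmxon_pa/vmcs12_pa of -1 denote "VMX not in
     * operation", which is the state of a freshly reset vCPU.
     */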
1827 max_nested_state_len = kvm_max_nested_state_length();
1828 if (max_nested_state_len > 0) {
1829 assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
1830
1831 if (cpu_has_vmx(env)) {
1832 struct kvm_vmx_nested_state_hdr *vmx_hdr;
1833
1834 env->nested_state = g_malloc0(max_nested_state_len);
1835 env->nested_state->size = max_nested_state_len;
1836 env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
1837
1838 vmx_hdr = &env->nested_state->hdr.vmx;
1839 vmx_hdr->vmxon_pa = -1ull;
1840 vmx_hdr->vmcs12_pa = -1ull;
1841 }
1842 }
1843
1844 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
1845
1846 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
1847 has_msr_tsc_aux = false;
1848 }
1849
1850 kvm_init_msrs(cpu);
1851
1852 r = hyperv_init_vcpu(cpu);
1853 if (r) {
1854 goto fail;
1855 }
1856
1857 return 0;
1858
1859 fail:
1860 migrate_del_blocker(invtsc_mig_blocker);
1861
1862 return r;
1863 }
1864
kvm_arch_destroy_vcpu(CPUState * cs)1865 int kvm_arch_destroy_vcpu(CPUState *cs)
1866 {
1867 X86CPU *cpu = X86_CPU(cs);
1868 CPUX86State *env = &cpu->env;
1869
1870 if (cpu->kvm_msr_buf) {
1871 g_free(cpu->kvm_msr_buf);
1872 cpu->kvm_msr_buf = NULL;
1873 }
1874
1875 if (env->nested_state) {
1876 g_free(env->nested_state);
1877 env->nested_state = NULL;
1878 }
1879
1880 return 0;
1881 }
1882
kvm_arch_reset_vcpu(X86CPU * cpu)1883 void kvm_arch_reset_vcpu(X86CPU *cpu)
1884 {
1885 CPUX86State *env = &cpu->env;
1886
1887 env->xcr0 = 1;
1888 if (kvm_irqchip_in_kernel()) {
1889 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
1890 KVM_MP_STATE_UNINITIALIZED;
1891 } else {
1892 env->mp_state = KVM_MP_STATE_RUNNABLE;
1893 }
1894
1895 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
1896 int i;
1897 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
1898 env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
1899 }
1900
1901 hyperv_x86_synic_reset(cpu);
1902 }
1903 /* enabled by default */
1904 env->poll_control_msr = 1;
1905 }
1906
kvm_arch_do_init_vcpu(X86CPU * cpu)1907 void kvm_arch_do_init_vcpu(X86CPU *cpu)
1908 {
1909 CPUX86State *env = &cpu->env;
1910
1911 /* APs get directly into wait-for-SIPI state. */
1912 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
1913 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
1914 }
1915 }
1916
kvm_get_supported_feature_msrs(KVMState * s)1917 static int kvm_get_supported_feature_msrs(KVMState *s)
1918 {
1919 int ret = 0;
1920
1921 if (kvm_feature_msrs != NULL) {
1922 return 0;
1923 }
1924
1925 if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
1926 return 0;
1927 }
1928
1929 struct kvm_msr_list msr_list;
1930
1931 msr_list.nmsrs = 0;
1932 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
1933 if (ret < 0 && ret != -E2BIG) {
1934 error_report("Fetch KVM feature MSR list failed: %s",
1935 strerror(-ret));
1936 return ret;
1937 }
1938
1939 assert(msr_list.nmsrs > 0);
1940 kvm_feature_msrs = (struct kvm_msr_list *) \
1941 g_malloc0(sizeof(msr_list) +
1942 msr_list.nmsrs * sizeof(msr_list.indices[0]));
1943
1944 kvm_feature_msrs->nmsrs = msr_list.nmsrs;
1945 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
1946
1947 if (ret < 0) {
1948 error_report("Fetch KVM feature MSR list failed: %s",
1949 strerror(-ret));
1950 g_free(kvm_feature_msrs);
1951 kvm_feature_msrs = NULL;
1952 return ret;
1953 }
1954
1955 return 0;
1956 }
1957
1958 static int kvm_get_supported_msrs(KVMState *s)
1959 {
1960 int ret = 0;
1961 struct kvm_msr_list msr_list, *kvm_msr_list;
1962
1963 /*
1964 * Obtain MSR list from KVM. These are the MSRs that we must
1965 * save/restore.
1966 */
1967 msr_list.nmsrs = 0;
1968 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
1969 if (ret < 0 && ret != -E2BIG) {
1970 return ret;
1971 }
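/*
 * As with the feature MSRs above, the first call fails with -E2BIG and
 * reports the MSR count, so the second call can use a buffer of the
 * right size.
 */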
1972 /*
1973 * Old kernel modules had a bug and could write beyond the provided
1974 * memory. Allocate at least 1K to be safe.
1975 */
1976 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
1977 msr_list.nmsrs *
1978 sizeof(msr_list.indices[0])));
1979
1980 kvm_msr_list->nmsrs = msr_list.nmsrs;
1981 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
1982 if (ret >= 0) {
1983 int i;
1984
1985 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
1986 switch (kvm_msr_list->indices[i]) {
1987 case MSR_STAR:
1988 has_msr_star = true;
1989 break;
1990 case MSR_VM_HSAVE_PA:
1991 has_msr_hsave_pa = true;
1992 break;
1993 case MSR_TSC_AUX:
1994 has_msr_tsc_aux = true;
1995 break;
1996 case MSR_TSC_ADJUST:
1997 has_msr_tsc_adjust = true;
1998 break;
1999 case MSR_IA32_TSCDEADLINE:
2000 has_msr_tsc_deadline = true;
2001 break;
2002 case MSR_IA32_SMBASE:
2003 has_msr_smbase = true;
2004 break;
2005 case MSR_SMI_COUNT:
2006 has_msr_smi_count = true;
2007 break;
2008 case MSR_IA32_MISC_ENABLE:
2009 has_msr_misc_enable = true;
2010 break;
2011 case MSR_IA32_BNDCFGS:
2012 has_msr_bndcfgs = true;
2013 break;
2014 case MSR_IA32_XSS:
2015 has_msr_xss = true;
2016 break;
2017 case MSR_IA32_UMWAIT_CONTROL:
2018 has_msr_umwait = true;
2019 break;
2020 case HV_X64_MSR_CRASH_CTL:
2021 has_msr_hv_crash = true;
2022 break;
2023 case HV_X64_MSR_RESET:
2024 has_msr_hv_reset = true;
2025 break;
2026 case HV_X64_MSR_VP_INDEX:
2027 has_msr_hv_vpindex = true;
2028 break;
2029 case HV_X64_MSR_VP_RUNTIME:
2030 has_msr_hv_runtime = true;
2031 break;
2032 case HV_X64_MSR_SCONTROL:
2033 has_msr_hv_synic = true;
2034 break;
2035 case HV_X64_MSR_STIMER0_CONFIG:
2036 has_msr_hv_stimer = true;
2037 break;
2038 case HV_X64_MSR_TSC_FREQUENCY:
2039 has_msr_hv_frequencies = true;
2040 break;
2041 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2042 has_msr_hv_reenlightenment = true;
2043 break;
2044 case MSR_IA32_SPEC_CTRL:
2045 has_msr_spec_ctrl = true;
2046 break;
2047 case MSR_IA32_TSX_CTRL:
2048 has_msr_tsx_ctrl = true;
2049 break;
2050 case MSR_VIRT_SSBD:
2051 has_msr_virt_ssbd = true;
2052 break;
2053 case MSR_IA32_ARCH_CAPABILITIES:
2054 has_msr_arch_capabs = true;
2055 break;
2056 case MSR_IA32_CORE_CAPABILITY:
2057 has_msr_core_capabs = true;
2058 break;
2059 case MSR_IA32_VMX_VMFUNC:
2060 has_msr_vmx_vmfunc = true;
2061 break;
2062 case MSR_IA32_UCODE_REV:
2063 has_msr_ucode_rev = true;
2064 break;
2065 case MSR_IA32_VMX_PROCBASED_CTLS2:
2066 has_msr_vmx_procbased_ctls2 = true;
2067 break;
2068 }
2069 }
2070 }
2071
2072 g_free(kvm_msr_list);
2073
2074 return ret;
2075 }
2076
2077 static Notifier smram_machine_done;
2078 static KVMMemoryListener smram_listener;
2079 static AddressSpace smram_address_space;
2080 static MemoryRegion smram_as_root;
2081 static MemoryRegion smram_as_mem;
2082
2083 static void register_smram_listener(Notifier *n, void *unused)
2084 {
2085 MemoryRegion *smram =
2086 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2087
2088 /* Outer container... */
2089 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
2090 memory_region_set_enabled(&smram_as_root, true);
2091
2092 /* ... with two regions inside: normal system memory with low
2093 * priority, and...
2094 */
2095 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
2096 get_system_memory(), 0, ~0ull);
2097 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
2098 memory_region_set_enabled(&smram_as_mem, true);
2099
2100 if (smram) {
2101 /* ... SMRAM with higher priority */
2102 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
2103 memory_region_set_enabled(smram, true);
2104 }
2105
2106 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
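/*
 * The listener below registers as address space 1, which KVM uses for
 * memory accesses made while the vCPU is in SMM.
 */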
2107 kvm_memory_listener_register(kvm_state, &smram_listener,
2108 &smram_address_space, 1);
2109 }
2110
2111 int kvm_arch_init(MachineState *ms, KVMState *s)
2112 {
2113 uint64_t identity_base = 0xfffbc000;
2114 uint64_t shadow_mem;
2115 int ret;
2116 struct utsname utsname;
2117
2118 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
2119 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
2120 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
2121
2122 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
2123
2124 has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
2125 if (has_exception_payload) {
2126 ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
2127 if (ret < 0) {
2128 error_report("kvm: Failed to enable exception payload cap: %s",
2129 strerror(-ret));
2130 return ret;
2131 }
2132 }
2133
2134 ret = kvm_get_supported_msrs(s);
2135 if (ret < 0) {
2136 return ret;
2137 }
2138
2139 kvm_get_supported_feature_msrs(s);
2140
2141 uname(&utsname);
2142 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
2143
2144 /*
2145 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
2146 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
2147 * Since these must be part of guest physical memory, we need to allocate
2148 * them, both by setting their start addresses in the kernel and by
2149 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
2150 *
2151 * Older KVM versions may not support setting the identity map base. In
2152 * that case we need to stick with the default, i.e. a 256K maximum BIOS
2153 * size.
2154 */
2155 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
2156 /* Allows up to 16M BIOSes. */
2157 identity_base = 0xfeffc000;
2158
2159 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
2160 if (ret < 0) {
2161 return ret;
2162 }
2163 }
2164
2165 /* Set TSS base one page after EPT identity map. */
2166 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
2167 if (ret < 0) {
2168 return ret;
2169 }
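/*
 * Resulting layout: one 4K page of EPT identity map at identity_base,
 * followed by the three-page TSS, i.e. the 0x4000 bytes reserved via
 * the e820 entry below.
 */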
2170
2171 /* Tell fw_cfg to notify the BIOS to reserve the range. */
2172 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
2173 if (ret < 0) {
2174 fprintf(stderr, "e820_add_entry() table is full\n");
2175 return ret;
2176 }
2177 qemu_register_reset(kvm_unpoison_all, NULL);
2178
2179 shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
2180 if (shadow_mem != -1) {
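/* kvm-shadow-mem is given in bytes; KVM_SET_NR_MMU_PAGES takes 4 KiB pages. */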
2181 shadow_mem /= 4096;
2182 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
2183 if (ret < 0) {
2184 return ret;
2185 }
2186 }
2187
2188 if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
2189 object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
2190 x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
2191 smram_machine_done.notify = register_smram_listener;
2192 qemu_add_machine_init_done_notifier(&smram_machine_done);
2193 }
2194
2195 if (enable_cpu_pm) {
2196 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
2197 int ret;
2198
2199 /* Work around a kernel header typo (HTL instead of HLT). TODO: fix the header and drop this. */
2200 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
2201 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
2202 #endif
2203 if (disable_exits) {
2204 disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
2205 KVM_X86_DISABLE_EXITS_HLT |
2206 KVM_X86_DISABLE_EXITS_PAUSE |
2207 KVM_X86_DISABLE_EXITS_CSTATE);
2208 }
2209
2210 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
2211 disable_exits);
2212 if (ret < 0) {
2213 error_report("kvm: guest stopping CPU not supported: %s",
2214 strerror(-ret));
2215 }
2216 }
2217
2218 return 0;
2219 }
2220
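/*
 * In virtual-8086 mode all segments behave as 16-bit, ring-3,
 * read/write data segments, so every attribute except selector, base
 * and limit takes a fixed value.
 */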
2221 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2222 {
2223 lhs->selector = rhs->selector;
2224 lhs->base = rhs->base;
2225 lhs->limit = rhs->limit;
2226 lhs->type = 3;
2227 lhs->present = 1;
2228 lhs->dpl = 3;
2229 lhs->db = 0;
2230 lhs->s = 1;
2231 lhs->l = 0;
2232 lhs->g = 0;
2233 lhs->avl = 0;
2234 lhs->unusable = 0;
2235 }
2236
2237 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2238 {
2239 unsigned flags = rhs->flags;
2240 lhs->selector = rhs->selector;
2241 lhs->base = rhs->base;
2242 lhs->limit = rhs->limit;
2243 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
2244 lhs->present = (flags & DESC_P_MASK) != 0;
2245 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
2246 lhs->db = (flags >> DESC_B_SHIFT) & 1;
2247 lhs->s = (flags & DESC_S_MASK) != 0;
2248 lhs->l = (flags >> DESC_L_SHIFT) & 1;
2249 lhs->g = (flags & DESC_G_MASK) != 0;
2250 lhs->avl = (flags & DESC_AVL_MASK) != 0;
2251 lhs->unusable = !lhs->present;
2252 lhs->padding = 0;
2253 }
2254
2255 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
2256 {
2257 lhs->selector = rhs->selector;
2258 lhs->base = rhs->base;
2259 lhs->limit = rhs->limit;
2260 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
2261 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
2262 (rhs->dpl << DESC_DPL_SHIFT) |
2263 (rhs->db << DESC_B_SHIFT) |
2264 (rhs->s * DESC_S_MASK) |
2265 (rhs->l << DESC_L_SHIFT) |
2266 (rhs->g * DESC_G_MASK) |
2267 (rhs->avl * DESC_AVL_MASK);
2268 }
2269
2270 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
2271 {
2272 if (set) {
2273 *kvm_reg = *qemu_reg;
2274 } else {
2275 *qemu_reg = *kvm_reg;
2276 }
2277 }
2278
2279 static int kvm_getput_regs(X86CPU *cpu, int set)
2280 {
2281 CPUX86State *env = &cpu->env;
2282 struct kvm_regs regs;
2283 int ret = 0;
2284
2285 if (!set) {
2286 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
2287 if (ret < 0) {
2288 return ret;
2289 }
2290 }
2291
2292 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
2293 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
2294 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
2295 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
2296 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
2297 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
2298 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
2299 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
2300 #ifdef TARGET_X86_64
2301 kvm_getput_reg(&regs.r8, &env->regs[8], set);
2302 kvm_getput_reg(&regs.r9, &env->regs[9], set);
2303 kvm_getput_reg(&regs.r10, &env->regs[10], set);
2304 kvm_getput_reg(&regs.r11, &env->regs[11], set);
2305 kvm_getput_reg(&regs.r12, &env->regs[12], set);
2306 kvm_getput_reg(&regs.r13, &env->regs[13], set);
2307 kvm_getput_reg(&regs.r14, &env->regs[14], set);
2308 kvm_getput_reg(&regs.r15, &env->regs[15], set);
2309 #endif
2310
2311 kvm_getput_reg(&regs.rflags, &env->eflags, set);
2312 kvm_getput_reg(&regs.rip, &env->eip, set);
2313
2314 if (set) {
2315 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
2316 }
2317
2318 return ret;
2319 }
2320
2321 static int kvm_put_fpu(X86CPU *cpu)
2322 {
2323 CPUX86State *env = &cpu->env;
2324 struct kvm_fpu fpu;
2325 int i;
2326
2327 memset(&fpu, 0, sizeof fpu);
2328 fpu.fsw = env->fpus & ~(7 << 11);
2329 fpu.fsw |= (env->fpstt & 7) << 11;
2330 fpu.fcw = env->fpuc;
2331 fpu.last_opcode = env->fpop;
2332 fpu.last_ip = env->fpip;
2333 fpu.last_dp = env->fpdp;
2334 for (i = 0; i < 8; ++i) {
2335 fpu.ftwx |= (!env->fptags[i]) << i;
2336 }
2337 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
2338 for (i = 0; i < CPU_NB_REGS; i++) {
2339 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
2340 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
2341 }
2342 fpu.mxcsr = env->mxcsr;
2343
2344 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
2345 }
2346
2347 #define XSAVE_FCW_FSW 0
2348 #define XSAVE_FTW_FOP 1
2349 #define XSAVE_CWD_RIP 2
2350 #define XSAVE_CWD_RDP 4
2351 #define XSAVE_MXCSR 6
2352 #define XSAVE_ST_SPACE 8
2353 #define XSAVE_XMM_SPACE 40
2354 #define XSAVE_XSTATE_BV 128
2355 #define XSAVE_YMMH_SPACE 144
2356 #define XSAVE_BNDREGS 240
2357 #define XSAVE_BNDCSR 256
2358 #define XSAVE_OPMASK 272
2359 #define XSAVE_ZMM_Hi256 288
2360 #define XSAVE_Hi16_ZMM 416
2361 #define XSAVE_PKRU 672
2362
2363 #define XSAVE_BYTE_OFFSET(word_offset) \
2364 ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))
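/*
 * The XSAVE_* constants above are offsets in 32-bit words into the
 * kvm_xsave.region array; XSAVE_BYTE_OFFSET converts them to byte
 * offsets. For example, XSAVE_YMMH_SPACE (144) is byte offset 576,
 * the architectural offset of the AVX state component.
 */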
2365
2366 #define ASSERT_OFFSET(word_offset, field) \
2367 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
2368 offsetof(X86XSaveArea, field))
2369
2370 ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
2371 ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
2372 ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
2373 ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
2374 ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
2375 ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
2376 ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
2377 ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
2378 ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
2379 ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
2380 ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
2381 ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
2382 ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
2383 ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
2384 ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
2385
2386 static int kvm_put_xsave(X86CPU *cpu)
2387 {
2388 CPUX86State *env = &cpu->env;
2389 X86XSaveArea *xsave = env->xsave_buf;
2390
2391 if (!has_xsave) {
2392 return kvm_put_fpu(cpu);
2393 }
2394 x86_cpu_xsave_all_areas(cpu, xsave);
2395
2396 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
2397 }
2398
2399 static int kvm_put_xcrs(X86CPU *cpu)
2400 {
2401 CPUX86State *env = &cpu->env;
2402 struct kvm_xcrs xcrs = {};
2403
2404 if (!has_xcrs) {
2405 return 0;
2406 }
2407
2408 xcrs.nr_xcrs = 1;
2409 xcrs.flags = 0;
2410 xcrs.xcrs[0].xcr = 0;
2411 xcrs.xcrs[0].value = env->xcr0;
2412 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
2413 }
2414
2415 static int kvm_put_sregs(X86CPU *cpu)
2416 {
2417 CPUX86State *env = &cpu->env;
2418 struct kvm_sregs sregs;
2419
2420 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
2421 if (env->interrupt_injected >= 0) {
2422 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
2423 (uint64_t)1 << (env->interrupt_injected % 64);
2424 }
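/* For example, a pending vector 0x20 sets bit 32 of interrupt_bitmap[0]. */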
2425
2426 if ((env->eflags & VM_MASK)) {
2427 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2428 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2429 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2430 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2431 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2432 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2433 } else {
2434 set_seg(&sregs.cs, &env->segs[R_CS]);
2435 set_seg(&sregs.ds, &env->segs[R_DS]);
2436 set_seg(&sregs.es, &env->segs[R_ES]);
2437 set_seg(&sregs.fs, &env->segs[R_FS]);
2438 set_seg(&sregs.gs, &env->segs[R_GS]);
2439 set_seg(&sregs.ss, &env->segs[R_SS]);
2440 }
2441
2442 set_seg(&sregs.tr, &env->tr);
2443 set_seg(&sregs.ldt, &env->ldt);
2444
2445 sregs.idt.limit = env->idt.limit;
2446 sregs.idt.base = env->idt.base;
2447 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2448 sregs.gdt.limit = env->gdt.limit;
2449 sregs.gdt.base = env->gdt.base;
2450 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2451
2452 sregs.cr0 = env->cr[0];
2453 sregs.cr2 = env->cr[2];
2454 sregs.cr3 = env->cr[3];
2455 sregs.cr4 = env->cr[4];
2456
2457 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2458 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2459
2460 sregs.efer = env->efer;
2461
2462 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
2463 }
2464
2465 static void kvm_msr_buf_reset(X86CPU *cpu)
2466 {
2467 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
2468 }
2469
2470 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
2471 {
2472 struct kvm_msrs *msrs = cpu->kvm_msr_buf;
2473 void *limit = ((void *)msrs) + MSR_BUF_SIZE;
2474 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
2475
2476 assert((void *)(entry + 1) <= limit);
2477
2478 entry->index = index;
2479 entry->reserved = 0;
2480 entry->data = value;
2481 msrs->nmsrs++;
2482 }
2483
2484 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
2485 {
2486 kvm_msr_buf_reset(cpu);
2487 kvm_msr_entry_add(cpu, index, value);
2488
2489 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2490 }
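/*
 * KVM_SET_MSRS returns the number of MSRs actually written, so with the
 * single-entry buffer above a return value of 1 means success; callers
 * assert on exactly that.
 */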
2491
2492 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
2493 {
2494 int ret;
2495
2496 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
2497 assert(ret == 1);
2498 }
2499
2500 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
2501 {
2502 CPUX86State *env = &cpu->env;
2503 int ret;
2504
2505 if (!has_msr_tsc_deadline) {
2506 return 0;
2507 }
2508
2509 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
2510 if (ret < 0) {
2511 return ret;
2512 }
2513
2514 assert(ret == 1);
2515 return 0;
2516 }
2517
2518 /*
2519 * Provide a separate write service for the feature control MSR in order to
2520 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
2521 * before writing any other state because forcibly leaving nested mode
2522 * invalidates the VCPU state.
2523 */
2524 static int kvm_put_msr_feature_control(X86CPU *cpu)
2525 {
2526 int ret;
2527
2528 if (!has_msr_feature_control) {
2529 return 0;
2530 }
2531
2532 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
2533 cpu->env.msr_ia32_feature_control);
2534 if (ret < 0) {
2535 return ret;
2536 }
2537
2538 assert(ret == 1);
2539 return 0;
2540 }
2541
2542 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
2543 {
2544 uint32_t default1, can_be_one, can_be_zero;
2545 uint32_t must_be_one;
2546
2547 switch (index) {
2548 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2549 default1 = 0x00000016;
2550 break;
2551 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2552 default1 = 0x0401e172;
2553 break;
2554 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2555 default1 = 0x000011ff;
2556 break;
2557 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2558 default1 = 0x00036dff;
2559 break;
2560 case MSR_IA32_VMX_PROCBASED_CTLS2:
2561 default1 = 0;
2562 break;
2563 default:
2564 abort();
2565 }
2566
2567 /* If a feature bit is set, the control can be either set or clear.
2568 * Otherwise the control is fixed to the value given by default1.
2569 */
2570 can_be_one = features | default1;
2571 can_be_zero = features | ~default1;
2572 must_be_one = ~can_be_zero;
2573
2574 /*
2575 * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
2576 * Bit 32:63 -> 1 if the control bit can be one.
2577 */
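/*
 * Worked example: with default1 = 0x16 (bits 1, 2 and 4) and
 * features = 0x04, must_be_one = 0x12 and can_be_one = 0x16, so the
 * returned value is 0x0000001600000012.
 */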
2578 return must_be_one | (((uint64_t)can_be_one) << 32);
2579 }
2580
2581 #define VMCS12_MAX_FIELD_INDEX (0x17)
2582
2583 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
2584 {
2585 uint64_t kvm_vmx_basic =
2586 kvm_arch_get_supported_msr_feature(kvm_state,
2587 MSR_IA32_VMX_BASIC);
2588
2589 if (!kvm_vmx_basic) {
2590 /* If the kernel doesn't support nested VMX (kvm_intel.nested=0),
2591 * kvm_vmx_basic will be 0 and KVM_SET_MSRS will fail.
2592 */
2593 return;
2594 }
2595
2596 uint64_t kvm_vmx_misc =
2597 kvm_arch_get_supported_msr_feature(kvm_state,
2598 MSR_IA32_VMX_MISC);
2599 uint64_t kvm_vmx_ept_vpid =
2600 kvm_arch_get_supported_msr_feature(kvm_state,
2601 MSR_IA32_VMX_EPT_VPID_CAP);
2602
2603 /*
2604 * If the guest is 64-bit, a value of 1 is allowed for the host address
2605 * space size vmexit control.
2606 */
2607 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
2608 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
2609
2610 /*
2611 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should
2612 * not change them for backwards compatibility.
2613 */
2614 uint64_t fixed_vmx_basic = kvm_vmx_basic &
2615 (MSR_VMX_BASIC_VMCS_REVISION_MASK |
2616 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
2617 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
2618
2619 /*
2620 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can
2621 * change in the future but are always zero for now, clear them to be
2622 * future proof. Bits 32-63 in theory could change, though KVM does
2623 * not support dual-monitor treatment and probably never will; mask
2624 * them out as well.
2625 */
2626 uint64_t fixed_vmx_misc = kvm_vmx_misc &
2627 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
2628 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
2629
2630 /*
2631 * EPT memory types should not change either, so we do not bother
2632 * adding features for them.
2633 */
2634 uint64_t fixed_vmx_ept_mask =
2635 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
2636 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
2637 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
2638
2639 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2640 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2641 f[FEAT_VMX_PROCBASED_CTLS]));
2642 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2643 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2644 f[FEAT_VMX_PINBASED_CTLS]));
2645 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
2646 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
2647 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
2648 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2649 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2650 f[FEAT_VMX_ENTRY_CTLS]));
2651 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
2652 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
2653 f[FEAT_VMX_SECONDARY_CTLS]));
2654 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
2655 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
2656 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
2657 f[FEAT_VMX_BASIC] | fixed_vmx_basic);
2658 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
2659 f[FEAT_VMX_MISC] | fixed_vmx_misc);
2660 if (has_msr_vmx_vmfunc) {
2661 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
2662 }
2663
2664 /*
2665 * Just to be safe, write these with constant values. The CRn_FIXED1
2666 * MSRs are generated by KVM based on the vCPU's CPUID.
2667 */
2668 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
2669 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
2670 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
2671 CR4_VMXE_MASK);
2672 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM,
2673 VMCS12_MAX_FIELD_INDEX << 1);
2674 }
2675
2676 static int kvm_buf_set_msrs(X86CPU *cpu)
2677 {
2678 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2679 if (ret < 0) {
2680 return ret;
2681 }
2682
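/*
 * A short count means KVM stopped at the first MSR it refused to write;
 * entries[ret] identifies the offending MSR.
 */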
2683 if (ret < cpu->kvm_msr_buf->nmsrs) {
2684 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
2685 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
2686 (uint32_t)e->index, (uint64_t)e->data);
2687 }
2688
2689 assert(ret == cpu->kvm_msr_buf->nmsrs);
2690 return 0;
2691 }
2692
2693 static void kvm_init_msrs(X86CPU *cpu)
2694 {
2695 CPUX86State *env = &cpu->env;
2696
2697 kvm_msr_buf_reset(cpu);
2698 if (has_msr_arch_capabs) {
2699 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
2700 env->features[FEAT_ARCH_CAPABILITIES]);
2701 }
2702
2703 if (has_msr_core_capabs) {
2704 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
2705 env->features[FEAT_CORE_CAPABILITY]);
2706 }
2707
2708 if (has_msr_ucode_rev) {
2709 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
2710 }
2711
2712 /*
2713 * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
2714 * all kernels with MSR features should have them.
2715 */
2716 if (kvm_feature_msrs && cpu_has_vmx(env)) {
2717 kvm_msr_entry_add_vmx(cpu, env->features);
2718 }
2719
2720 assert(kvm_buf_set_msrs(cpu) == 0);
2721 }
2722
2723 static int kvm_put_msrs(X86CPU *cpu, int level)
2724 {
2725 CPUX86State *env = &cpu->env;
2726 int i;
2727
2728 kvm_msr_buf_reset(cpu);
2729
2730 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
2731 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
2732 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
2733 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
2734 if (has_msr_star) {
2735 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
2736 }
2737 if (has_msr_hsave_pa) {
2738 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
2739 }
2740 if (has_msr_tsc_aux) {
2741 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
2742 }
2743 if (has_msr_tsc_adjust) {
2744 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
2745 }
2746 if (has_msr_misc_enable) {
2747 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
2748 env->msr_ia32_misc_enable);
2749 }
2750 if (has_msr_smbase) {
2751 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
2752 }
2753 if (has_msr_smi_count) {
2754 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
2755 }
2756 if (has_msr_bndcfgs) {
2757 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
2758 }
2759 if (has_msr_xss) {
2760 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
2761 }
2762 if (has_msr_umwait) {
2763 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
2764 }
2765 if (has_msr_spec_ctrl) {
2766 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
2767 }
2768 if (has_msr_tsx_ctrl) {
2769 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
2770 }
2771 if (has_msr_virt_ssbd) {
2772 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
2773 }
2774
2775 #ifdef TARGET_X86_64
2776 if (lm_capable_kernel) {
2777 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
2778 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
2779 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
2780 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
2781 }
2782 #endif
2783
2784 /*
2785 * The following MSRs have side effects on the guest or are too heavy
2786 * for normal writeback. Limit them to reset or full state updates.
2787 */
2788 if (level >= KVM_PUT_RESET_STATE) {
2789 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
2790 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
2791 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
2792 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
2793 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
2794 }
2795 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
2796 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
2797 }
2798 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
2799 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
2800 }
2801
2802 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
2803 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
2804 }
2805
2806 if (has_architectural_pmu_version > 0) {
2807 if (has_architectural_pmu_version > 1) {
2808 /* Stop the counter. */
2809 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
2810 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
2811 }
2812
2813 /* Set the counter values. */
2814 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
2815 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
2816 env->msr_fixed_counters[i]);
2817 }
2818 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
2819 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
2820 env->msr_gp_counters[i]);
2821 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
2822 env->msr_gp_evtsel[i]);
2823 }
2824 if (has_architectural_pmu_version > 1) {
2825 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
2826 env->msr_global_status);
2827 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
2828 env->msr_global_ovf_ctrl);
2829
2830 /* Now start the PMU. */
2831 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
2832 env->msr_fixed_ctr_ctrl);
2833 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
2834 env->msr_global_ctrl);
2835 }
2836 }
2837 /*
2838 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
2839 * only sync them to KVM on the first CPU.
2840 */
2841 if (current_cpu == first_cpu) {
2842 if (has_msr_hv_hypercall) {
2843 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
2844 env->msr_hv_guest_os_id);
2845 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
2846 env->msr_hv_hypercall);
2847 }
2848 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
2849 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
2850 env->msr_hv_tsc);
2851 }
2852 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
2853 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
2854 env->msr_hv_reenlightenment_control);
2855 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
2856 env->msr_hv_tsc_emulation_control);
2857 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
2858 env->msr_hv_tsc_emulation_status);
2859 }
2860 }
2861 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
2862 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
2863 env->msr_hv_vapic);
2864 }
2865 if (has_msr_hv_crash) {
2866 int j;
2867
2868 for (j = 0; j < HV_CRASH_PARAMS; j++) {
2869 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
2870 env->msr_hv_crash_params[j]);
}
2871
2872 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
2873 }
2874 if (has_msr_hv_runtime) {
2875 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
2876 }
2877 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
2878 && hv_vpindex_settable) {
2879 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
2880 hyperv_vp_index(CPU(cpu)));
2881 }
2882 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
2883 int j;
2884
2885 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
2886
2887 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
2888 env->msr_hv_synic_control);
2889 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
2890 env->msr_hv_synic_evt_page);
2891 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
2892 env->msr_hv_synic_msg_page);
2893
2894 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
2895 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
2896 env->msr_hv_synic_sint[j]);
2897 }
2898 }
2899 if (has_msr_hv_stimer) {
2900 int j;
2901
2902 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
2903 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
2904 env->msr_hv_stimer_config[j]);
2905 }
2906
2907 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
2908 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
2909 env->msr_hv_stimer_count[j]);
2910 }
2911 }
2912 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
2913 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
2914
2915 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
2916 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
2917 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
2918 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
2919 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
2920 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
2921 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
2922 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
2923 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
2924 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
2925 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
2926 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
2927 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
2928 /* The CPU GPs if we write to a bit above the physical limit of
2929 * the host CPU (and KVM emulates that)
2930 */
2931 uint64_t mask = env->mtrr_var[i].mask;
2932 mask &= phys_mask;
2933
2934 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
2935 env->mtrr_var[i].base);
2936 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
2937 }
2938 }
2939 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
2940 int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
2941 0x14, 1, R_EAX) & 0x7;
2942
2943 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
2944 env->msr_rtit_ctrl);
2945 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
2946 env->msr_rtit_status);
2947 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
2948 env->msr_rtit_output_base);
2949 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
2950 env->msr_rtit_output_mask);
2951 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
2952 env->msr_rtit_cr3_match);
2953 for (i = 0; i < addr_num; i++) {
2954 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
2955 env->msr_rtit_addrs[i]);
2956 }
2957 }
2958
2959 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
2960 * kvm_put_msr_feature_control. */
2961 }
2962
2963 if (env->mcg_cap) {
2964 int i;
2965
2966 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
2967 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
2968 if (has_msr_mcg_ext_ctl) {
2969 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
2970 }
2971 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
2972 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
2973 }
2974 }
2975
2976 return kvm_buf_set_msrs(cpu);
2977 }
2978
2979
2980 static int kvm_get_fpu(X86CPU *cpu)
2981 {
2982 CPUX86State *env = &cpu->env;
2983 struct kvm_fpu fpu;
2984 int i, ret;
2985
2986 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
2987 if (ret < 0) {
2988 return ret;
2989 }
2990
2991 env->fpstt = (fpu.fsw >> 11) & 7;
2992 env->fpus = fpu.fsw;
2993 env->fpuc = fpu.fcw;
2994 env->fpop = fpu.last_opcode;
2995 env->fpip = fpu.last_ip;
2996 env->fpdp = fpu.last_dp;
2997 for (i = 0; i < 8; ++i) {
2998 env->fptags[i] = !((fpu.ftwx >> i) & 1);
2999 }
3000 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
3001 for (i = 0; i < CPU_NB_REGS; i++) {
3002 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
3003 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
3004 }
3005 env->mxcsr = fpu.mxcsr;
3006
3007 return 0;
3008 }
3009
3010 static int kvm_get_xsave(X86CPU *cpu)
3011 {
3012 CPUX86State *env = &cpu->env;
3013 X86XSaveArea *xsave = env->xsave_buf;
3014 int ret;
3015
3016 if (!has_xsave) {
3017 return kvm_get_fpu(cpu);
3018 }
3019
3020 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
3021 if (ret < 0) {
3022 return ret;
3023 }
3024 x86_cpu_xrstor_all_areas(cpu, xsave);
3025
3026 return 0;
3027 }
3028
3029 static int kvm_get_xcrs(X86CPU *cpu)
3030 {
3031 CPUX86State *env = &cpu->env;
3032 int i, ret;
3033 struct kvm_xcrs xcrs;
3034
3035 if (!has_xcrs) {
3036 return 0;
3037 }
3038
3039 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
3040 if (ret < 0) {
3041 return ret;
3042 }
3043
3044 for (i = 0; i < xcrs.nr_xcrs; i++) {
3045 /* Only XCR0 is supported for now */
3046 if (xcrs.xcrs[i].xcr == 0) {
3047 env->xcr0 = xcrs.xcrs[i].value;
3048 break;
3049 }
3050 }
3051 return 0;
3052 }
3053
3054 static int kvm_get_sregs(X86CPU *cpu)
3055 {
3056 CPUX86State *env = &cpu->env;
3057 struct kvm_sregs sregs;
3058 int bit, i, ret;
3059
3060 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
3061 if (ret < 0) {
3062 return ret;
3063 }
3064
3065 /* There can only be one pending IRQ set in the bitmap at a time, so try
3066 to find it and save its number instead (-1 for none). */
3067 env->interrupt_injected = -1;
3068 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
3069 if (sregs.interrupt_bitmap[i]) {
3070 bit = ctz64(sregs.interrupt_bitmap[i]);
3071 env->interrupt_injected = i * 64 + bit;
3072 break;
3073 }
3074 }
3075
3076 get_seg(&env->segs[R_CS], &sregs.cs);
3077 get_seg(&env->segs[R_DS], &sregs.ds);
3078 get_seg(&env->segs[R_ES], &sregs.es);
3079 get_seg(&env->segs[R_FS], &sregs.fs);
3080 get_seg(&env->segs[R_GS], &sregs.gs);
3081 get_seg(&env->segs[R_SS], &sregs.ss);
3082
3083 get_seg(&env->tr, &sregs.tr);
3084 get_seg(&env->ldt, &sregs.ldt);
3085
3086 env->idt.limit = sregs.idt.limit;
3087 env->idt.base = sregs.idt.base;
3088 env->gdt.limit = sregs.gdt.limit;
3089 env->gdt.base = sregs.gdt.base;
3090
3091 env->cr[0] = sregs.cr0;
3092 env->cr[2] = sregs.cr2;
3093 env->cr[3] = sregs.cr3;
3094 env->cr[4] = sregs.cr4;
3095
3096 env->efer = sregs.efer;
3097
3098 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3099 x86_update_hflags(env);
3100
3101 return 0;
3102 }
3103
3104 static int kvm_get_msrs(X86CPU *cpu)
3105 {
3106 CPUX86State *env = &cpu->env;
3107 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
3108 int ret, i;
3109 uint64_t mtrr_top_bits;
3110
3111 kvm_msr_buf_reset(cpu);
3112
3113 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
3114 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
3115 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
3116 kvm_msr_entry_add(cpu, MSR_PAT, 0);
3117 if (has_msr_star) {
3118 kvm_msr_entry_add(cpu, MSR_STAR, 0);
3119 }
3120 if (has_msr_hsave_pa) {
3121 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
3122 }
3123 if (has_msr_tsc_aux) {
3124 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
3125 }
3126 if (has_msr_tsc_adjust) {
3127 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
3128 }
3129 if (has_msr_tsc_deadline) {
3130 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
3131 }
3132 if (has_msr_misc_enable) {
3133 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
3134 }
3135 if (has_msr_smbase) {
3136 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
3137 }
3138 if (has_msr_smi_count) {
3139 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
3140 }
3141 if (has_msr_feature_control) {
3142 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
3143 }
3144 if (has_msr_bndcfgs) {
3145 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
3146 }
3147 if (has_msr_xss) {
3148 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
3149 }
3150 if (has_msr_umwait) {
3151 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
3152 }
3153 if (has_msr_spec_ctrl) {
3154 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
3155 }
3156 if (has_msr_tsx_ctrl) {
3157 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
3158 }
3159 if (has_msr_virt_ssbd) {
3160 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
3161 }
3162 if (!env->tsc_valid) {
3163 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
3164 env->tsc_valid = !runstate_is_running();
3165 }
3166
3167 #ifdef TARGET_X86_64
3168 if (lm_capable_kernel) {
3169 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
3170 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
3171 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
3172 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
3173 }
3174 #endif
3175 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
3176 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
3177 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3178 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
3179 }
3180 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3181 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
3182 }
3183 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3184 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
3185 }
3186 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3187 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
3188 }
3189 if (has_architectural_pmu_version > 0) {
3190 if (has_architectural_pmu_version > 1) {
3191 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3192 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3193 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
3194 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
3195 }
3196 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3197 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
3198 }
3199 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3200 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
3201 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
3202 }
3203 }
3204
3205 if (env->mcg_cap) {
3206 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
3207 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
3208 if (has_msr_mcg_ext_ctl) {
3209 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
3210 }
3211 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3212 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
3213 }
3214 }
3215
3216 if (has_msr_hv_hypercall) {
3217 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
3218 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
3219 }
3220 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3221 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
3222 }
3223 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3224 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
3225 }
3226 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3227 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
3228 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
3229 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
3230 }
3231 if (has_msr_hv_crash) {
3232 int j;
3233
3234 for (j = 0; j < HV_CRASH_PARAMS; j++) {
3235 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
3236 }
3237 }
3238 if (has_msr_hv_runtime) {
3239 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
3240 }
3241 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3242 uint32_t msr;
3243
3244 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
3245 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
3246 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
3247 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
3248 kvm_msr_entry_add(cpu, msr, 0);
3249 }
3250 }
3251 if (has_msr_hv_stimer) {
3252 uint32_t msr;
3253
3254 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
3255 msr++) {
3256 kvm_msr_entry_add(cpu, msr, 0);
3257 }
3258 }
3259 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3260 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
3261 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
3262 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
3263 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
3264 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
3265 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
3266 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
3267 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
3268 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
3269 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
3270 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
3271 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
3272 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3273 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
3274 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
3275 }
3276 }
3277
3278 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3279 int addr_num =
3280 kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
3281
3282 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
3283 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
3284 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
3285 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
3286 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
3287 for (i = 0; i < addr_num; i++) {
3288 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
3289 }
3290 }
3291
3292 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
3293 if (ret < 0) {
3294 return ret;
3295 }
3296
3297 if (ret < cpu->kvm_msr_buf->nmsrs) {
3298 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3299 error_report("error: failed to get MSR 0x%" PRIx32,
3300 (uint32_t)e->index);
3301 }
3302
3303 assert(ret == cpu->kvm_msr_buf->nmsrs);
3304 /*
3305 * MTRR masks: Each mask consists of 5 parts
3306 * a  10..0: must be zero
3307 * b     11: valid bit
3308 * c n-1..12: actual mask bits
3309 * d  51..n: reserved, must be zero
3310 * e 63..52: reserved, must be zero
3311 *
3312 * 'n' is the number of physical bits supported by the CPU and is
3313 * apparently always <= 52. We know our 'n' but don't know what
3314 * the destination's 'n' is; it might be smaller, in which case
3315 * it masks (c) on loading. It might be larger, in which case
3316 * we fill 'd' so that d..c is consistent irrespective of the 'n'
3317 * we're migrating to.
3318 */
3319
3320 if (cpu->fill_mtrr_mask) {
3321 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
3322 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
3323 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
3324 } else {
3325 mtrr_top_bits = 0;
3326 }
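/*
 * Example: with cpu->phys_bits == 40, mtrr_top_bits covers bits 51..40,
 * i.e. 0x000fff0000000000, and is ORed into each variable MTRR mask
 * below so the mask stays consistent on a destination with a wider
 * physical address space.
 */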
3327
3328 for (i = 0; i < ret; i++) {
3329 uint32_t index = msrs[i].index;
3330 switch (index) {
3331 case MSR_IA32_SYSENTER_CS:
3332 env->sysenter_cs = msrs[i].data;
3333 break;
3334 case MSR_IA32_SYSENTER_ESP:
3335 env->sysenter_esp = msrs[i].data;
3336 break;
3337 case MSR_IA32_SYSENTER_EIP:
3338 env->sysenter_eip = msrs[i].data;
3339 break;
3340 case MSR_PAT:
3341 env->pat = msrs[i].data;
3342 break;
3343 case MSR_STAR:
3344 env->star = msrs[i].data;
3345 break;
3346 #ifdef TARGET_X86_64
3347 case MSR_CSTAR:
3348 env->cstar = msrs[i].data;
3349 break;
3350 case MSR_KERNELGSBASE:
3351 env->kernelgsbase = msrs[i].data;
3352 break;
3353 case MSR_FMASK:
3354 env->fmask = msrs[i].data;
3355 break;
3356 case MSR_LSTAR:
3357 env->lstar = msrs[i].data;
3358 break;
3359 #endif
3360 case MSR_IA32_TSC:
3361 env->tsc = msrs[i].data;
3362 break;
3363 case MSR_TSC_AUX:
3364 env->tsc_aux = msrs[i].data;
3365 break;
3366 case MSR_TSC_ADJUST:
3367 env->tsc_adjust = msrs[i].data;
3368 break;
3369 case MSR_IA32_TSCDEADLINE:
3370 env->tsc_deadline = msrs[i].data;
3371 break;
3372 case MSR_VM_HSAVE_PA:
3373 env->vm_hsave = msrs[i].data;
3374 break;
3375 case MSR_KVM_SYSTEM_TIME:
3376 env->system_time_msr = msrs[i].data;
3377 break;
3378 case MSR_KVM_WALL_CLOCK:
3379 env->wall_clock_msr = msrs[i].data;
3380 break;
3381 case MSR_MCG_STATUS:
3382 env->mcg_status = msrs[i].data;
3383 break;
3384 case MSR_MCG_CTL:
3385 env->mcg_ctl = msrs[i].data;
3386 break;
3387 case MSR_MCG_EXT_CTL:
3388 env->mcg_ext_ctl = msrs[i].data;
3389 break;
3390 case MSR_IA32_MISC_ENABLE:
3391 env->msr_ia32_misc_enable = msrs[i].data;
3392 break;
3393 case MSR_IA32_SMBASE:
3394 env->smbase = msrs[i].data;
3395 break;
3396 case MSR_SMI_COUNT:
3397 env->msr_smi_count = msrs[i].data;
3398 break;
3399 case MSR_IA32_FEATURE_CONTROL:
3400 env->msr_ia32_feature_control = msrs[i].data;
3401 break;
3402 case MSR_IA32_BNDCFGS:
3403 env->msr_bndcfgs = msrs[i].data;
3404 break;
3405 case MSR_IA32_XSS:
3406 env->xss = msrs[i].data;
3407 break;
3408 case MSR_IA32_UMWAIT_CONTROL:
3409 env->umwait = msrs[i].data;
3410 break;
3411 default:
3412 if (msrs[i].index >= MSR_MC0_CTL &&
3413 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
3414 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
3415 }
3416 break;
3417 case MSR_KVM_ASYNC_PF_EN:
3418 env->async_pf_en_msr = msrs[i].data;
3419 break;
3420 case MSR_KVM_PV_EOI_EN:
3421 env->pv_eoi_en_msr = msrs[i].data;
3422 break;
3423 case MSR_KVM_STEAL_TIME:
3424 env->steal_time_msr = msrs[i].data;
3425 break;
3426 case MSR_KVM_POLL_CONTROL: {
3427 env->poll_control_msr = msrs[i].data;
3428 break;
3429 }
3430 case MSR_CORE_PERF_FIXED_CTR_CTRL:
3431 env->msr_fixed_ctr_ctrl = msrs[i].data;
3432 break;
3433 case MSR_CORE_PERF_GLOBAL_CTRL:
3434 env->msr_global_ctrl = msrs[i].data;
3435 break;
3436 case MSR_CORE_PERF_GLOBAL_STATUS:
3437 env->msr_global_status = msrs[i].data;
3438 break;
3439 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
3440 env->msr_global_ovf_ctrl = msrs[i].data;
3441 break;
3442 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
3443 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
3444 break;
3445 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
3446 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
3447 break;
3448 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
3449 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
3450 break;
3451 case HV_X64_MSR_HYPERCALL:
3452 env->msr_hv_hypercall = msrs[i].data;
3453 break;
3454 case HV_X64_MSR_GUEST_OS_ID:
3455 env->msr_hv_guest_os_id = msrs[i].data;
3456 break;
3457 case HV_X64_MSR_APIC_ASSIST_PAGE:
3458 env->msr_hv_vapic = msrs[i].data;
3459 break;
3460 case HV_X64_MSR_REFERENCE_TSC:
3461 env->msr_hv_tsc = msrs[i].data;
3462 break;
3463 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3464 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
3465 break;
3466 case HV_X64_MSR_VP_RUNTIME:
3467 env->msr_hv_runtime = msrs[i].data;
3468 break;
3469 case HV_X64_MSR_SCONTROL:
3470 env->msr_hv_synic_control = msrs[i].data;
3471 break;
3472 case HV_X64_MSR_SIEFP:
3473 env->msr_hv_synic_evt_page = msrs[i].data;
3474 break;
3475 case HV_X64_MSR_SIMP:
3476 env->msr_hv_synic_msg_page = msrs[i].data;
3477 break;
3478 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
3479 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
3480 break;
3481 case HV_X64_MSR_STIMER0_CONFIG:
3482 case HV_X64_MSR_STIMER1_CONFIG:
3483 case HV_X64_MSR_STIMER2_CONFIG:
3484 case HV_X64_MSR_STIMER3_CONFIG:
3485 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
3486 msrs[i].data;
3487 break;
3488 case HV_X64_MSR_STIMER0_COUNT:
3489 case HV_X64_MSR_STIMER1_COUNT:
3490 case HV_X64_MSR_STIMER2_COUNT:
3491 case HV_X64_MSR_STIMER3_COUNT:
3492 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
3493 msrs[i].data;
3494 break;
3495 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3496 env->msr_hv_reenlightenment_control = msrs[i].data;
3497 break;
3498 case HV_X64_MSR_TSC_EMULATION_CONTROL:
3499 env->msr_hv_tsc_emulation_control = msrs[i].data;
3500 break;
3501 case HV_X64_MSR_TSC_EMULATION_STATUS:
3502 env->msr_hv_tsc_emulation_status = msrs[i].data;
3503 break;
3504 case MSR_MTRRdefType:
3505 env->mtrr_deftype = msrs[i].data;
3506 break;
3507 case MSR_MTRRfix64K_00000:
3508 env->mtrr_fixed[0] = msrs[i].data;
3509 break;
3510 case MSR_MTRRfix16K_80000:
3511 env->mtrr_fixed[1] = msrs[i].data;
3512 break;
3513 case MSR_MTRRfix16K_A0000:
3514 env->mtrr_fixed[2] = msrs[i].data;
3515 break;
3516 case MSR_MTRRfix4K_C0000:
3517 env->mtrr_fixed[3] = msrs[i].data;
3518 break;
3519 case MSR_MTRRfix4K_C8000:
3520 env->mtrr_fixed[4] = msrs[i].data;
3521 break;
3522 case MSR_MTRRfix4K_D0000:
3523 env->mtrr_fixed[5] = msrs[i].data;
3524 break;
3525 case MSR_MTRRfix4K_D8000:
3526 env->mtrr_fixed[6] = msrs[i].data;
3527 break;
3528 case MSR_MTRRfix4K_E0000:
3529 env->mtrr_fixed[7] = msrs[i].data;
3530 break;
3531 case MSR_MTRRfix4K_E8000:
3532 env->mtrr_fixed[8] = msrs[i].data;
3533 break;
3534 case MSR_MTRRfix4K_F0000:
3535 env->mtrr_fixed[9] = msrs[i].data;
3536 break;
3537 case MSR_MTRRfix4K_F8000:
3538 env->mtrr_fixed[10] = msrs[i].data;
3539 break;
3540 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
3541 if (index & 1) {
3542 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
3543 mtrr_top_bits;
3544 } else {
3545 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
3546 }
3547 break;
3548 case MSR_IA32_SPEC_CTRL:
3549 env->spec_ctrl = msrs[i].data;
3550 break;
3551 case MSR_IA32_TSX_CTRL:
3552 env->tsx_ctrl = msrs[i].data;
3553 break;
3554 case MSR_VIRT_SSBD:
3555 env->virt_ssbd = msrs[i].data;
3556 break;
3557 case MSR_IA32_RTIT_CTL:
3558 env->msr_rtit_ctrl = msrs[i].data;
3559 break;
3560 case MSR_IA32_RTIT_STATUS:
3561 env->msr_rtit_status = msrs[i].data;
3562 break;
3563 case MSR_IA32_RTIT_OUTPUT_BASE:
3564 env->msr_rtit_output_base = msrs[i].data;
3565 break;
3566 case MSR_IA32_RTIT_OUTPUT_MASK:
3567 env->msr_rtit_output_mask = msrs[i].data;
3568 break;
3569 case MSR_IA32_RTIT_CR3_MATCH:
3570 env->msr_rtit_cr3_match = msrs[i].data;
3571 break;
3572 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
3573 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
3574 break;
3575 }
3576 }
3577
3578 return 0;
3579 }
3580
3581 static int kvm_put_mp_state(X86CPU *cpu)
3582 {
3583 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
3584
3585 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
3586 }
3587
3588 static int kvm_get_mp_state(X86CPU *cpu)
3589 {
3590 CPUState *cs = CPU(cpu);
3591 CPUX86State *env = &cpu->env;
3592 struct kvm_mp_state mp_state;
3593 int ret;
3594
3595 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
3596 if (ret < 0) {
3597 return ret;
3598 }
3599 env->mp_state = mp_state.mp_state;
3600 if (kvm_irqchip_in_kernel()) {
3601 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
3602 }
3603 return 0;
3604 }
3605
3606 static int kvm_get_apic(X86CPU *cpu)
3607 {
3608 DeviceState *apic = cpu->apic_state;
3609 struct kvm_lapic_state kapic;
3610 int ret;
3611
3612 if (apic && kvm_irqchip_in_kernel()) {
3613 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
3614 if (ret < 0) {
3615 return ret;
3616 }
3617
3618 kvm_get_apic_state(apic, &kapic);
3619 }
3620 return 0;
3621 }
3622
3623 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
3624 {
3625 CPUState *cs = CPU(cpu);
3626 CPUX86State *env = &cpu->env;
3627 struct kvm_vcpu_events events = {};
3628
3629 if (!kvm_has_vcpu_events()) {
3630 return 0;
3631 }
3632
3633 events.flags = 0;
3634
3635 if (has_exception_payload) {
3636 events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
3637 events.exception.pending = env->exception_pending;
3638 events.exception_has_payload = env->exception_has_payload;
3639 events.exception_payload = env->exception_payload;
3640 }
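/*
 * Without KVM_CAP_EXCEPTION_PAYLOAD the kernel only understands the
 * legacy injected-exception format, so the pending/payload fields above
 * are left at zero.
 */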
3641 events.exception.nr = env->exception_nr;
3642 events.exception.injected = env->exception_injected;
3643 events.exception.has_error_code = env->has_error_code;
3644 events.exception.error_code = env->error_code;
3645
3646 events.interrupt.injected = (env->interrupt_injected >= 0);
3647 events.interrupt.nr = env->interrupt_injected;
3648 events.interrupt.soft = env->soft_interrupt;
3649
3650 events.nmi.injected = env->nmi_injected;
3651 events.nmi.pending = env->nmi_pending;
3652 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
3653
3654 events.sipi_vector = env->sipi_vector;
3655
3656 if (has_msr_smbase) {
3657 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
3658 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
3659 if (kvm_irqchip_in_kernel()) {
3660 /* As soon as these are moved to the kernel, remove them
3661 * from cs->interrupt_request.
3662 */
3663 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
3664 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
3665 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
3666 } else {
3667 /* Keep these in cs->interrupt_request. */
3668 events.smi.pending = 0;
3669 events.smi.latched_init = 0;
3670 }
3671 /* Stop SMI delivery on old machine types to avoid a reboot
3672 * on an inward migration of an old VM.
3673 */
3674 if (!cpu->kvm_no_smi_migration) {
3675 events.flags |= KVM_VCPUEVENT_VALID_SMM;
3676 }
3677 }
3678
3679 if (level >= KVM_PUT_RESET_STATE) {
3680 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
3681 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
3682 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
3683 }
3684 }
3685
3686 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
3687 }
3688
3689 static int kvm_get_vcpu_events(X86CPU *cpu)
3690 {
3691 CPUX86State *env = &cpu->env;
3692 struct kvm_vcpu_events events;
3693 int ret;
3694
3695 if (!kvm_has_vcpu_events()) {
3696 return 0;
3697 }
3698
3699 memset(&events, 0, sizeof(events));
3700 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
3701 if (ret < 0) {
3702 return ret;
3703 }
3704
3705 if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
3706 env->exception_pending = events.exception.pending;
3707 env->exception_has_payload = events.exception_has_payload;
3708 env->exception_payload = events.exception_payload;
3709 } else {
3710 env->exception_pending = 0;
3711 env->exception_has_payload = false;
3712 }
3713 env->exception_injected = events.exception.injected;
3714 env->exception_nr =
3715 (env->exception_pending || env->exception_injected) ?
3716 events.exception.nr : -1;
3717 env->has_error_code = events.exception.has_error_code;
3718 env->error_code = events.exception.error_code;
3719
3720 env->interrupt_injected =
3721 events.interrupt.injected ? events.interrupt.nr : -1;
3722 env->soft_interrupt = events.interrupt.soft;
3723
3724 env->nmi_injected = events.nmi.injected;
3725 env->nmi_pending = events.nmi.pending;
3726 if (events.nmi.masked) {
3727 env->hflags2 |= HF2_NMI_MASK;
3728 } else {
3729 env->hflags2 &= ~HF2_NMI_MASK;
3730 }
3731
3732 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
3733 if (events.smi.smm) {
3734 env->hflags |= HF_SMM_MASK;
3735 } else {
3736 env->hflags &= ~HF_SMM_MASK;
3737 }
3738 if (events.smi.pending) {
3739 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
3740 } else {
3741 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
3742 }
3743 if (events.smi.smm_inside_nmi) {
3744 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
3745 } else {
3746 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
3747 }
3748 if (events.smi.latched_init) {
3749 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
3750 } else {
3751 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
3752 }
3753 }
3754
3755 env->sipi_vector = events.sipi_vector;
3756
3757 return 0;
3758 }
3759
kvm_guest_debug_workarounds(X86CPU * cpu)3760 static int kvm_guest_debug_workarounds(X86CPU *cpu)
3761 {
3762 CPUState *cs = CPU(cpu);
3763 CPUX86State *env = &cpu->env;
3764 int ret = 0;
3765 unsigned long reinject_trap = 0;
3766
3767 if (!kvm_has_vcpu_events()) {
3768 if (env->exception_nr == EXCP01_DB) {
3769 reinject_trap = KVM_GUESTDBG_INJECT_DB;
3770 } else if (env->exception_injected == EXCP03_INT3) {
3771 reinject_trap = KVM_GUESTDBG_INJECT_BP;
3772 }
3773 kvm_reset_exception(env);
3774 }
3775
    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    memset(&dbgregs, 0, sizeof(dbgregs));
    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
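    /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear, so mirror them
     * here to keep the QEMU copies consistent. */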
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

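/*
 * env->nested_state is the opaque VMX/SVM nested-virtualization state blob
 * exchanged with the kernel via KVM_SET_NESTED_STATE/KVM_GET_NESTED_STATE;
 * its size can never exceed what kvm_max_nested_state_length() reports.
 */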
static int kvm_put_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();

    if (!env->nested_state) {
        return 0;
    }

    assert(env->nested_state->size <= max_nested_state_len);
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}

static int kvm_get_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();
    int ret;

    if (!env->nested_state) {
        return 0;
    }

    /*
     * It is possible that migration restored a smaller size into
     * nested_state->hdr.size than what our kernel supports. We preserve the
     * migration origin's nested_state->hdr.size for the call to
     * KVM_SET_NESTED_STATE, but make sure that our next call to
     * KVM_GET_NESTED_STATE uses the maximum size our kernel supports.
     */
    env->nested_state->size = max_nested_state_len;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
    if (ret < 0) {
        return ret;
    }

    if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
        env->hflags |= HF_GUEST_MASK;
    } else {
        env->hflags &= ~HF_GUEST_MASK;
    }

    return ret;
}

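/*
 * Write CPU state back into the kernel. 'level' controls how much state is
 * transferred: KVM_PUT_RESET_STATE additionally pushes nested state, the
 * feature-control MSR and the MP state, and KVM_PUT_FULL_STATE also
 * (re)programs the guest TSC frequency.
 */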
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_nested_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }

        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, so call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_nested_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}

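/*
 * Runs immediately before reentering the guest, without the iothread lock
 * held (it is taken where needed). Pending NMIs and SMIs are injected here,
 * and with a userspace PIC this also delivers ExtINT interrupts and decides
 * whether to request an interrupt-window exit.
 */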
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit. This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

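/*
 * Runs right after KVM_RUN returns: fold the SMM and interrupt-flag state
 * reported in kvm_run back into hflags/eflags and propagate CR8 (TPR) and
 * the APIC base to the APIC device model.
 */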
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

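/*
 * Handle events that must be processed in QEMU rather than by the kernel:
 * machine checks, INIT and SIPI, TPR access reports and, with a userspace
 * irqchip, APIC polling and HLT wakeup. A non-zero return means the CPU
 * should remain halted.
 */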
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_nr == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

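/*
 * Software breakpoints are implemented by patching the guest code with the
 * one-byte INT3 opcode (0xcc); the overwritten byte is saved in the
 * breakpoint so it can be restored on removal.
 */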
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

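/*
 * x86 has four hardware debug address registers (DR0-DR3), so at most four
 * hardware breakpoints or watchpoints can be armed at once; this table
 * mirrors what has been programmed.
 */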
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

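/*
 * Classify a KVM_EXIT_DEBUG exit: DR6.BS signals a single-step trap, while
 * bits 0-3 of DR6 identify which debug register fired; the matching R/W
 * field in DR7 (0 = execute, 1 = write, 3 = read/write) tells us whether to
 * report a breakpoint or a watchpoint. Debug exceptions we did not set up
 * ourselves are re-queued for injection into the guest.
 */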
static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == EXCP01_DB) {
        if (arch_info->dr6 & DR6_BS) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n * 4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_nr == -1);

        /* pass to guest */
        kvm_queue_exception(env, arch_info->exception,
                            arch_info->exception == EXCP01_DB,
                            arch_info->dr6);
        env->has_error_code = 0;
    }

    return ret;
}

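/*
 * Assemble the DR7 image handed to KVM: 0x0600 sets the GE flag and the
 * always-one bit 10, (2 << (n * 2)) is the global-enable bit for slot n,
 * and the two-bit R/W and LEN fields for slot n sit at bit 16 + n * 4 and
 * bit 18 + n * 4 respectively (LEN encodings: 1, 2, 8 and 4 bytes map to
 * 0, 1, 2 and 3).
 */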
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n * 4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n * 4));
        }
    }
}

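/* CPUID.01H:ECX bit 5 (VMX) advertises VT-x support on the host CPU. */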
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

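/* VMX basic exit reason 33, "VM-entry failure due to invalid guest state",
 * with bit 31 set to flag the failed entry. */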
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can most likely be due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on older Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}

int kvm_arch_irqchip_create(KVMState *s)
{
    int ret;
    if (kvm_kernel_irqchip_split()) {
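        /* The final argument is the number of IRQ routes (24, one per
         * IOAPIC pin) that the kernel reserves for the userspace IOAPIC. */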
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}

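/*
 * Give the platform IOMMU a chance to rewrite an MSI route before it is
 * installed in the kernel: with interrupt remapping enabled, the MSI
 * address/data pair programmed by the guest must be translated into its
 * host-visible form.
 */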
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        if (!class->int_remap) {
            return 0;
        }

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst, dev ?     \
                               pci_requester_id(dev) :      \
                               X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}

typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
    QLIST_HEAD_INITIALIZER(msi_route_list);

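/*
 * Runs as an IOMMU interrupt-entry-cache (IEC) notifier: whenever the
 * remapping state changes, re-read the MSI message of every tracked route
 * from its device and push the updated translation into the kernel.
 */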
static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSIX is disabled for the device, or the
             * specific message was masked out. Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* The first time a route is added, register ourselves as an
         * IEC notifier with the IOMMU, if there is one. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;
    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

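/*
 * On x86 MSI routes are installed explicitly through the routing table, so
 * a raw MSI data word never needs to be translated back to a GSI; reaching
 * this function indicates a bug.
 */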
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

bool kvm_has_waitpkg(void)
{
    return has_msr_umwait;
}