/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"

#include "cpu.h"
#include "host-cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "sev_i386.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/x86.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"
#include "sysemu/sev.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS 1
#define KVM_APIC_BUS_FREQUENCY (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
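/*
 * Sanity check on the comment above, assuming the layout from the KVM UAPI
 * headers (8-byte kvm_msrs header, 16 bytes per kvm_msr_entry):
 * (4096 - 8) / 16 = 255 entries fit, with 8 bytes to spare.
 */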

static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;
static int has_exception_payload;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_adjust_clock(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

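/*
 * MEMORIZE() caches the result of evaluating 'fn' in a function-local
 * static guard, so the (potentially expensive) call behind 'fn' runs only
 * once per process. Note the early 'return' inside the macro: it can only
 * be used directly in the body of a function returning _result's type, as
 * kvm_enable_x2apic() does below.
 */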
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
 \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    memset(&msr_data, 0, sizeof(msr_data));
    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();
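    /*
     * While the VM is stopped the TSC cannot advance, so the value fetched
     * below remains valid; cpu_update_state() clears tsc_valid again on the
     * transition back to running.
     */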

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
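    /*
     * try_get_cpuid() maps a too-small buffer to -E2BIG/NULL, so keep
     * doubling the entry count until the whole list fits; the result is
     * cached for subsequent callers.
     */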
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    return ret;
}

uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
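        /*
         * Worked example (illustrative values, not taken from the SDM): if
         * the MSR reads 0x0000001600000012, then must_be_one = 0x12 and
         * can_be_one = 0x16, so we report 0x04 -- the only bit the guest is
         * genuinely free to set or clear.
         */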
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

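    /*
     * The MCACOD values below appear to mirror Linux's MCA severity table
     * (MCACOD_DATA 0x134 for a data-load error, MCACOD_SCRUB 0xc0 for a
     * memory-scrubbing error); treat them as opaque error codes here.
     */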
    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                "for memory used by QEMU itself instead of guest system!",
                addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for the AO MCE; just emit a notification event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

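        /*
         * Without KVM_CAP_EXCEPTION_PAYLOAD the payload cannot travel with
         * the queued exception, so apply its architectural side effect
         * directly: DR6 for #DB, CR2 for #PF.
         */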
        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
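/*
 * Worked example: for freq = 2000000 kHz (a 2 GHz TSC), 250 ppm is
 * 2000000 * 250 / 1000000 = 500 kHz, so any target in
 * [1999500, 2000500] kHz is accepted.
 */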
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE},
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
};

static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
                                           bool do_sys_ioctl)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    if (do_sys_ioctl) {
        r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    }
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
    int max = 10;
    int i;
    bool do_sys_ioctl;

    do_sys_ioctl =
        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;

    /*
     * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
     * unsupported, kvm_hyperv_expand_features() checks for that.
     */
    assert(do_sys_ioctl || cs->kvm_state);

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
        max++;
    }

    /*
     * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
     * information early, just check for the capability and set the bit
     * manually.
     */
    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        for (i = 0; i < cpuid->nent; i++) {
            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
            }
        }
    }

    return cpuid;
}

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            /*
             * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
             * before KVM context is created but this is only done when
             * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
             * KVM_CAP_HYPERV_CPUID.
             */
            assert(cs->kvm_state);

            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}

static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}

/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
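        /*
         * Walk the dependency bitmask lowest bit first: ctz64() yields the
         * next set bit's feature index, which is then cleared below. E.g.
         * hv-stimer's mask is BIT(SYNIC) | BIT(TIME), so both get checked.
         */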
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return false;
        }
        deps &= ~(1ull << dep_feat);
    }

    return true;
}

static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    return r;
}

/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and that the configuration is sane
 * (i.e. all the required dependencies are included). Also, this takes care
 * of 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int feat;

    if (!hyperv_enabled(cpu)) {
        return true;
    }

    /*
     * When kvm_hyperv_expand_features is called at CPU feature expansion
     * time per-CPU kvm_state is not available yet so we can only proceed
     * when KVM_CAP_SYS_HYPERV_CPUID is supported.
     */
    if (!cs->kvm_state &&
        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID)) {
        return true;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_version_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_version_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX);
        cpu->hyperv_version_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_version_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX);

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);

        /*
         * Mark feature as enabled in 'cpu->hyperv_features' as
         * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
         */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            if (hyperv_feature_supported(cs, feat)) {
                cpu->hyperv_features |= BIT(feat);
            }
        }
    } else {
        /* Check features availability and dependencies */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            /* If the feature was not requested skip it. */
            if (!hyperv_feat_enabled(cpu, feat)) {
                continue;
            }

            /* Check if the feature is supported by KVM */
            if (!hyperv_feature_supported(cs, feat)) {
                error_setg(errp, "Hyper-V %s is not supported by kernel",
                           kvm_hyperv_properties[feat].desc);
                return false;
            }

            /* Check dependencies */
            if (!hv_feature_check_deps(cpu, feat, &local_err)) {
                error_propagate(errp, local_err);
                return false;
            }
        }
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        return false;
    }

    return true;
}

/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t cpuid_i = 0;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
        HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_version_id[0];
    c->ebx = cpu->hyperv_version_id[1];
    c->ecx = cpu->hyperv_version_id[2];
    c->edx = cpu->hyperv_version_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Unconditionally required with any Hyper-V enlightenment */
    c->eax |= HV_HYPERCALL_AVAILABLE;

    /* SynIC and Vmbus devices require messages/signals hypercalls */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only) {
        c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
            HV_NO_NONARCH_CORESHARING;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        __u32 function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = cpu->hyperv_nested[0];
    }

    return cpuid_i;
}

static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

/* Checks that the exposed eVMCS version range is supported by KVM */
static bool evmcs_version_supported(uint16_t evmcs_version,
                                    uint16_t supported_evmcs_version)
{
    uint8_t min_version = evmcs_version & 0xff;
    uint8_t max_version = evmcs_version >> 8;
    uint8_t min_supported_version = supported_evmcs_version & 0xff;
    uint8_t max_supported_version = supported_evmcs_version >> 8;

    return (min_version >= min_supported_version) &&
        (max_version <= max_supported_version);
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)
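/*
 * The packed format above follows evmcs_version_supported(): the low byte
 * is the minimum supported eVMCS version and the high byte the maximum, so
 * ((1 << 8) | 1) advertises exactly version 1.
 */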

static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[1];
        } msr_data = {
            .info.nmsrs = 1,
            .entries[0].index = HV_X64_MSR_VP_INDEX,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
        if (ret < 0) {
            return ret;
        }
        assert(ret == 1);

        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
        uint16_t supported_evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&supported_evmcs_version);

        /*
         * KVM is required to support EVMCS ver.1. as that's what 'hv-evmcs'
         * option sets. Note: we hardcode the maximum supported eVMCS version
         * to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
         * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
         * to be added.
         */
        if (ret < 0) {
            error_report("Hyper-V %s is not supported by kernel",
                         kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
            error_report("eVMCS version range [%d..%d] is not supported by "
                         "kernel (supported: [%d..%d])", evmcs_version & 0xff,
                         evmcs_version >> 8, supported_evmcs_version & 0xff,
                         supported_evmcs_version >> 8);
            return -ENOTSUP;
        }

        cpu->hyperv_nested[0] = evmcs_version;
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES 100

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } cpuid_data;
    /*
     * The kernel defines these structs with padding fields so there
     * should be no extra padding in our cpuid_data struct.
     */
    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
                      sizeof(struct kvm_cpuid2) +
                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int max_nested_state_len;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /* vcpu's TSC frequency is either specified by user, or following
     * the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record in env->tsc_khz,
     * so that vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;

    /*
     * kvm_hyperv_expand_features() is called here for the second time in case
     * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
     * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
     * check which Hyper-V enlightenments are supported and which are not, we
     * can still proceed and check/expand Hyper-V enlightenments here so legacy
     * behavior is preserved.
     */
    if (!kvm_hyperv_expand_features(cpu, &local_err)) {
        error_report_err(local_err);
        return -ENOSYS;
    }

    if (hyperv_enabled(cpu)) {
        r = hyperv_init_vcpu(cpu);
        if (r) {
            return r;
        }

        cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
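        /*
         * "KVMKVMKVM\0\0\0" is KVM's 12-byte hypervisor signature; split
         * into three 32-bit words it lands in EBX/ECX/EDX of the signature
         * leaf filled in below.
         */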
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
        c->edx = env->features[FEAT_KVM_HINTS];
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (env->nr_dies < 2) {
                break;
            }
            /* fallthrough */
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                if (i == 0x1f && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        case 0x7:
        case 0x14: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

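        /*
         * CPUID.0AH decoding per the architectural PMU definition:
         * EAX[7:0] = PMU version, EAX[15:8] = number of GP counters,
         * EDX[4:0] = number of fixed counters (version > 1 only).
         */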
        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    /* Call Centaur's CPUID instructions if they are supported. */
1816 if (env->cpuid_xlevel2 > 0) {
1817 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
1818
1819 for (i = 0xC0000000; i <= limit; i++) {
1820 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1821 fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
1822 abort();
1823 }
1824 c = &cpuid_data.entries[cpuid_i++];
1825
1826 c->function = i;
1827 c->flags = 0;
1828 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1829 }
1830 }
1831
1832 cpuid_data.cpuid.nent = cpuid_i;
1833
1834 if (((env->cpuid_version >> 8)&0xF) >= 6
1835 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
1836 (CPUID_MCE | CPUID_MCA)
1837 && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
1838 uint64_t mcg_cap, unsupported_caps;
1839 int banks;
1840 int ret;
1841
1842 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
1843 if (ret < 0) {
1844 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
1845 return ret;
1846 }
1847
1848 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
1849 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
1850 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
1851 return -ENOTSUP;
1852 }
1853
1854 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
1855 if (unsupported_caps) {
1856 if (unsupported_caps & MCG_LMCE_P) {
1857 error_report("kvm: LMCE not supported");
1858 return -ENOTSUP;
1859 }
1860 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
1861 unsupported_caps);
1862 }
1863
1864 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
1865 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
1866 if (ret < 0) {
1867 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
1868 return ret;
1869 }
1870 }
1871
1872 cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
1873
1874 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
1875 if (c) {
1876 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
1877 !!(c->ecx & CPUID_EXT_SMX);
1878 }
1879
1880 if (env->mcg_cap & MCG_LMCE_P) {
1881 has_msr_mcg_ext_ctl = has_msr_feature_control = true;
1882 }
1883
1884 if (!env->user_tsc_khz) {
1885 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
1886 invtsc_mig_blocker == NULL) {
1887 error_setg(&invtsc_mig_blocker,
1888 "State blocked by non-migratable CPU device"
1889 " (invtsc flag)");
1890 r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
1891 if (r < 0) {
1892 error_report_err(local_err);
1893 return r;
1894 }
1895 }
1896 }
1897
1898 if (cpu->vmware_cpuid_freq
1899 /* Guests depend on 0x40000000 to detect this feature, so only expose
1900 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
1901 && cpu->expose_kvm
1902 && kvm_base == KVM_CPUID_SIGNATURE
1903 /* TSC clock must be stable and known for this feature. */
1904 && tsc_is_stable_and_known(env)) {
1905
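/*
 * Leaf 0x40000010 (KVM_CPUID_SIGNATURE | 0x10) is the VMware-compatible
 * frequency leaf: eax reports the TSC rate in kHz, ebx the APIC bus
 * rate in kHz.
 */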
1906 c = &cpuid_data.entries[cpuid_i++];
1907 c->function = KVM_CPUID_SIGNATURE | 0x10;
1908 c->eax = env->tsc_khz;
1909 c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
1910 c->ecx = c->edx = 0;
1911
1912 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
1913 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
1914 }
1915
1916 cpuid_data.cpuid.nent = cpuid_i;
1917
1918 cpuid_data.cpuid.padding = 0;
1919 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
1920 if (r) {
1921 goto fail;
1922 }
1923
1924 if (has_xsave) {
1925 env->xsave_buf_len = sizeof(struct kvm_xsave);
1926 env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
1927 memset(env->xsave_buf, 0, env->xsave_buf_len);
1928
1929 /*
1930 * The allocated storage must be large enough for all of the
1931 * possible XSAVE state components.
1932 */
1933 assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX)
1934 <= env->xsave_buf_len);
1935 }
1936
1937 max_nested_state_len = kvm_max_nested_state_length();
1938 if (max_nested_state_len > 0) {
1939 assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
1940
1941 if (cpu_has_vmx(env) || cpu_has_svm(env)) {
1942 struct kvm_vmx_nested_state_hdr *vmx_hdr;
1943
1944 env->nested_state = g_malloc0(max_nested_state_len);
1945 env->nested_state->size = max_nested_state_len;
1946
1947 if (cpu_has_vmx(env)) {
1948 env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
1949 vmx_hdr = &env->nested_state->hdr.vmx;
1950 vmx_hdr->vmxon_pa = -1ull;
1951 vmx_hdr->vmcs12_pa = -1ull;
1952 } else {
1953 env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
1954 }
1955 }
1956 }
1957
1958 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
1959
1960 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
1961 has_msr_tsc_aux = false;
1962 }
1963
1964 kvm_init_msrs(cpu);
1965
1966 return 0;
1967
1968 fail:
1969 migrate_del_blocker(invtsc_mig_blocker);
1970
1971 return r;
1972 }
1973
1974 int kvm_arch_destroy_vcpu(CPUState *cs)
1975 {
1976 X86CPU *cpu = X86_CPU(cs);
1977 CPUX86State *env = &cpu->env;
1978
1979 if (cpu->kvm_msr_buf) {
1980 g_free(cpu->kvm_msr_buf);
1981 cpu->kvm_msr_buf = NULL;
1982 }
1983
1984 if (env->nested_state) {
1985 g_free(env->nested_state);
1986 env->nested_state = NULL;
1987 }
1988
1989 qemu_del_vm_change_state_handler(cpu->vmsentry);
1990
1991 return 0;
1992 }
1993
1994 void kvm_arch_reset_vcpu(X86CPU *cpu)
1995 {
1996 CPUX86State *env = &cpu->env;
1997
1998 env->xcr0 = 1;
1999 if (kvm_irqchip_in_kernel()) {
2000 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
2001 KVM_MP_STATE_UNINITIALIZED;
2002 } else {
2003 env->mp_state = KVM_MP_STATE_RUNNABLE;
2004 }
2005
2006 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
2007 int i;
2008 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
2009 env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
2010 }
2011
2012 hyperv_x86_synic_reset(cpu);
2013 }
2014 /* enabled by default */
2015 env->poll_control_msr = 1;
2016
2017 sev_es_set_reset_vector(CPU(cpu));
2018 }
2019
2020 void kvm_arch_do_init_vcpu(X86CPU *cpu)
2021 {
2022 CPUX86State *env = &cpu->env;
2023
2024 /* APs get directly into wait-for-SIPI state. */
2025 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
2026 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
2027 }
2028 }
2029
2030 static int kvm_get_supported_feature_msrs(KVMState *s)
2031 {
2032 int ret = 0;
2033
2034 if (kvm_feature_msrs != NULL) {
2035 return 0;
2036 }
2037
2038 if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
2039 return 0;
2040 }
2041
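/*
 * KVM_GET_MSR_FEATURE_INDEX_LIST uses a probe-then-fetch protocol:
 * the first call with nmsrs == 0 fails with -E2BIG but fills in the
 * real count, so the second call can be sized exactly.
 */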
2042 struct kvm_msr_list msr_list;
2043
2044 msr_list.nmsrs = 0;
2045 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
2046 if (ret < 0 && ret != -E2BIG) {
2047 error_report("Fetch KVM feature MSR list failed: %s",
2048 strerror(-ret));
2049 return ret;
2050 }
2051
2052 assert(msr_list.nmsrs > 0);
2053 kvm_feature_msrs = (struct kvm_msr_list *)
2054 g_malloc0(sizeof(msr_list) +
2055 msr_list.nmsrs * sizeof(msr_list.indices[0]));
2056
2057 kvm_feature_msrs->nmsrs = msr_list.nmsrs;
2058 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
2059
2060 if (ret < 0) {
2061 error_report("Fetch KVM feature MSR list failed: %s",
2062 strerror(-ret));
2063 g_free(kvm_feature_msrs);
2064 kvm_feature_msrs = NULL;
2065 return ret;
2066 }
2067
2068 return 0;
2069 }
2070
2071 static int kvm_get_supported_msrs(KVMState *s)
2072 {
2073 int ret = 0;
2074 struct kvm_msr_list msr_list, *kvm_msr_list;
2075
2076 /*
2077 * Obtain MSR list from KVM. These are the MSRs that we must
2078 * save/restore.
2079 */
2080 msr_list.nmsrs = 0;
2081 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
2082 if (ret < 0 && ret != -E2BIG) {
2083 return ret;
2084 }
2085 /*
2086 * Old kernel modules had a bug and could write beyond the provided
2087 * memory. Allocate at least 1K to be safe.
2088 */
2089 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
2090 msr_list.nmsrs *
2091 sizeof(msr_list.indices[0])));
2092
2093 kvm_msr_list->nmsrs = msr_list.nmsrs;
2094 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
2095 if (ret >= 0) {
2096 int i;
2097
2098 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
2099 switch (kvm_msr_list->indices[i]) {
2100 case MSR_STAR:
2101 has_msr_star = true;
2102 break;
2103 case MSR_VM_HSAVE_PA:
2104 has_msr_hsave_pa = true;
2105 break;
2106 case MSR_TSC_AUX:
2107 has_msr_tsc_aux = true;
2108 break;
2109 case MSR_TSC_ADJUST:
2110 has_msr_tsc_adjust = true;
2111 break;
2112 case MSR_IA32_TSCDEADLINE:
2113 has_msr_tsc_deadline = true;
2114 break;
2115 case MSR_IA32_SMBASE:
2116 has_msr_smbase = true;
2117 break;
2118 case MSR_SMI_COUNT:
2119 has_msr_smi_count = true;
2120 break;
2121 case MSR_IA32_MISC_ENABLE:
2122 has_msr_misc_enable = true;
2123 break;
2124 case MSR_IA32_BNDCFGS:
2125 has_msr_bndcfgs = true;
2126 break;
2127 case MSR_IA32_XSS:
2128 has_msr_xss = true;
2129 break;
2130 case MSR_IA32_UMWAIT_CONTROL:
2131 has_msr_umwait = true;
2132 break;
2133 case HV_X64_MSR_CRASH_CTL:
2134 has_msr_hv_crash = true;
2135 break;
2136 case HV_X64_MSR_RESET:
2137 has_msr_hv_reset = true;
2138 break;
2139 case HV_X64_MSR_VP_INDEX:
2140 has_msr_hv_vpindex = true;
2141 break;
2142 case HV_X64_MSR_VP_RUNTIME:
2143 has_msr_hv_runtime = true;
2144 break;
2145 case HV_X64_MSR_SCONTROL:
2146 has_msr_hv_synic = true;
2147 break;
2148 case HV_X64_MSR_STIMER0_CONFIG:
2149 has_msr_hv_stimer = true;
2150 break;
2151 case HV_X64_MSR_TSC_FREQUENCY:
2152 has_msr_hv_frequencies = true;
2153 break;
2154 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2155 has_msr_hv_reenlightenment = true;
2156 break;
2157 case MSR_IA32_SPEC_CTRL:
2158 has_msr_spec_ctrl = true;
2159 break;
2160 case MSR_IA32_TSX_CTRL:
2161 has_msr_tsx_ctrl = true;
2162 break;
2163 case MSR_VIRT_SSBD:
2164 has_msr_virt_ssbd = true;
2165 break;
2166 case MSR_IA32_ARCH_CAPABILITIES:
2167 has_msr_arch_capabs = true;
2168 break;
2169 case MSR_IA32_CORE_CAPABILITY:
2170 has_msr_core_capabs = true;
2171 break;
2172 case MSR_IA32_PERF_CAPABILITIES:
2173 has_msr_perf_capabs = true;
2174 break;
2175 case MSR_IA32_VMX_VMFUNC:
2176 has_msr_vmx_vmfunc = true;
2177 break;
2178 case MSR_IA32_UCODE_REV:
2179 has_msr_ucode_rev = true;
2180 break;
2181 case MSR_IA32_VMX_PROCBASED_CTLS2:
2182 has_msr_vmx_procbased_ctls2 = true;
2183 break;
2184 case MSR_IA32_PKRS:
2185 has_msr_pkrs = true;
2186 break;
2187 }
2188 }
2189 }
2190
2191 g_free(kvm_msr_list);
2192
2193 return ret;
2194 }
2195
2196 static Notifier smram_machine_done;
2197 static KVMMemoryListener smram_listener;
2198 static AddressSpace smram_address_space;
2199 static MemoryRegion smram_as_root;
2200 static MemoryRegion smram_as_mem;
2201
2202 static void register_smram_listener(Notifier *n, void *unused)
2203 {
2204 MemoryRegion *smram =
2205 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2206
2207 /* Outer container... */
2208 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
2209 memory_region_set_enabled(&smram_as_root, true);
2210
2211 /* ... with two regions inside: normal system memory with low
2212 * priority, and...
2213 */
2214 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
2215 get_system_memory(), 0, ~0ull);
2216 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
2217 memory_region_set_enabled(&smram_as_mem, true);
2218
2219 if (smram) {
2220 /* ... SMRAM with higher priority */
2221 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
2222 memory_region_set_enabled(smram, true);
2223 }
2224
2225 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
2226 kvm_memory_listener_register(kvm_state, &smram_listener,
2227 &smram_address_space, 1);
2228 }
2229
2230 int kvm_arch_init(MachineState *ms, KVMState *s)
2231 {
2232 uint64_t identity_base = 0xfffbc000;
2233 uint64_t shadow_mem;
2234 int ret;
2235 struct utsname utsname;
2236 Error *local_err = NULL;
2237
2238 /*
2239 * Initialize SEV context, if required
2240 *
2241 * If no memory encryption is requested (ms->cgs == NULL) this is
2242 * a no-op.
2243 *
2244 * It's also a no-op if a non-SEV confidential guest support
2245 * mechanism is selected. SEV is the only mechanism available to
2246 * select on x86 at present, so this doesn't arise, but if new
2247 * mechanisms are supported in future (e.g. TDX), they'll need
2248 * their own initialization either here or elsewhere.
2249 */
2250 ret = sev_kvm_init(ms->cgs, &local_err);
2251 if (ret < 0) {
2252 error_report_err(local_err);
2253 return ret;
2254 }
2255
2256 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
2257 error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM");
2258 return -ENOTSUP;
2259 }
2260
2261 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
2262 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
2263 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
2264
2265 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
2266
2267 has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
2268 if (has_exception_payload) {
2269 ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
2270 if (ret < 0) {
2271 error_report("kvm: Failed to enable exception payload cap: %s",
2272 strerror(-ret));
2273 return ret;
2274 }
2275 }
2276
2277 ret = kvm_get_supported_msrs(s);
2278 if (ret < 0) {
2279 return ret;
2280 }
2281
2282 kvm_get_supported_feature_msrs(s);
2283
2284 uname(&utsname);
2285 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
2286
2287 /*
2288 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
2289 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
2290 * Since these must be part of guest physical memory, we need to allocate
2291 * them, both by setting their start addresses in the kernel and by
2292 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
2293 *
2294 * Older KVM versions may not support setting the identity map base. In
2295 * that case we need to stick with the default, i.e. a 256K maximum BIOS
2296 * size.
2297 */
2298 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
2299 /* Allows up to 16M BIOSes. */
2300 identity_base = 0xfeffc000;
2301
2302 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
2303 if (ret < 0) {
2304 return ret;
2305 }
2306 }
2307
2308 /* Set TSS base one page after EPT identity map. */
2309 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
2310 if (ret < 0) {
2311 return ret;
2312 }
2313
2314 /* Tell fw_cfg to notify the BIOS to reserve the range. */
2315 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
2316 if (ret < 0) {
2317 fprintf(stderr, "e820_add_entry() table is full\n");
2318 return ret;
2319 }
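/*
 * Resulting layout: one page at identity_base for the EPT identity
 * map, then three pages of TSS at identity_base + 0x1000; together
 * these make up the 0x4000 bytes reserved above.
 */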
2320
2321 shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
2322 if (shadow_mem != -1) {
2323 shadow_mem /= 4096;
2324 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
2325 if (ret < 0) {
2326 return ret;
2327 }
2328 }
2329
2330 if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
2331 object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
2332 x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
2333 smram_machine_done.notify = register_smram_listener;
2334 qemu_add_machine_init_done_notifier(&smram_machine_done);
2335 }
2336
2337 if (enable_cpu_pm) {
2338 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
2339 int ret;
2340
2341 /* Workaround for a kernel header with a typo. TODO: fix header and drop. */
2342 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
2343 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
2344 #endif
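/*
 * Disabling these exits lets the guest execute MWAIT/HLT/PAUSE and
 * C-state transitions natively instead of trapping to KVM; only exit
 * types the kernel actually advertises are requested.
 */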
2345 if (disable_exits) {
2346 disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
2347 KVM_X86_DISABLE_EXITS_HLT |
2348 KVM_X86_DISABLE_EXITS_PAUSE |
2349 KVM_X86_DISABLE_EXITS_CSTATE);
2350 }
2351
2352 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
2353 disable_exits);
2354 if (ret < 0) {
2355 error_report("kvm: guest stopping CPU not supported: %s",
2356 strerror(-ret));
2357 }
2358 }
2359
2360 if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
2361 X86MachineState *x86ms = X86_MACHINE(ms);
2362
2363 if (x86ms->bus_lock_ratelimit > 0) {
2364 ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
2365 if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
2366 error_report("kvm: bus lock detection unsupported");
2367 return -ENOTSUP;
2368 }
2369 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
2370 KVM_BUS_LOCK_DETECTION_EXIT);
2371 if (ret < 0) {
2372 error_report("kvm: Failed to enable bus lock detection cap: %s",
2373 strerror(-ret));
2374 return ret;
2375 }
2376 ratelimit_init(&bus_lock_ratelimit_ctrl);
2377 ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
2378 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
2379 }
2380 }
2381
2382 return 0;
2383 }
2384
2385 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2386 {
2387 lhs->selector = rhs->selector;
2388 lhs->base = rhs->base;
2389 lhs->limit = rhs->limit;
2390 lhs->type = 3;
2391 lhs->present = 1;
2392 lhs->dpl = 3;
2393 lhs->db = 0;
2394 lhs->s = 1;
2395 lhs->l = 0;
2396 lhs->g = 0;
2397 lhs->avl = 0;
2398 lhs->unusable = 0;
2399 }
2400
2401 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2402 {
2403 unsigned flags = rhs->flags;
2404 lhs->selector = rhs->selector;
2405 lhs->base = rhs->base;
2406 lhs->limit = rhs->limit;
2407 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
2408 lhs->present = (flags & DESC_P_MASK) != 0;
2409 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
2410 lhs->db = (flags >> DESC_B_SHIFT) & 1;
2411 lhs->s = (flags & DESC_S_MASK) != 0;
2412 lhs->l = (flags >> DESC_L_SHIFT) & 1;
2413 lhs->g = (flags & DESC_G_MASK) != 0;
2414 lhs->avl = (flags & DESC_AVL_MASK) != 0;
2415 lhs->unusable = !lhs->present;
2416 lhs->padding = 0;
2417 }
2418
2419 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
2420 {
2421 lhs->selector = rhs->selector;
2422 lhs->base = rhs->base;
2423 lhs->limit = rhs->limit;
2424 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
2425 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
2426 (rhs->dpl << DESC_DPL_SHIFT) |
2427 (rhs->db << DESC_B_SHIFT) |
2428 (rhs->s * DESC_S_MASK) |
2429 (rhs->l << DESC_L_SHIFT) |
2430 (rhs->g * DESC_G_MASK) |
2431 (rhs->avl * DESC_AVL_MASK);
2432 }
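/*
 * For illustration: a flat 64-bit code segment (type 0xb, s=1, dpl=0,
 * present, l=1, all other bits clear) comes back from get_seg() with
 * flags == 0x00209b00.
 */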
2433
2434 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
2435 {
2436 if (set) {
2437 *kvm_reg = *qemu_reg;
2438 } else {
2439 *qemu_reg = *kvm_reg;
2440 }
2441 }
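/*
 * The 'set' argument selects the copy direction, which lets
 * kvm_getput_regs() below share one body between KVM_GET_REGS and
 * KVM_SET_REGS.
 */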
2442
2443 static int kvm_getput_regs(X86CPU *cpu, int set)
2444 {
2445 CPUX86State *env = &cpu->env;
2446 struct kvm_regs regs;
2447 int ret = 0;
2448
2449 if (!set) {
2450 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
2451 if (ret < 0) {
2452 return ret;
2453 }
2454 }
2455
2456 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
2457 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
2458 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
2459 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
2460 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
2461 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
2462 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
2463 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
2464 #ifdef TARGET_X86_64
2465 kvm_getput_reg(&regs.r8, &env->regs[8], set);
2466 kvm_getput_reg(&regs.r9, &env->regs[9], set);
2467 kvm_getput_reg(&regs.r10, &env->regs[10], set);
2468 kvm_getput_reg(&regs.r11, &env->regs[11], set);
2469 kvm_getput_reg(&regs.r12, &env->regs[12], set);
2470 kvm_getput_reg(&regs.r13, &env->regs[13], set);
2471 kvm_getput_reg(&regs.r14, &env->regs[14], set);
2472 kvm_getput_reg(&regs.r15, &env->regs[15], set);
2473 #endif
2474
2475 kvm_getput_reg(&regs.rflags, &env->eflags, set);
2476 kvm_getput_reg(&regs.rip, &env->eip, set);
2477
2478 if (set) {
2479 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
2480 }
2481
2482 return ret;
2483 }
2484
2485 static int kvm_put_fpu(X86CPU *cpu)
2486 {
2487 CPUX86State *env = &cpu->env;
2488 struct kvm_fpu fpu;
2489 int i;
2490
2491 memset(&fpu, 0, sizeof fpu);
2492 fpu.fsw = env->fpus & ~(7 << 11);
2493 fpu.fsw |= (env->fpstt & 7) << 11;
2494 fpu.fcw = env->fpuc;
2495 fpu.last_opcode = env->fpop;
2496 fpu.last_ip = env->fpip;
2497 fpu.last_dp = env->fpdp;
2498 for (i = 0; i < 8; ++i) {
2499 fpu.ftwx |= (!env->fptags[i]) << i;
2500 }
2501 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
2502 for (i = 0; i < CPU_NB_REGS; i++) {
2503 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
2504 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
2505 }
2506 fpu.mxcsr = env->mxcsr;
2507
2508 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
2509 }
2510
2511 static int kvm_put_xsave(X86CPU *cpu)
2512 {
2513 CPUX86State *env = &cpu->env;
2514 void *xsave = env->xsave_buf;
2515
2516 if (!has_xsave) {
2517 return kvm_put_fpu(cpu);
2518 }
2519 x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
2520
2521 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
2522 }
2523
2524 static int kvm_put_xcrs(X86CPU *cpu)
2525 {
2526 CPUX86State *env = &cpu->env;
2527 struct kvm_xcrs xcrs = {};
2528
2529 if (!has_xcrs) {
2530 return 0;
2531 }
2532
2533 xcrs.nr_xcrs = 1;
2534 xcrs.flags = 0;
2535 xcrs.xcrs[0].xcr = 0;
2536 xcrs.xcrs[0].value = env->xcr0;
2537 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
2538 }
2539
2540 static int kvm_put_sregs(X86CPU *cpu)
2541 {
2542 CPUX86State *env = &cpu->env;
2543 struct kvm_sregs sregs;
2544
2545 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
2546 if (env->interrupt_injected >= 0) {
2547 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
2548 (uint64_t)1 << (env->interrupt_injected % 64);
2549 }
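/*
 * For example, a pending vector 0x41 sets bit 1 of word 1 of the
 * 256-bit interrupt bitmap.
 */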
2550
2551 if ((env->eflags & VM_MASK)) {
2552 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2553 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2554 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2555 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2556 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2557 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2558 } else {
2559 set_seg(&sregs.cs, &env->segs[R_CS]);
2560 set_seg(&sregs.ds, &env->segs[R_DS]);
2561 set_seg(&sregs.es, &env->segs[R_ES]);
2562 set_seg(&sregs.fs, &env->segs[R_FS]);
2563 set_seg(&sregs.gs, &env->segs[R_GS]);
2564 set_seg(&sregs.ss, &env->segs[R_SS]);
2565 }
2566
2567 set_seg(&sregs.tr, &env->tr);
2568 set_seg(&sregs.ldt, &env->ldt);
2569
2570 sregs.idt.limit = env->idt.limit;
2571 sregs.idt.base = env->idt.base;
2572 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2573 sregs.gdt.limit = env->gdt.limit;
2574 sregs.gdt.base = env->gdt.base;
2575 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2576
2577 sregs.cr0 = env->cr[0];
2578 sregs.cr2 = env->cr[2];
2579 sregs.cr3 = env->cr[3];
2580 sregs.cr4 = env->cr[4];
2581
2582 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2583 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2584
2585 sregs.efer = env->efer;
2586
2587 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
2588 }
2589
2590 static void kvm_msr_buf_reset(X86CPU *cpu)
2591 {
2592 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
2593 }
2594
2595 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
2596 {
2597 struct kvm_msrs *msrs = cpu->kvm_msr_buf;
2598 void *limit = ((void *)msrs) + MSR_BUF_SIZE;
2599 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
2600
2601 assert((void *)(entry + 1) <= limit);
2602
2603 entry->index = index;
2604 entry->reserved = 0;
2605 entry->data = value;
2606 msrs->nmsrs++;
2607 }
2608
2609 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
2610 {
2611 kvm_msr_buf_reset(cpu);
2612 kvm_msr_entry_add(cpu, index, value);
2613
2614 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2615 }
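/*
 * KVM_SET_MSRS returns the number of entries processed, so a return
 * value of 1 from kvm_put_one_msr() means the single MSR was written
 * (hence the assert(ret == 1) checks below).
 */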
2616
2617 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
2618 {
2619 int ret;
2620
2621 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
2622 assert(ret == 1);
2623 }
2624
2625 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
2626 {
2627 CPUX86State *env = &cpu->env;
2628 int ret;
2629
2630 if (!has_msr_tsc_deadline) {
2631 return 0;
2632 }
2633
2634 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
2635 if (ret < 0) {
2636 return ret;
2637 }
2638
2639 assert(ret == 1);
2640 return 0;
2641 }
2642
2643 /*
2644 * Provide a separate write service for the feature control MSR in order to
2645 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
2646 * before writing any other state because forcibly leaving nested mode
2647 * invalidates the VCPU state.
2648 */
2649 static int kvm_put_msr_feature_control(X86CPU *cpu)
2650 {
2651 int ret;
2652
2653 if (!has_msr_feature_control) {
2654 return 0;
2655 }
2656
2657 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
2658 cpu->env.msr_ia32_feature_control);
2659 if (ret < 0) {
2660 return ret;
2661 }
2662
2663 assert(ret == 1);
2664 return 0;
2665 }
2666
2667 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
2668 {
2669 uint32_t default1, can_be_one, can_be_zero;
2670 uint32_t must_be_one;
2671
2672 switch (index) {
2673 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2674 default1 = 0x00000016;
2675 break;
2676 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2677 default1 = 0x0401e172;
2678 break;
2679 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2680 default1 = 0x000011ff;
2681 break;
2682 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2683 default1 = 0x00036dff;
2684 break;
2685 case MSR_IA32_VMX_PROCBASED_CTLS2:
2686 default1 = 0;
2687 break;
2688 default:
2689 abort();
2690 }
2691
2692 /* If a feature bit is set, the control can be either set or clear.
2693 * Otherwise the value is limited to either 0 or 1 by default1.
2694 */
2695 can_be_one = features | default1;
2696 can_be_zero = features | ~default1;
2697 must_be_one = ~can_be_zero;
2698
2699 /*
2700 * Bits 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
2701 * Bits 32:63 -> 1 if the control bit can be one.
2702 */
2703 return must_be_one | (((uint64_t)can_be_one) << 32);
2704 }
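/*
 * Worked example: for MSR_IA32_VMX_TRUE_PINBASED_CTLS (default1 ==
 * 0x16) with features == 0x3f, can_be_one = 0x3f | 0x16 = 0x3f and
 * can_be_zero = 0x3f | ~0x16 = 0xffffffff, so must_be_one == 0 and
 * the result is 0x0000003f00000000: every advertised control may be
 * set, and none is forced to one.
 */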
2705
2706 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
2707 {
2708 uint64_t kvm_vmx_basic =
2709 kvm_arch_get_supported_msr_feature(kvm_state,
2710 MSR_IA32_VMX_BASIC);
2711
2712 if (!kvm_vmx_basic) {
2713 /* If the kernel doesn't support VMX feature (kvm_intel.nested=0),
2714 * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
2715 */
2716 return;
2717 }
2718
2719 uint64_t kvm_vmx_misc =
2720 kvm_arch_get_supported_msr_feature(kvm_state,
2721 MSR_IA32_VMX_MISC);
2722 uint64_t kvm_vmx_ept_vpid =
2723 kvm_arch_get_supported_msr_feature(kvm_state,
2724 MSR_IA32_VMX_EPT_VPID_CAP);
2725
2726 /*
2727 * If the guest is 64-bit, a value of 1 is allowed for the host address
2728 * space size vmexit control.
2729 */
2730 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
2731 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
2732
2733 /*
2734 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should
2735 * not change them for backwards compatibility.
2736 */
2737 uint64_t fixed_vmx_basic = kvm_vmx_basic &
2738 (MSR_VMX_BASIC_VMCS_REVISION_MASK |
2739 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
2740 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
2741
2742 /*
2743 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can
2744 * change in the future but are always zero for now, clear them to be
2745 * future proof. Bits 32-63 in theory could change, though KVM does
2746 * not support dual-monitor treatment and probably never will; mask
2747 * them out as well.
2748 */
2749 uint64_t fixed_vmx_misc = kvm_vmx_misc &
2750 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
2751 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
2752
2753 /*
2754 * EPT memory types should not change either, so we do not bother
2755 * adding features for them.
2756 */
2757 uint64_t fixed_vmx_ept_mask =
2758 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
2759 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
2760 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
2761
2762 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2763 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2764 f[FEAT_VMX_PROCBASED_CTLS]));
2765 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2766 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2767 f[FEAT_VMX_PINBASED_CTLS]));
2768 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
2769 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
2770 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
2771 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2772 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2773 f[FEAT_VMX_ENTRY_CTLS]));
2774 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
2775 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
2776 f[FEAT_VMX_SECONDARY_CTLS]));
2777 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
2778 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
2779 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
2780 f[FEAT_VMX_BASIC] | fixed_vmx_basic);
2781 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
2782 f[FEAT_VMX_MISC] | fixed_vmx_misc);
2783 if (has_msr_vmx_vmfunc) {
2784 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
2785 }
2786
2787 /*
2788 * Just to be safe, write these with constant values. The CRn_FIXED1
2789 * MSRs are generated by KVM based on the vCPU's CPUID.
2790 */
2791 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
2792 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
2793 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
2794 CR4_VMXE_MASK);
2795
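/*
 * VMX_VMCS_ENUM reports the highest VMCS field index (bits 9:1 of a
 * field encoding): 0x32 matches the TSC-multiplier field 0x2032 and
 * 0x2e the preemption-timer field 0x482e.
 */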
2796 if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
2797 /* TSC multiplier (0x2032). */
2798 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
2799 } else {
2800 /* Preemption timer (0x482E). */
2801 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E);
2802 }
2803 }
2804
2805 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
2806 {
2807 uint64_t kvm_perf_cap =
2808 kvm_arch_get_supported_msr_feature(kvm_state,
2809 MSR_IA32_PERF_CAPABILITIES);
2810
2811 if (kvm_perf_cap) {
2812 kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
2813 kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
2814 }
2815 }
2816
2817 static int kvm_buf_set_msrs(X86CPU *cpu)
2818 {
2819 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2820 if (ret < 0) {
2821 return ret;
2822 }
2823
2824 if (ret < cpu->kvm_msr_buf->nmsrs) {
2825 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
2826 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
2827 (uint32_t)e->index, (uint64_t)e->data);
2828 }
2829
2830 assert(ret == cpu->kvm_msr_buf->nmsrs);
2831 return 0;
2832 }
2833
2834 static void kvm_init_msrs(X86CPU *cpu)
2835 {
2836 CPUX86State *env = &cpu->env;
2837
2838 kvm_msr_buf_reset(cpu);
2839 if (has_msr_arch_capabs) {
2840 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
2841 env->features[FEAT_ARCH_CAPABILITIES]);
2842 }
2843
2844 if (has_msr_core_capabs) {
2845 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
2846 env->features[FEAT_CORE_CAPABILITY]);
2847 }
2848
2849 if (has_msr_perf_capabs && cpu->enable_pmu) {
2850 kvm_msr_entry_add_perf(cpu, env->features);
2851 }
2852
2853 if (has_msr_ucode_rev) {
2854 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
2855 }
2856
2857 /*
2858 * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
2859 * all kernels with MSR features should have them.
2860 */
2861 if (kvm_feature_msrs && cpu_has_vmx(env)) {
2862 kvm_msr_entry_add_vmx(cpu, env->features);
2863 }
2864
2865 assert(kvm_buf_set_msrs(cpu) == 0);
2866 }
2867
2868 static int kvm_put_msrs(X86CPU *cpu, int level)
2869 {
2870 CPUX86State *env = &cpu->env;
2871 int i;
2872
2873 kvm_msr_buf_reset(cpu);
2874
2875 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
2876 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
2877 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
2878 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
2879 if (has_msr_star) {
2880 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
2881 }
2882 if (has_msr_hsave_pa) {
2883 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
2884 }
2885 if (has_msr_tsc_aux) {
2886 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
2887 }
2888 if (has_msr_tsc_adjust) {
2889 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
2890 }
2891 if (has_msr_misc_enable) {
2892 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
2893 env->msr_ia32_misc_enable);
2894 }
2895 if (has_msr_smbase) {
2896 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
2897 }
2898 if (has_msr_smi_count) {
2899 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
2900 }
2901 if (has_msr_pkrs) {
2902 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs);
2903 }
2904 if (has_msr_bndcfgs) {
2905 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
2906 }
2907 if (has_msr_xss) {
2908 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
2909 }
2910 if (has_msr_umwait) {
2911 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
2912 }
2913 if (has_msr_spec_ctrl) {
2914 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
2915 }
2916 if (has_msr_tsx_ctrl) {
2917 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
2918 }
2919 if (has_msr_virt_ssbd) {
2920 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
2921 }
2922
2923 #ifdef TARGET_X86_64
2924 if (lm_capable_kernel) {
2925 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
2926 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
2927 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
2928 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
2929 }
2930 #endif
2931
2932 /*
2933 * The following MSRs have side effects on the guest or are too heavy
2934 * for normal writeback. Limit them to reset or full state updates.
2935 */
2936 if (level >= KVM_PUT_RESET_STATE) {
2937 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
2938 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
2939 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
2940 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
2941 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
2942 }
2943 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
2944 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
2945 }
2946 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
2947 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
2948 }
2949 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
2950 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
2951 }
2952
2953 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
2954 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
2955 }
2956
2957 if (has_architectural_pmu_version > 0) {
2958 if (has_architectural_pmu_version > 1) {
2959 /* Stop the counter. */
2960 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
2961 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
2962 }
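/*
 * Restoring the saved values while the PMU is globally disabled
 * ensures the loads cannot race with counters that are still ticking.
 */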
2963
2964 /* Set the counter values. */
2965 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
2966 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
2967 env->msr_fixed_counters[i]);
2968 }
2969 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
2970 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
2971 env->msr_gp_counters[i]);
2972 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
2973 env->msr_gp_evtsel[i]);
2974 }
2975 if (has_architectural_pmu_version > 1) {
2976 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
2977 env->msr_global_status);
2978 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
2979 env->msr_global_ovf_ctrl);
2980
2981 /* Now start the PMU. */
2982 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
2983 env->msr_fixed_ctr_ctrl);
2984 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
2985 env->msr_global_ctrl);
2986 }
2987 }
2988 /*
2989 * Hyper-V partition-wide MSRs: to avoid clearing them on CPU hot-add,
2990 * only sync them to KVM on the first CPU.
2991 */
2992 if (current_cpu == first_cpu) {
2993 if (has_msr_hv_hypercall) {
2994 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
2995 env->msr_hv_guest_os_id);
2996 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
2997 env->msr_hv_hypercall);
2998 }
2999 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3000 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
3001 env->msr_hv_tsc);
3002 }
3003 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3004 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
3005 env->msr_hv_reenlightenment_control);
3006 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
3007 env->msr_hv_tsc_emulation_control);
3008 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
3009 env->msr_hv_tsc_emulation_status);
3010 }
3011 }
3012 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3013 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
3014 env->msr_hv_vapic);
3015 }
3016 if (has_msr_hv_crash) {
3017 int j;
3018
3019 for (j = 0; j < HV_CRASH_PARAMS; j++)
3020 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
3021 env->msr_hv_crash_params[j]);
3022
3023 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
3024 }
3025 if (has_msr_hv_runtime) {
3026 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
3027 }
3028 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
3029 && hv_vpindex_settable) {
3030 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
3031 hyperv_vp_index(CPU(cpu)));
3032 }
3033 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3034 int j;
3035
3036 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
3037
3038 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
3039 env->msr_hv_synic_control);
3040 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
3041 env->msr_hv_synic_evt_page);
3042 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
3043 env->msr_hv_synic_msg_page);
3044
3045 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
3046 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
3047 env->msr_hv_synic_sint[j]);
3048 }
3049 }
3050 if (has_msr_hv_stimer) {
3051 int j;
3052
3053 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
3054 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
3055 env->msr_hv_stimer_config[j]);
3056 }
3057
3058 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
3059 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
3060 env->msr_hv_stimer_count[j]);
3061 }
3062 }
3063 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3064 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
3065
3066 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
3067 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
3068 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
3069 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
3070 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
3071 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
3072 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
3073 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
3074 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
3075 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
3076 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
3077 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
3078 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3079 /* The CPU GPs if we write to a bit above the physical limit of
3080 * the host CPU (and KVM emulates that)
3081 */
3082 uint64_t mask = env->mtrr_var[i].mask;
3083 mask &= phys_mask;
3084
3085 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
3086 env->mtrr_var[i].base);
3087 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
3088 }
3089 }
3090 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3091 int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
3092 0x14, 1, R_EAX) & 0x7;
3093
3094 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
3095 env->msr_rtit_ctrl);
3096 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
3097 env->msr_rtit_status);
3098 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
3099 env->msr_rtit_output_base);
3100 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
3101 env->msr_rtit_output_mask);
3102 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
3103 env->msr_rtit_cr3_match);
3104 for (i = 0; i < addr_num; i++) {
3105 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
3106 env->msr_rtit_addrs[i]);
3107 }
3108 }
3109
3110 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
3111 * kvm_put_msr_feature_control. */
3112 }
3113
3114 if (env->mcg_cap) {
3115 int i;
3116
3117 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
3118 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
3119 if (has_msr_mcg_ext_ctl) {
3120 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
3121 }
3122 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3123 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
3124 }
3125 }
3126
3127 return kvm_buf_set_msrs(cpu);
3128 }
3129
3130
3131 static int kvm_get_fpu(X86CPU *cpu)
3132 {
3133 CPUX86State *env = &cpu->env;
3134 struct kvm_fpu fpu;
3135 int i, ret;
3136
3137 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
3138 if (ret < 0) {
3139 return ret;
3140 }
3141
3142 env->fpstt = (fpu.fsw >> 11) & 7;
3143 env->fpus = fpu.fsw;
3144 env->fpuc = fpu.fcw;
3145 env->fpop = fpu.last_opcode;
3146 env->fpip = fpu.last_ip;
3147 env->fpdp = fpu.last_dp;
3148 for (i = 0; i < 8; ++i) {
3149 env->fptags[i] = !((fpu.ftwx >> i) & 1);
3150 }
3151 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
3152 for (i = 0; i < CPU_NB_REGS; i++) {
3153 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
3154 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
3155 }
3156 env->mxcsr = fpu.mxcsr;
3157
3158 return 0;
3159 }
3160
3161 static int kvm_get_xsave(X86CPU *cpu)
3162 {
3163 CPUX86State *env = &cpu->env;
3164 void *xsave = env->xsave_buf;
3165 int ret;
3166
3167 if (!has_xsave) {
3168 return kvm_get_fpu(cpu);
3169 }
3170
3171 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
3172 if (ret < 0) {
3173 return ret;
3174 }
3175 x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);
3176
3177 return 0;
3178 }
3179
3180 static int kvm_get_xcrs(X86CPU *cpu)
3181 {
3182 CPUX86State *env = &cpu->env;
3183 int i, ret;
3184 struct kvm_xcrs xcrs;
3185
3186 if (!has_xcrs) {
3187 return 0;
3188 }
3189
3190 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
3191 if (ret < 0) {
3192 return ret;
3193 }
3194
3195 for (i = 0; i < xcrs.nr_xcrs; i++) {
3196 /* Only support xcr0 now */
3197 if (xcrs.xcrs[i].xcr == 0) {
3198 env->xcr0 = xcrs.xcrs[i].value;
3199 break;
3200 }
3201 }
3202 return 0;
3203 }
3204
3205 static int kvm_get_sregs(X86CPU *cpu)
3206 {
3207 CPUX86State *env = &cpu->env;
3208 struct kvm_sregs sregs;
3209 int bit, i, ret;
3210
3211 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
3212 if (ret < 0) {
3213 return ret;
3214 }
3215
3216 /* There can only be one pending IRQ set in the bitmap at a time, so try
3217 to find it and save its number instead (-1 for none). */
3218 env->interrupt_injected = -1;
3219 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
3220 if (sregs.interrupt_bitmap[i]) {
3221 bit = ctz64(sregs.interrupt_bitmap[i]);
3222 env->interrupt_injected = i * 64 + bit;
3223 break;
3224 }
3225 }
3226
3227 get_seg(&env->segs[R_CS], &sregs.cs);
3228 get_seg(&env->segs[R_DS], &sregs.ds);
3229 get_seg(&env->segs[R_ES], &sregs.es);
3230 get_seg(&env->segs[R_FS], &sregs.fs);
3231 get_seg(&env->segs[R_GS], &sregs.gs);
3232 get_seg(&env->segs[R_SS], &sregs.ss);
3233
3234 get_seg(&env->tr, &sregs.tr);
3235 get_seg(&env->ldt, &sregs.ldt);
3236
3237 env->idt.limit = sregs.idt.limit;
3238 env->idt.base = sregs.idt.base;
3239 env->gdt.limit = sregs.gdt.limit;
3240 env->gdt.base = sregs.gdt.base;
3241
3242 env->cr[0] = sregs.cr0;
3243 env->cr[2] = sregs.cr2;
3244 env->cr[3] = sregs.cr3;
3245 env->cr[4] = sregs.cr4;
3246
3247 env->efer = sregs.efer;
3248
3249 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3250 x86_update_hflags(env);
3251
3252 return 0;
3253 }
3254
3255 static int kvm_get_msrs(X86CPU *cpu)
3256 {
3257 CPUX86State *env = &cpu->env;
3258 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
3259 int ret, i;
3260 uint64_t mtrr_top_bits;
3261
3262 kvm_msr_buf_reset(cpu);
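/*
 * For KVM_GET_MSRS the 'data' values added below are only
 * placeholders; the kernel overwrites them with the current MSR
 * contents.
 */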
3263
3264 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
3265 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
3266 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
3267 kvm_msr_entry_add(cpu, MSR_PAT, 0);
3268 if (has_msr_star) {
3269 kvm_msr_entry_add(cpu, MSR_STAR, 0);
3270 }
3271 if (has_msr_hsave_pa) {
3272 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
3273 }
3274 if (has_msr_tsc_aux) {
3275 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
3276 }
3277 if (has_msr_tsc_adjust) {
3278 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
3279 }
3280 if (has_msr_tsc_deadline) {
3281 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
3282 }
3283 if (has_msr_misc_enable) {
3284 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
3285 }
3286 if (has_msr_smbase) {
3287 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
3288 }
3289 if (has_msr_smi_count) {
3290 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
3291 }
3292 if (has_msr_feature_control) {
3293 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
3294 }
3295 if (has_msr_pkrs) {
3296 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
3297 }
3298 if (has_msr_bndcfgs) {
3299 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
3300 }
3301 if (has_msr_xss) {
3302 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
3303 }
3304 if (has_msr_umwait) {
3305 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
3306 }
3307 if (has_msr_spec_ctrl) {
3308 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
3309 }
3310 if (has_msr_tsx_ctrl) {
3311 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
3312 }
3313 if (has_msr_virt_ssbd) {
3314 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
3315 }
3316 if (!env->tsc_valid) {
3317 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
3318 env->tsc_valid = !runstate_is_running();
3319 }
3320
3321 #ifdef TARGET_X86_64
3322 if (lm_capable_kernel) {
3323 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
3324 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
3325 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
3326 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
3327 }
3328 #endif
3329 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
3330 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
3331 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
3332 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
3333 }
3334 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3335 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
3336 }
3337 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3338 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
3339 }
3340 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3341 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
3342 }
3343 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3344 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
3345 }
3346 if (has_architectural_pmu_version > 0) {
3347 if (has_architectural_pmu_version > 1) {
3348 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3349 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3350 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
3351 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
3352 }
3353 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3354 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
3355 }
3356 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3357 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
3358 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
3359 }
3360 }
3361
3362 if (env->mcg_cap) {
3363 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
3364 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
3365 if (has_msr_mcg_ext_ctl) {
3366 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
3367 }
3368 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3369 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
3370 }
3371 }
3372
3373 if (has_msr_hv_hypercall) {
3374 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
3375 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
3376 }
3377 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3378 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
3379 }
3380 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3381 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
3382 }
3383 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3384 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
3385 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
3386 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
3387 }
3388 if (has_msr_hv_crash) {
3389 int j;
3390
3391 for (j = 0; j < HV_CRASH_PARAMS; j++) {
3392 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
3393 }
3394 }
3395 if (has_msr_hv_runtime) {
3396 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
3397 }
3398 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3399 uint32_t msr;
3400
3401 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
3402 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
3403 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
3404 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
3405 kvm_msr_entry_add(cpu, msr, 0);
3406 }
3407 }
3408 if (has_msr_hv_stimer) {
3409 uint32_t msr;
3410
3411 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
3412 msr++) {
3413 kvm_msr_entry_add(cpu, msr, 0);
3414 }
3415 }
3416 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3417 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
3418 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
3419 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
3420 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
3421 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
3422 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
3423 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
3424 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
3425 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
3426 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
3427 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
3428 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
3429 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3430 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
3431 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
3432 }
3433 }
3434
3435 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3436 int addr_num =
3437 kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
3438
3439 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
3440 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
3441 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
3442 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
3443 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
3444 for (i = 0; i < addr_num; i++) {
3445 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
3446 }
3447 }
3448
3449 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
3450 if (ret < 0) {
3451 return ret;
3452 }
3453
3454 if (ret < cpu->kvm_msr_buf->nmsrs) {
3455 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3456 error_report("error: failed to get MSR 0x%" PRIx32,
3457 (uint32_t)e->index);
3458 }
3459
3460 assert(ret == cpu->kvm_msr_buf->nmsrs);
3461 /*
3462 * MTRR masks: Each mask consists of 5 parts
3463 * a 10..0: must be zero
3464 * b 11 : valid bit
3465 * c n-1..12: actual mask bits
3466 * d 51..n: reserved must be zero
3467 * e 63..52: reserved must be zero
3468 *
3469 * 'n' is the number of physical bits supported by the CPU and is
3470 * apparently always <= 52. We know our 'n' but don't know what
3471 * the destination's 'n' is; it might be smaller, in which case
3472 * it masks (c) on loading. It might be larger, in which case
3473 * we fill 'd' so that d..c is consistent irrespective of the 'n'
3474 * we're migrating to.
3475 */
3476
3477 if (cpu->fill_mtrr_mask) {
3478 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
3479 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
3480 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
3481 } else {
3482 mtrr_top_bits = 0;
3483 }
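/*
 * Example: with cpu->phys_bits == 40, mtrr_top_bits is
 * MAKE_64BIT_MASK(40, 12) == 0x000fff0000000000, i.e. the reserved
 * 'd' span 51..40 gets filled in below.
 */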
3484
3485 for (i = 0; i < ret; i++) {
3486 uint32_t index = msrs[i].index;
3487 switch (index) {
3488 case MSR_IA32_SYSENTER_CS:
3489 env->sysenter_cs = msrs[i].data;
3490 break;
3491 case MSR_IA32_SYSENTER_ESP:
3492 env->sysenter_esp = msrs[i].data;
3493 break;
3494 case MSR_IA32_SYSENTER_EIP:
3495 env->sysenter_eip = msrs[i].data;
3496 break;
3497 case MSR_PAT:
3498 env->pat = msrs[i].data;
3499 break;
3500 case MSR_STAR:
3501 env->star = msrs[i].data;
3502 break;
3503 #ifdef TARGET_X86_64
3504 case MSR_CSTAR:
3505 env->cstar = msrs[i].data;
3506 break;
3507 case MSR_KERNELGSBASE:
3508 env->kernelgsbase = msrs[i].data;
3509 break;
3510 case MSR_FMASK:
3511 env->fmask = msrs[i].data;
3512 break;
3513 case MSR_LSTAR:
3514 env->lstar = msrs[i].data;
3515 break;
3516 #endif
3517 case MSR_IA32_TSC:
3518 env->tsc = msrs[i].data;
3519 break;
3520 case MSR_TSC_AUX:
3521 env->tsc_aux = msrs[i].data;
3522 break;
3523 case MSR_TSC_ADJUST:
3524 env->tsc_adjust = msrs[i].data;
3525 break;
3526 case MSR_IA32_TSCDEADLINE:
3527 env->tsc_deadline = msrs[i].data;
3528 break;
3529 case MSR_VM_HSAVE_PA:
3530 env->vm_hsave = msrs[i].data;
3531 break;
3532 case MSR_KVM_SYSTEM_TIME:
3533 env->system_time_msr = msrs[i].data;
3534 break;
3535 case MSR_KVM_WALL_CLOCK:
3536 env->wall_clock_msr = msrs[i].data;
3537 break;
3538 case MSR_MCG_STATUS:
3539 env->mcg_status = msrs[i].data;
3540 break;
3541 case MSR_MCG_CTL:
3542 env->mcg_ctl = msrs[i].data;
3543 break;
3544 case MSR_MCG_EXT_CTL:
3545 env->mcg_ext_ctl = msrs[i].data;
3546 break;
3547 case MSR_IA32_MISC_ENABLE:
3548 env->msr_ia32_misc_enable = msrs[i].data;
3549 break;
3550 case MSR_IA32_SMBASE:
3551 env->smbase = msrs[i].data;
3552 break;
3553 case MSR_SMI_COUNT:
3554 env->msr_smi_count = msrs[i].data;
3555 break;
3556 case MSR_IA32_FEATURE_CONTROL:
3557 env->msr_ia32_feature_control = msrs[i].data;
3558 break;
3559 case MSR_IA32_BNDCFGS:
3560 env->msr_bndcfgs = msrs[i].data;
3561 break;
3562 case MSR_IA32_XSS:
3563 env->xss = msrs[i].data;
3564 break;
3565 case MSR_IA32_UMWAIT_CONTROL:
3566 env->umwait = msrs[i].data;
3567 break;
3568 case MSR_IA32_PKRS:
3569 env->pkrs = msrs[i].data;
3570 break;
3571 default:
3572 if (msrs[i].index >= MSR_MC0_CTL &&
3573 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
3574 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
3575 }
3576 break;
3577 case MSR_KVM_ASYNC_PF_EN:
3578 env->async_pf_en_msr = msrs[i].data;
3579 break;
3580 case MSR_KVM_ASYNC_PF_INT:
3581 env->async_pf_int_msr = msrs[i].data;
3582 break;
3583 case MSR_KVM_PV_EOI_EN:
3584 env->pv_eoi_en_msr = msrs[i].data;
3585 break;
3586 case MSR_KVM_STEAL_TIME:
3587 env->steal_time_msr = msrs[i].data;
3588 break;
3589 case MSR_KVM_POLL_CONTROL: {
3590 env->poll_control_msr = msrs[i].data;
3591 break;
3592 }
3593 case MSR_CORE_PERF_FIXED_CTR_CTRL:
3594 env->msr_fixed_ctr_ctrl = msrs[i].data;
3595 break;
3596 case MSR_CORE_PERF_GLOBAL_CTRL:
3597 env->msr_global_ctrl = msrs[i].data;
3598 break;
3599 case MSR_CORE_PERF_GLOBAL_STATUS:
3600 env->msr_global_status = msrs[i].data;
3601 break;
3602 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
3603 env->msr_global_ovf_ctrl = msrs[i].data;
3604 break;
3605 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
3606 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
3607 break;
3608 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
3609 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
3610 break;
3611 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
3612 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
3613 break;
3614 case HV_X64_MSR_HYPERCALL:
3615 env->msr_hv_hypercall = msrs[i].data;
3616 break;
3617 case HV_X64_MSR_GUEST_OS_ID:
3618 env->msr_hv_guest_os_id = msrs[i].data;
3619 break;
3620 case HV_X64_MSR_APIC_ASSIST_PAGE:
3621 env->msr_hv_vapic = msrs[i].data;
3622 break;
3623 case HV_X64_MSR_REFERENCE_TSC:
3624 env->msr_hv_tsc = msrs[i].data;
3625 break;
3626 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3627 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
3628 break;
3629 case HV_X64_MSR_VP_RUNTIME:
3630 env->msr_hv_runtime = msrs[i].data;
3631 break;
3632 case HV_X64_MSR_SCONTROL:
3633 env->msr_hv_synic_control = msrs[i].data;
3634 break;
3635 case HV_X64_MSR_SIEFP:
3636 env->msr_hv_synic_evt_page = msrs[i].data;
3637 break;
3638 case HV_X64_MSR_SIMP:
3639 env->msr_hv_synic_msg_page = msrs[i].data;
3640 break;
3641 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
3642 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
3643 break;
3644 case HV_X64_MSR_STIMER0_CONFIG:
3645 case HV_X64_MSR_STIMER1_CONFIG:
3646 case HV_X64_MSR_STIMER2_CONFIG:
3647 case HV_X64_MSR_STIMER3_CONFIG:
3648 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
3649 msrs[i].data;
3650 break;
3651 case HV_X64_MSR_STIMER0_COUNT:
3652 case HV_X64_MSR_STIMER1_COUNT:
3653 case HV_X64_MSR_STIMER2_COUNT:
3654 case HV_X64_MSR_STIMER3_COUNT:
3655 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
3656 msrs[i].data;
3657 break;
3658 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3659 env->msr_hv_reenlightenment_control = msrs[i].data;
3660 break;
3661 case HV_X64_MSR_TSC_EMULATION_CONTROL:
3662 env->msr_hv_tsc_emulation_control = msrs[i].data;
3663 break;
3664 case HV_X64_MSR_TSC_EMULATION_STATUS:
3665 env->msr_hv_tsc_emulation_status = msrs[i].data;
3666 break;
3667 case MSR_MTRRdefType:
3668 env->mtrr_deftype = msrs[i].data;
3669 break;
3670 case MSR_MTRRfix64K_00000:
3671 env->mtrr_fixed[0] = msrs[i].data;
3672 break;
3673 case MSR_MTRRfix16K_80000:
3674 env->mtrr_fixed[1] = msrs[i].data;
3675 break;
3676 case MSR_MTRRfix16K_A0000:
3677 env->mtrr_fixed[2] = msrs[i].data;
3678 break;
3679 case MSR_MTRRfix4K_C0000:
3680 env->mtrr_fixed[3] = msrs[i].data;
3681 break;
3682 case MSR_MTRRfix4K_C8000:
3683 env->mtrr_fixed[4] = msrs[i].data;
3684 break;
3685 case MSR_MTRRfix4K_D0000:
3686 env->mtrr_fixed[5] = msrs[i].data;
3687 break;
3688 case MSR_MTRRfix4K_D8000:
3689 env->mtrr_fixed[6] = msrs[i].data;
3690 break;
3691 case MSR_MTRRfix4K_E0000:
3692 env->mtrr_fixed[7] = msrs[i].data;
3693 break;
3694 case MSR_MTRRfix4K_E8000:
3695 env->mtrr_fixed[8] = msrs[i].data;
3696 break;
3697 case MSR_MTRRfix4K_F0000:
3698 env->mtrr_fixed[9] = msrs[i].data;
3699 break;
3700 case MSR_MTRRfix4K_F8000:
3701 env->mtrr_fixed[10] = msrs[i].data;
3702 break;
3703 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
3704 if (index & 1) {
3705 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
3706 mtrr_top_bits;
3707 } else {
3708 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
3709 }
3710 break;
3711 case MSR_IA32_SPEC_CTRL:
3712 env->spec_ctrl = msrs[i].data;
3713 break;
3714 case MSR_IA32_TSX_CTRL:
3715 env->tsx_ctrl = msrs[i].data;
3716 break;
3717 case MSR_VIRT_SSBD:
3718 env->virt_ssbd = msrs[i].data;
3719 break;
3720 case MSR_IA32_RTIT_CTL:
3721 env->msr_rtit_ctrl = msrs[i].data;
3722 break;
3723 case MSR_IA32_RTIT_STATUS:
3724 env->msr_rtit_status = msrs[i].data;
3725 break;
3726 case MSR_IA32_RTIT_OUTPUT_BASE:
3727 env->msr_rtit_output_base = msrs[i].data;
3728 break;
3729 case MSR_IA32_RTIT_OUTPUT_MASK:
3730 env->msr_rtit_output_mask = msrs[i].data;
3731 break;
3732 case MSR_IA32_RTIT_CR3_MATCH:
3733 env->msr_rtit_cr3_match = msrs[i].data;
3734 break;
3735 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
3736 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
3737 break;
3738 }
3739 }
3740
3741 return 0;
3742 }
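/*
 * A minimal sketch (kept out of the build with #if 0) of the ioctl
 * behind the switch above: reading one MSR via KVM_GET_MSRS.  The helper
 * name example_read_one_msr is hypothetical; kvm_vcpu_ioctl() returns
 * the number of MSRs the kernel actually processed.
 */
#if 0
static uint64_t example_read_one_msr(CPUState *cs, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};

    msr_data.info.nmsrs = 1;           /* one entry follows the header */
    msr_data.entries[0].index = index; /* which MSR to read */

    if (kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data) != 1) {
        return 0;                      /* a real caller would report this */
    }
    return msr_data.entries[0].data;
}
#endif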
3743
3744 static int kvm_put_mp_state(X86CPU *cpu)
3745 {
3746 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
3747
3748 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
3749 }
3750
3751 static int kvm_get_mp_state(X86CPU *cpu)
3752 {
3753 CPUState *cs = CPU(cpu);
3754 CPUX86State *env = &cpu->env;
3755 struct kvm_mp_state mp_state;
3756 int ret;
3757
3758 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
3759 if (ret < 0) {
3760 return ret;
3761 }
3762 env->mp_state = mp_state.mp_state;
3763 if (kvm_irqchip_in_kernel()) {
3764 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
3765 }
3766 return 0;
3767 }
3768
3769 static int kvm_get_apic(X86CPU *cpu)
3770 {
3771 DeviceState *apic = cpu->apic_state;
3772 struct kvm_lapic_state kapic;
3773 int ret;
3774
3775 if (apic && kvm_irqchip_in_kernel()) {
3776 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
3777 if (ret < 0) {
3778 return ret;
3779 }
3780
3781 kvm_get_apic_state(apic, &kapic);
3782 }
3783 return 0;
3784 }
3785
3786 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
3787 {
3788 CPUState *cs = CPU(cpu);
3789 CPUX86State *env = &cpu->env;
3790 struct kvm_vcpu_events events = {};
3791
3792 if (!kvm_has_vcpu_events()) {
3793 return 0;
3794 }
3795
3796 events.flags = 0;
3797
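    /*
     * With KVM_CAP_EXCEPTION_PAYLOAD the kernel keeps an exception's
     * "payload" (CR2 for #PF, the pending DR6 bits for #DB) separate
     * from architectural state until the exception is delivered; the
     * pending/has_payload fields below carry that split across save
     * and restore.
     */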
3798 if (has_exception_payload) {
3799 events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
3800 events.exception.pending = env->exception_pending;
3801 events.exception_has_payload = env->exception_has_payload;
3802 events.exception_payload = env->exception_payload;
3803 }
3804 events.exception.nr = env->exception_nr;
3805 events.exception.injected = env->exception_injected;
3806 events.exception.has_error_code = env->has_error_code;
3807 events.exception.error_code = env->error_code;
3808
3809 events.interrupt.injected = (env->interrupt_injected >= 0);
3810 events.interrupt.nr = env->interrupt_injected;
3811 events.interrupt.soft = env->soft_interrupt;
3812
3813 events.nmi.injected = env->nmi_injected;
3814 events.nmi.pending = env->nmi_pending;
3815 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
3816
3817 events.sipi_vector = env->sipi_vector;
3818
3819 if (has_msr_smbase) {
3820 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
3821 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
3822 if (kvm_irqchip_in_kernel()) {
3823 /* As soon as these are moved to the kernel, remove them
3824 * from cs->interrupt_request.
3825 */
3826 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
3827 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
3828 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
3829 } else {
3830 /* Keep these in cs->interrupt_request. */
3831 events.smi.pending = 0;
3832 events.smi.latched_init = 0;
3833 }
3834 /* Stop SMI delivery on old machine types to avoid a reboot
3835 * on an incoming migration of an old VM.
3836 */
3837 if (!cpu->kvm_no_smi_migration) {
3838 events.flags |= KVM_VCPUEVENT_VALID_SMM;
3839 }
3840 }
3841
3842 if (level >= KVM_PUT_RESET_STATE) {
3843 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
3844 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
3845 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
3846 }
3847 }
3848
3849 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
3850 }
3851
3852 static int kvm_get_vcpu_events(X86CPU *cpu)
3853 {
3854 CPUX86State *env = &cpu->env;
3855 struct kvm_vcpu_events events;
3856 int ret;
3857
3858 if (!kvm_has_vcpu_events()) {
3859 return 0;
3860 }
3861
3862 memset(&events, 0, sizeof(events));
3863 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
3864 if (ret < 0) {
3865 return ret;
3866 }
3867
3868 if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
3869 env->exception_pending = events.exception.pending;
3870 env->exception_has_payload = events.exception_has_payload;
3871 env->exception_payload = events.exception_payload;
3872 } else {
3873 env->exception_pending = 0;
3874 env->exception_has_payload = false;
3875 }
3876 env->exception_injected = events.exception.injected;
3877 env->exception_nr =
3878 (env->exception_pending || env->exception_injected) ?
3879 events.exception.nr : -1;
3880 env->has_error_code = events.exception.has_error_code;
3881 env->error_code = events.exception.error_code;
3882
3883 env->interrupt_injected =
3884 events.interrupt.injected ? events.interrupt.nr : -1;
3885 env->soft_interrupt = events.interrupt.soft;
3886
3887 env->nmi_injected = events.nmi.injected;
3888 env->nmi_pending = events.nmi.pending;
3889 if (events.nmi.masked) {
3890 env->hflags2 |= HF2_NMI_MASK;
3891 } else {
3892 env->hflags2 &= ~HF2_NMI_MASK;
3893 }
3894
3895 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
3896 if (events.smi.smm) {
3897 env->hflags |= HF_SMM_MASK;
3898 } else {
3899 env->hflags &= ~HF_SMM_MASK;
3900 }
3901 if (events.smi.pending) {
3902 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
3903 } else {
3904 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
3905 }
3906 if (events.smi.smm_inside_nmi) {
3907 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
3908 } else {
3909 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
3910 }
3911 if (events.smi.latched_init) {
3912 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
3913 } else {
3914 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
3915 }
3916 }
3917
3918 env->sipi_vector = events.sipi_vector;
3919
3920 return 0;
3921 }
3922
3923 static int kvm_guest_debug_workarounds(X86CPU *cpu)
3924 {
3925 CPUState *cs = CPU(cpu);
3926 CPUX86State *env = &cpu->env;
3927 int ret = 0;
3928 unsigned long reinject_trap = 0;
3929
3930 if (!kvm_has_vcpu_events()) {
3931 if (env->exception_nr == EXCP01_DB) {
3932 reinject_trap = KVM_GUESTDBG_INJECT_DB;
3933 } else if (env->exception_injected == EXCP03_INT3) {
3934 reinject_trap = KVM_GUESTDBG_INJECT_BP;
3935 }
3936 kvm_reset_exception(env);
3937 }
3938
3939 /*
3940 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
3941 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
3942 * by updating the debug state once again if single-stepping is on.
3943 * Another reason to call kvm_update_guest_debug here is a pending debug
3944 * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
3945 * reinject it via SET_GUEST_DEBUG.
3946 */
3947 if (reinject_trap ||
3948 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
3949 ret = kvm_update_guest_debug(cs, reinject_trap);
3950 }
3951 return ret;
3952 }
3953
3954 static int kvm_put_debugregs(X86CPU *cpu)
3955 {
3956 CPUX86State *env = &cpu->env;
3957 struct kvm_debugregs dbgregs;
3958 int i;
3959
3960 if (!kvm_has_debugregs()) {
3961 return 0;
3962 }
3963
3964 memset(&dbgregs, 0, sizeof(dbgregs));
3965 for (i = 0; i < 4; i++) {
3966 dbgregs.db[i] = env->dr[i];
3967 }
3968 dbgregs.dr6 = env->dr[6];
3969 dbgregs.dr7 = env->dr[7];
3970 dbgregs.flags = 0;
3971
3972 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
3973 }
3974
3975 static int kvm_get_debugregs(X86CPU *cpu)
3976 {
3977 CPUX86State *env = &cpu->env;
3978 struct kvm_debugregs dbgregs;
3979 int i, ret;
3980
3981 if (!kvm_has_debugregs()) {
3982 return 0;
3983 }
3984
3985 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
3986 if (ret < 0) {
3987 return ret;
3988 }
3989 for (i = 0; i < 4; i++) {
3990 env->dr[i] = dbgregs.db[i];
3991 }
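    /* DR4 and DR5 are architectural aliases of DR6 and DR7 (when CR4.DE
     * is clear), so keep the aliased slots in sync as well. */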
3992 env->dr[4] = env->dr[6] = dbgregs.dr6;
3993 env->dr[5] = env->dr[7] = dbgregs.dr7;
3994
3995 return 0;
3996 }
3997
3998 static int kvm_put_nested_state(X86CPU *cpu)
3999 {
4000 CPUX86State *env = &cpu->env;
4001 int max_nested_state_len = kvm_max_nested_state_length();
4002
4003 if (!env->nested_state) {
4004 return 0;
4005 }
4006
4007 /*
4008 * Copy flags that are affected by reset from env->hflags and env->hflags2.
4009 */
4010 if (env->hflags & HF_GUEST_MASK) {
4011 env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
4012 } else {
4013 env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
4014 }
4015
4016 /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
4017 if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
4018 env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
4019 } else {
4020 env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
4021 }
4022
4023 assert(env->nested_state->size <= max_nested_state_len);
4024 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
4025 }
4026
4027 static int kvm_get_nested_state(X86CPU *cpu)
4028 {
4029 CPUX86State *env = &cpu->env;
4030 int max_nested_state_len = kvm_max_nested_state_length();
4031 int ret;
4032
4033 if (!env->nested_state) {
4034 return 0;
4035 }
4036
4037 /*
4038 * It is possible that migration restored a smaller size into
4039 * nested_state->hdr.size than what our kernel supports. We preserve
4040 * the migration origin's nested_state->hdr.size for the call to
4041 * KVM_SET_NESTED_STATE, but want our next call to KVM_GET_NESTED_STATE
4042 * to use the maximum size our kernel supports.
4043 */
4044 env->nested_state->size = max_nested_state_len;
4045
4046 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
4047 if (ret < 0) {
4048 return ret;
4049 }
4050
4051 /*
4052 * Copy flags that are affected by reset to env->hflags and env->hflags2.
4053 */
4054 if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
4055 env->hflags |= HF_GUEST_MASK;
4056 } else {
4057 env->hflags &= ~HF_GUEST_MASK;
4058 }
4059
4060 /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
4061 if (cpu_has_svm(env)) {
4062 if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
4063 env->hflags2 |= HF2_GIF_MASK;
4064 } else {
4065 env->hflags2 &= ~HF2_GIF_MASK;
4066 }
4067 }
4068
4069 return ret;
4070 }
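/*
 * A sketch (not built) of the allocation that makes the size handling
 * above work; example_alloc_nested_state is a hypothetical name, and the
 * real vCPU-init code also fills in format- and VMX-specific header
 * fields.
 */
#if 0
static void example_alloc_nested_state(CPUX86State *env)
{
    int len = kvm_max_nested_state_length();

    if (len > 0) {
        /* One buffer, sized for the largest state the kernel may return. */
        env->nested_state = g_malloc0(len);
        env->nested_state->size = len;
    }
}
#endif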
4071
4072 int kvm_arch_put_registers(CPUState *cpu, int level)
4073 {
4074 X86CPU *x86_cpu = X86_CPU(cpu);
4075 int ret;
4076
4077 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
4078
4079 /* must be before kvm_put_nested_state so that EFER.SVME is set */
4080 ret = kvm_put_sregs(x86_cpu);
4081 if (ret < 0) {
4082 return ret;
4083 }
4084
4085 if (level >= KVM_PUT_RESET_STATE) {
4086 ret = kvm_put_nested_state(x86_cpu);
4087 if (ret < 0) {
4088 return ret;
4089 }
4090
4091 ret = kvm_put_msr_feature_control(x86_cpu);
4092 if (ret < 0) {
4093 return ret;
4094 }
4095 }
4096
4097 if (level == KVM_PUT_FULL_STATE) {
4098 /* We don't check for kvm_arch_set_tsc_khz() errors here,
4099 * because TSC frequency mismatch shouldn't abort migration,
4100 * unless the user explicitly asked for a more strict TSC
4101 * setting (e.g. using an explicit "tsc-freq" option).
4102 */
4103 kvm_arch_set_tsc_khz(cpu);
4104 }
4105
4106 ret = kvm_getput_regs(x86_cpu, 1);
4107 if (ret < 0) {
4108 return ret;
4109 }
4110 ret = kvm_put_xsave(x86_cpu);
4111 if (ret < 0) {
4112 return ret;
4113 }
4114 ret = kvm_put_xcrs(x86_cpu);
4115 if (ret < 0) {
4116 return ret;
4117 }
4118 /* must be before kvm_put_msrs */
4119 ret = kvm_inject_mce_oldstyle(x86_cpu);
4120 if (ret < 0) {
4121 return ret;
4122 }
4123 ret = kvm_put_msrs(x86_cpu, level);
4124 if (ret < 0) {
4125 return ret;
4126 }
4127 ret = kvm_put_vcpu_events(x86_cpu, level);
4128 if (ret < 0) {
4129 return ret;
4130 }
4131 if (level >= KVM_PUT_RESET_STATE) {
4132 ret = kvm_put_mp_state(x86_cpu);
4133 if (ret < 0) {
4134 return ret;
4135 }
4136 }
4137
4138 ret = kvm_put_tscdeadline_msr(x86_cpu);
4139 if (ret < 0) {
4140 return ret;
4141 }
4142 ret = kvm_put_debugregs(x86_cpu);
4143 if (ret < 0) {
4144 return ret;
4145 }
4146 /* must be last */
4147 ret = kvm_guest_debug_workarounds(x86_cpu);
4148 if (ret < 0) {
4149 return ret;
4150 }
4151 return 0;
4152 }
4153
4154 int kvm_arch_get_registers(CPUState *cs)
4155 {
4156 X86CPU *cpu = X86_CPU(cs);
4157 int ret;
4158
4159 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
4160
4161 ret = kvm_get_vcpu_events(cpu);
4162 if (ret < 0) {
4163 goto out;
4164 }
4165 /*
4166 * KVM_GET_MP_STATE can modify CS and RIP, so call it before
4167 * KVM_GET_REGS and KVM_GET_SREGS.
4168 */
4169 ret = kvm_get_mp_state(cpu);
4170 if (ret < 0) {
4171 goto out;
4172 }
4173 ret = kvm_getput_regs(cpu, 0);
4174 if (ret < 0) {
4175 goto out;
4176 }
4177 ret = kvm_get_xsave(cpu);
4178 if (ret < 0) {
4179 goto out;
4180 }
4181 ret = kvm_get_xcrs(cpu);
4182 if (ret < 0) {
4183 goto out;
4184 }
4185 ret = kvm_get_sregs(cpu);
4186 if (ret < 0) {
4187 goto out;
4188 }
4189 ret = kvm_get_msrs(cpu);
4190 if (ret < 0) {
4191 goto out;
4192 }
4193 ret = kvm_get_apic(cpu);
4194 if (ret < 0) {
4195 goto out;
4196 }
4197 ret = kvm_get_debugregs(cpu);
4198 if (ret < 0) {
4199 goto out;
4200 }
4201 ret = kvm_get_nested_state(cpu);
4202 if (ret < 0) {
4203 goto out;
4204 }
4205 ret = 0;
4206 out:
4207 cpu_sync_bndcs_hflags(&cpu->env);
4208 return ret;
4209 }
4210
4211 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
4212 {
4213 X86CPU *x86_cpu = X86_CPU(cpu);
4214 CPUX86State *env = &x86_cpu->env;
4215 int ret;
4216
4217 /* Inject NMI */
4218 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
4219 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
4220 qemu_mutex_lock_iothread();
4221 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
4222 qemu_mutex_unlock_iothread();
4223 DPRINTF("injected NMI\n");
4224 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
4225 if (ret < 0) {
4226 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
4227 strerror(-ret));
4228 }
4229 }
4230 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
4231 qemu_mutex_lock_iothread();
4232 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
4233 qemu_mutex_unlock_iothread();
4234 DPRINTF("injected SMI\n");
4235 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
4236 if (ret < 0) {
4237 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
4238 strerror(-ret));
4239 }
4240 }
4241 }
4242
4243 if (!kvm_pic_in_kernel()) {
4244 qemu_mutex_lock_iothread();
4245 }
4246
4247 /* Force the VCPU out of its inner loop to process any INIT requests
4248 * or (for userspace APIC, but it is cheap to combine the checks here)
4249 * pending TPR access reports.
4250 */
4251 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
4252 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
4253 !(env->hflags & HF_SMM_MASK)) {
4254 cpu->exit_request = 1;
4255 }
4256 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
4257 cpu->exit_request = 1;
4258 }
4259 }
4260
4261 if (!kvm_pic_in_kernel()) {
4262 /* Try to inject an interrupt if the guest can accept it */
4263 if (run->ready_for_interrupt_injection &&
4264 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
4265 (env->eflags & IF_MASK)) {
4266 int irq;
4267
4268 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
4269 irq = cpu_get_pic_interrupt(env);
4270 if (irq >= 0) {
4271 struct kvm_interrupt intr;
4272
4273 intr.irq = irq;
4274 DPRINTF("injected interrupt %d\n", irq);
4275 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
4276 if (ret < 0) {
4277 fprintf(stderr,
4278 "KVM: injection failed, interrupt lost (%s)\n",
4279 strerror(-ret));
4280 }
4281 }
4282 }
4283
4284 /* If we have an interrupt but the guest is not ready to receive an
4285 * interrupt, request an interrupt window exit. This will
4286 * cause a return to userspace as soon as the guest is ready to
4287 * receive interrupts. */
4288 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
4289 run->request_interrupt_window = 1;
4290 } else {
4291 run->request_interrupt_window = 0;
4292 }
4293
4294 DPRINTF("setting tpr\n");
4295 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
4296
4297 qemu_mutex_unlock_iothread();
4298 }
4299 }
4300
4301 static void kvm_rate_limit_on_bus_lock(void)
4302 {
4303 uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);
4304
4305 if (delay_ns) {
4306 g_usleep(delay_ns / SCALE_US);
4307 }
4308 }
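/*
 * A sketch (not built) of how bus_lock_ratelimit_ctrl is presumably
 * configured at accelerator init with QEMU's generic RateLimit helpers;
 * the rate variable and BUS_LOCK_SLICE_TIME stand in for whatever the
 * init code actually uses.
 */
#if 0
    ratelimit_init(&bus_lock_ratelimit_ctrl);
    ratelimit_set_speed(&bus_lock_ratelimit_ctrl, bus_lock_ratelimit,
                        BUS_LOCK_SLICE_TIME);
#endif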
4309
4310 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
4311 {
4312 X86CPU *x86_cpu = X86_CPU(cpu);
4313 CPUX86State *env = &x86_cpu->env;
4314
4315 if (run->flags & KVM_RUN_X86_SMM) {
4316 env->hflags |= HF_SMM_MASK;
4317 } else {
4318 env->hflags &= ~HF_SMM_MASK;
4319 }
4320 if (run->if_flag) {
4321 env->eflags |= IF_MASK;
4322 } else {
4323 env->eflags &= ~IF_MASK;
4324 }
4325 if (run->flags & KVM_RUN_X86_BUS_LOCK) {
4326 kvm_rate_limit_on_bus_lock();
4327 }
4328
4329 /* We need to protect the apic state against concurrent accesses from
4330 * different threads in case the userspace irqchip is used. */
4331 if (!kvm_irqchip_in_kernel()) {
4332 qemu_mutex_lock_iothread();
4333 }
4334 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
4335 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
4336 if (!kvm_irqchip_in_kernel()) {
4337 qemu_mutex_unlock_iothread();
4338 }
4339 return cpu_get_mem_attrs(env);
4340 }
4341
4342 int kvm_arch_process_async_events(CPUState *cs)
4343 {
4344 X86CPU *cpu = X86_CPU(cs);
4345 CPUX86State *env = &cpu->env;
4346
4347 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
4348 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
4349 assert(env->mcg_cap);
4350
4351 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
4352
4353 kvm_cpu_synchronize_state(cs);
4354
4355 if (env->exception_nr == EXCP08_DBLE) {
4356 /* this means triple fault */
4357 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
4358 cs->exit_request = 1;
4359 return 0;
4360 }
4361 kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
4362 env->has_error_code = 0;
4363
4364 cs->halted = 0;
4365 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
4366 env->mp_state = KVM_MP_STATE_RUNNABLE;
4367 }
4368 }
4369
4370 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
4371 !(env->hflags & HF_SMM_MASK)) {
4372 kvm_cpu_synchronize_state(cs);
4373 do_cpu_init(cpu);
4374 }
4375
4376 if (kvm_irqchip_in_kernel()) {
4377 return 0;
4378 }
4379
4380 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
4381 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
4382 apic_poll_irq(cpu->apic_state);
4383 }
4384 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4385 (env->eflags & IF_MASK)) ||
4386 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4387 cs->halted = 0;
4388 }
4389 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
4390 kvm_cpu_synchronize_state(cs);
4391 do_cpu_sipi(cpu);
4392 }
4393 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
4394 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
4395 kvm_cpu_synchronize_state(cs);
4396 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
4397 env->tpr_access_type);
4398 }
4399
4400 return cs->halted;
4401 }
4402
4403 static int kvm_handle_halt(X86CPU *cpu)
4404 {
4405 CPUState *cs = CPU(cpu);
4406 CPUX86State *env = &cpu->env;
4407
4408 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4409 (env->eflags & IF_MASK)) &&
4410 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4411 cs->halted = 1;
4412 return EXCP_HLT;
4413 }
4414
4415 return 0;
4416 }
4417
4418 static int kvm_handle_tpr_access(X86CPU *cpu)
4419 {
4420 CPUState *cs = CPU(cpu);
4421 struct kvm_run *run = cs->kvm_run;
4422
4423 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
4424 run->tpr_access.is_write ? TPR_ACCESS_WRITE
4425 : TPR_ACCESS_READ);
4426 return 1;
4427 }
4428
4429 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4430 {
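    /* 0xcc is the one-byte INT3 opcode; the original byte is saved so
     * the breakpoint can be removed again later. */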
4431 static const uint8_t int3 = 0xcc;
4432
4433 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
4434 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
4435 return -EINVAL;
4436 }
4437 return 0;
4438 }
4439
4440 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4441 {
4442 uint8_t int3;
4443
4444 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
4445 return -EINVAL;
4446 }
4447 if (int3 != 0xcc) {
4448 return 0;
4449 }
4450 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
4451 return -EINVAL;
4452 }
4453 return 0;
4454 }
4455
4456 static struct {
4457 target_ulong addr;
4458 int len;
4459 int type;
4460 } hw_breakpoint[4];
4461
4462 static int nb_hw_breakpoint;
4463
4464 static int find_hw_breakpoint(target_ulong addr, int len, int type)
4465 {
4466 int n;
4467
4468 for (n = 0; n < nb_hw_breakpoint; n++) {
4469 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
4470 (hw_breakpoint[n].len == len || len == -1)) {
4471 return n;
4472 }
4473 }
4474 return -1;
4475 }
4476
4477 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
4478 target_ulong len, int type)
4479 {
4480 switch (type) {
4481 case GDB_BREAKPOINT_HW:
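        /* Instruction breakpoints always use length 1 (DR7 LENn == 00). */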
4482 len = 1;
4483 break;
4484 case GDB_WATCHPOINT_WRITE:
4485 case GDB_WATCHPOINT_ACCESS:
4486 switch (len) {
4487 case 1:
4488 break;
4489 case 2:
4490 case 4:
4491 case 8:
4492 if (addr & (len - 1)) {
4493 return -EINVAL;
4494 }
4495 break;
4496 default:
4497 return -EINVAL;
4498 }
4499 break;
4500 default:
4501 return -ENOSYS;
4502 }
4503
4504 if (nb_hw_breakpoint == 4) {
4505 return -ENOBUFS;
4506 }
4507 if (find_hw_breakpoint(addr, len, type) >= 0) {
4508 return -EEXIST;
4509 }
4510 hw_breakpoint[nb_hw_breakpoint].addr = addr;
4511 hw_breakpoint[nb_hw_breakpoint].len = len;
4512 hw_breakpoint[nb_hw_breakpoint].type = type;
4513 nb_hw_breakpoint++;
4514
4515 return 0;
4516 }
4517
4518 int kvm_arch_remove_hw_breakpoint(target_ulong addr,
4519 target_ulong len, int type)
4520 {
4521 int n;
4522
4523 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
4524 if (n < 0) {
4525 return -ENOENT;
4526 }
4527 nb_hw_breakpoint--;
4528 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
4529
4530 return 0;
4531 }
4532
4533 void kvm_arch_remove_all_hw_breakpoints(void)
4534 {
4535 nb_hw_breakpoint = 0;
4536 }
4537
4538 static CPUWatchpoint hw_watchpoint;
4539
4540 static int kvm_handle_debug(X86CPU *cpu,
4541 struct kvm_debug_exit_arch *arch_info)
4542 {
4543 CPUState *cs = CPU(cpu);
4544 CPUX86State *env = &cpu->env;
4545 int ret = 0;
4546 int n;
4547
4548 if (arch_info->exception == EXCP01_DB) {
4549 if (arch_info->dr6 & DR6_BS) {
4550 if (cs->singlestep_enabled) {
4551 ret = EXCP_DEBUG;
4552 }
4553 } else {
4554 for (n = 0; n < 4; n++) {
4555 if (arch_info->dr6 & (1 << n)) {
4556 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
4557 case 0x0:
4558 ret = EXCP_DEBUG;
4559 break;
4560 case 0x1:
4561 ret = EXCP_DEBUG;
4562 cs->watchpoint_hit = &hw_watchpoint;
4563 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
4564 hw_watchpoint.flags = BP_MEM_WRITE;
4565 break;
4566 case 0x3:
4567 ret = EXCP_DEBUG;
4568 cs->watchpoint_hit = &hw_watchpoint;
4569 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
4570 hw_watchpoint.flags = BP_MEM_ACCESS;
4571 break;
4572 }
4573 }
4574 }
4575 }
4576 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
4577 ret = EXCP_DEBUG;
4578 }
4579 if (ret == 0) {
4580 cpu_synchronize_state(cs);
4581 assert(env->exception_nr == -1);
4582
4583 /* pass to guest */
4584 kvm_queue_exception(env, arch_info->exception,
4585 arch_info->exception == EXCP01_DB,
4586 arch_info->dr6);
4587 env->has_error_code = 0;
4588 }
4589
4590 return ret;
4591 }
4592
4593 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
4594 {
4595 const uint8_t type_code[] = {
4596 [GDB_BREAKPOINT_HW] = 0x0,
4597 [GDB_WATCHPOINT_WRITE] = 0x1,
4598 [GDB_WATCHPOINT_ACCESS] = 0x3
4599 };
4600 const uint8_t len_code[] = {
4601 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
4602 };
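    /*
     * Layout of the DR7 word assembled below: bits 0-7 hold the per-
     * breakpoint local/global enable pairs (Gn at bit 2n + 1, hence
     * 2 << (n * 2)), the 0x0600 seed sets GE (bit 9) plus reserved
     * bit 10 (reads as 1), and each breakpoint n encodes its condition
     * (R/Wn) at bits 16 + 4n and its length (LENn) at bits 18 + 4n.
     */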
4603 int n;
4604
4605 if (kvm_sw_breakpoints_active(cpu)) {
4606 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
4607 }
4608 if (nb_hw_breakpoint > 0) {
4609 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
4610 dbg->arch.debugreg[7] = 0x0600;
4611 for (n = 0; n < nb_hw_breakpoint; n++) {
4612 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
4613 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
4614 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
4615 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
4616 }
4617 }
4618 }
4619
4620 static bool host_supports_vmx(void)
4621 {
4622 uint32_t ecx, unused;
4623
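    /* CPUID.01H:ECX bit 5 (CPUID_EXT_VMX) advertises VT-x support. */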
4624 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
4625 return ecx & CPUID_EXT_VMX;
4626 }
4627
4628 #define VMX_INVALID_GUEST_STATE 0x80000021
4629
4630 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
4631 {
4632 X86CPU *cpu = X86_CPU(cs);
4633 uint64_t code;
4634 int ret;
4635
4636 switch (run->exit_reason) {
4637 case KVM_EXIT_HLT:
4638 DPRINTF("handle_hlt\n");
4639 qemu_mutex_lock_iothread();
4640 ret = kvm_handle_halt(cpu);
4641 qemu_mutex_unlock_iothread();
4642 break;
4643 case KVM_EXIT_SET_TPR:
4644 ret = 0;
4645 break;
4646 case KVM_EXIT_TPR_ACCESS:
4647 qemu_mutex_lock_iothread();
4648 ret = kvm_handle_tpr_access(cpu);
4649 qemu_mutex_unlock_iothread();
4650 break;
4651 case KVM_EXIT_FAIL_ENTRY:
4652 code = run->fail_entry.hardware_entry_failure_reason;
4653 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
4654 code);
4655 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
4656 fprintf(stderr,
4657 "\nIf you're running a guest on an Intel machine without "
4658 "unrestricted mode\n"
4659 "support, the failure can be most likely due to the guest "
4660 "entering an invalid\n"
4661 "state for Intel VT. For example, the guest maybe running "
4662 "in big real mode\n"
4663 "which is not supported on less recent Intel processors."
4664 "\n\n");
4665 }
4666 ret = -1;
4667 break;
4668 case KVM_EXIT_EXCEPTION:
4669 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
4670 run->ex.exception, run->ex.error_code);
4671 ret = -1;
4672 break;
4673 case KVM_EXIT_DEBUG:
4674 DPRINTF("kvm_exit_debug\n");
4675 qemu_mutex_lock_iothread();
4676 ret = kvm_handle_debug(cpu, &run->debug.arch);
4677 qemu_mutex_unlock_iothread();
4678 break;
4679 case KVM_EXIT_HYPERV:
4680 ret = kvm_hv_handle_exit(cpu, &run->hyperv);
4681 break;
4682 case KVM_EXIT_IOAPIC_EOI:
4683 ioapic_eoi_broadcast(run->eoi.vector);
4684 ret = 0;
4685 break;
4686 case KVM_EXIT_X86_BUS_LOCK:
4687 /* already handled in kvm_arch_post_run */
4688 ret = 0;
4689 break;
4690 default:
4691 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
4692 ret = -1;
4693 break;
4694 }
4695
4696 return ret;
4697 }
4698
4699 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
4700 {
4701 X86CPU *cpu = X86_CPU(cs);
4702 CPUX86State *env = &cpu->env;
4703
4704 kvm_cpu_synchronize_state(cs);
4705 return !(env->cr[0] & CR0_PE_MASK) ||
4706 ((env->segs[R_CS].selector & 3) != 3);
4707 }
4708
4709 void kvm_arch_init_irq_routing(KVMState *s)
4710 {
4711 /* We know at this point that we're using the in-kernel
4712 * irqchip, so we can use irqfds, and on x86 we know
4713 * we can use msi via irqfd and GSI routing.
4714 */
4715 kvm_msi_via_irqfd_allowed = true;
4716 kvm_gsi_routing_allowed = true;
4717
4718 if (kvm_irqchip_is_split()) {
4719 int i;
4720
4721 /* If the ioapic is in QEMU and the lapics are in KVM, reserve
4722 MSI routes for signaling interrupts to the local apics. */
4723 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
4724 if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
4725 error_report("Could not enable split IRQ mode.");
4726 exit(1);
4727 }
4728 }
4729 }
4730 }
4731
4732 int kvm_arch_irqchip_create(KVMState *s)
4733 {
4734 int ret;
4735 if (kvm_kernel_irqchip_split()) {
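        /* args[0] == 24 reserves that many IRQ routes for the pins of
         * the userspace IOAPIC, per the KVM_CAP_SPLIT_IRQCHIP contract. */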
4736 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
4737 if (ret) {
4738 error_report("Could not enable split irqchip mode: %s",
4739 strerror(-ret));
4740 exit(1);
4741 } else {
4742 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
4743 kvm_split_irqchip = true;
4744 return 1;
4745 }
4746 } else {
4747 return 0;
4748 }
4749 }
4750
4751 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
4752 {
4753 CPUX86State *env;
4754 uint64_t ext_id;
4755
4756 if (!first_cpu) {
4757 return address;
4758 }
4759 env = &X86_CPU(first_cpu)->env;
4760 if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
4761 return address;
4762 }
4763
4764 /*
4765 * If the remappable format bit is set, or the upper bits are
4766 * already set in address_hi, or the low extended bits aren't
4767 * there anyway, do nothing.
4768 */
4769 ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
4770 if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
4771 return address;
4772 }
4773
4774 address &= ~ext_id;
4775 address |= ext_id << 35;
4776 return address;
4777 }
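/*
 * A worked example of the swizzle above: in the KVM_FEATURE_MSI_EXT_DEST_ID
 * layout, address bits 11-5 carry destination ID bits 14-8.  For
 * destination 0x123, the low byte 0x23 stays in the classic bits 19-12,
 * while the 0x1 from destination bit 8 sits at address bit 5 and is
 * shifted left by 35 into bit 40, i.e. into the address_hi dword where
 * KVM expects the upper destination bits.
 */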
4778
4779 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
4780 uint64_t address, uint32_t data, PCIDevice *dev)
4781 {
4782 X86IOMMUState *iommu = x86_iommu_get_default();
4783
4784 if (iommu) {
4785 X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
4786
4787 if (class->int_remap) {
4788 int ret;
4789 MSIMessage src, dst;
4790
4791 src.address = route->u.msi.address_hi;
4792 src.address <<= VTD_MSI_ADDR_HI_SHIFT;
4793 src.address |= route->u.msi.address_lo;
4794 src.data = route->u.msi.data;
4795
4796 ret = class->int_remap(iommu, &src, &dst, dev ? \
4797 pci_requester_id(dev) : \
4798 X86_IOMMU_SID_INVALID);
4799 if (ret) {
4800 trace_kvm_x86_fixup_msi_error(route->gsi);
4801 return 1;
4802 }
4803
4804 /*
4805 * Handle an untranslated compatibility-format interrupt with an
4806 * extended destination ID in the low address bits 11-5. */
4807 dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
4808
4809 route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
4810 route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
4811 route->u.msi.data = dst.data;
4812 return 0;
4813 }
4814 }
4815
4816 address = kvm_swizzle_msi_ext_dest_id(address);
4817 route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
4818 route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
4819 return 0;
4820 }
4821
4822 typedef struct MSIRouteEntry MSIRouteEntry;
4823
4824 struct MSIRouteEntry {
4825 PCIDevice *dev; /* Device pointer */
4826 int vector; /* MSI/MSIX vector index */
4827 int virq; /* Virtual IRQ index */
4828 QLIST_ENTRY(MSIRouteEntry) list;
4829 };
4830
4831 /* List of used GSI routes */
4832 static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
4833 QLIST_HEAD_INITIALIZER(msi_route_list);
4834
4835 static void kvm_update_msi_routes_all(void *private, bool global,
4836 uint32_t index, uint32_t mask)
4837 {
4838 int cnt = 0, vector;
4839 MSIRouteEntry *entry;
4840 MSIMessage msg;
4841 PCIDevice *dev;
4842
4843 /* TODO: explicit route update */
4844 QLIST_FOREACH(entry, &msi_route_list, list) {
4845 cnt++;
4846 vector = entry->vector;
4847 dev = entry->dev;
4848 if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
4849 msg = msix_get_message(dev, vector);
4850 } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
4851 msg = msi_get_message(dev, vector);
4852 } else {
4853 /*
4854 * Either MSI/MSIX is disabled for the device, or the
4855 * specific message was masked out. Skip this one.
4856 */
4857 continue;
4858 }
4859 kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
4860 }
4861 kvm_irqchip_commit_routes(kvm_state);
4862 trace_kvm_x86_update_msi_routes(cnt);
4863 }
4864
4865 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
4866 int vector, PCIDevice *dev)
4867 {
4868 static bool notify_list_inited = false;
4869 MSIRouteEntry *entry;
4870
4871 if (!dev) {
4872 /* These are (possibly) IOAPIC routes, which are only used in
4873 * split kernel irqchip mode, whereas we only track PCI
4874 * devices here. */
4875 return 0;
4876 }
4877
4878 entry = g_new0(MSIRouteEntry, 1);
4879 entry->dev = dev;
4880 entry->vector = vector;
4881 entry->virq = route->gsi;
4882 QLIST_INSERT_HEAD(&msi_route_list, entry, list);
4883
4884 trace_kvm_x86_add_msi_route(route->gsi);
4885
4886 if (!notify_list_inited) {
4887 /* The first time we add a route, register ourselves in the
4888 * IOMMU's IEC notifier list if needed. */
4889 X86IOMMUState *iommu = x86_iommu_get_default();
4890 if (iommu) {
4891 x86_iommu_iec_register_notifier(iommu,
4892 kvm_update_msi_routes_all,
4893 NULL);
4894 }
4895 notify_list_inited = true;
4896 }
4897 return 0;
4898 }
4899
4900 int kvm_arch_release_virq_post(int virq)
4901 {
4902 MSIRouteEntry *entry, *next;
4903 QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
4904 if (entry->virq == virq) {
4905 trace_kvm_x86_remove_msi_route(virq);
4906 QLIST_REMOVE(entry, list);
4907 g_free(entry);
4908 break;
4909 }
4910 }
4911 return 0;
4912 }
4913
4914 int kvm_arch_msi_data_to_gsi(uint32_t data)
4915 {
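    /* x86 does not use KVM's GSI direct mapping, so the generic code
     * should never call this hook; reaching it is a programming error. */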
4916 abort();
4917 }
4918
4919 bool kvm_has_waitpkg(void)
4920 {
4921 return has_msr_umwait;
4922 }
4923
4924 bool kvm_arch_cpu_check_are_resettable(void)
4925 {
4926 return !sev_es_enabled();
4927 }
4928