/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"

#include "cpu.h"
#include "host-cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "sev.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "hw/i386/x86.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;
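
/*
 * Architectural PMU capabilities, read from CPUID leaf 0xA in
 * kvm_arch_init_vcpu() below and consulted when deciding which PMU MSRs
 * to synchronize with KVM.
 */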
static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xsave2;
static int has_xcrs;
static int has_pit_state2;
static int has_sregs2;
static int has_exception_payload;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_adjust_clock(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    memset(&msr_data, 0, sizeof(msr_data));
    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
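
/*
 * try_get_cpuid() implements one step of a grow-and-retry protocol: a NULL
 * return means the buffer was too small (KVM either failed with E2BIG or
 * filled every slot, in which case entries may have been truncated).
 * Callers are expected to loop with a growing 'max', e.g.:
 *
 *     int max = 1;
 *     while ((cpuid = try_get_cpuid(s, max)) == NULL) {
 *         max *= 2;
 *     }
 *
 * which is what get_supported_cpuid() below does.
 */
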
/*
 * Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            warn_report("cannot get sys attribute capabilities %d", sys_attr);
        }

        /* kvm_ioctl() returns -errno on failure, not -1 with errno set */
        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc == -ENXIO || rc == -EINVAL) {
            warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                        "error: %d", rc);
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    return ret;
}

uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
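        /*
         * These MSRs encode the allowed settings of each control as a pair:
         * the low 32 bits hold the bits that must be one (allowed 0-settings)
         * and the high 32 bits hold the bits that may be one (allowed
         * 1-settings). For example, a control whose bit is set in the high
         * word but clear in the low word is freely configurable, which is
         * exactly what "can_be_one & ~must_be_one" computes.
         */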
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}
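
/*
 * SIGBUS handling: if the error hit guest RAM, record the poisoned page
 * with kvm_hwpoison_page_add() (e.g. so it can be dealt with again on
 * reset) and inject an MCE into the guest; if it hit memory used by QEMU
 * itself, an action-required error is fatal while an action-optional one
 * is only reported.
 */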
void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                             "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                             addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                            "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                            addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                        "for memory used by QEMU itself instead of guest system!",
                        addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just emit an event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
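        /*
         * env->mce_banks[] stores four MSRs per bank, in the order MCi_CTL,
         * MCi_STATUS, MCi_ADDR, MCi_MISC; hence the "bank * 4 + n" indexing
         * below.
         */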
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}
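
/*
 * Worked example: with a 2,496,000 kHz (2.496 GHz) host TSC, 250 ppm is
 * 2496000 * 250 / 1000000 = 624 kHz, so target frequencies in
 * [2495376, 2496624] kHz are treated as NTP-correctable by
 * kvm_arch_set_tsc_khz() below.
 */
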
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
"paravirtualized IPI (hv-ipi)", 936 .flags = { 937 {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX, 938 .bits = HV_CLUSTER_IPI_RECOMMENDED | 939 HV_EX_PROCESSOR_MASKS_RECOMMENDED} 940 }, 941 .dependencies = BIT(HYPERV_FEAT_VPINDEX) 942 }, 943 [HYPERV_FEAT_STIMER_DIRECT] = { 944 .desc = "direct mode synthetic timers (hv-stimer-direct)", 945 .flags = { 946 {.func = HV_CPUID_FEATURES, .reg = R_EDX, 947 .bits = HV_STIMER_DIRECT_MODE_AVAILABLE} 948 }, 949 .dependencies = BIT(HYPERV_FEAT_STIMER) 950 }, 951 [HYPERV_FEAT_AVIC] = { 952 .desc = "AVIC/APICv support (hv-avic/hv-apicv)", 953 .flags = { 954 {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX, 955 .bits = HV_DEPRECATING_AEOI_RECOMMENDED} 956 } 957 }, 958 }; 959 960 static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max, 961 bool do_sys_ioctl) 962 { 963 struct kvm_cpuid2 *cpuid; 964 int r, size; 965 966 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries); 967 cpuid = g_malloc0(size); 968 cpuid->nent = max; 969 970 if (do_sys_ioctl) { 971 r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid); 972 } else { 973 r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid); 974 } 975 if (r == 0 && cpuid->nent >= max) { 976 r = -E2BIG; 977 } 978 if (r < 0) { 979 if (r == -E2BIG) { 980 g_free(cpuid); 981 return NULL; 982 } else { 983 fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n", 984 strerror(-r)); 985 exit(1); 986 } 987 } 988 return cpuid; 989 } 990 991 /* 992 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough 993 * for all entries. 994 */ 995 static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs) 996 { 997 struct kvm_cpuid2 *cpuid; 998 /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000080 leaves */ 999 int max = 10; 1000 int i; 1001 bool do_sys_ioctl; 1002 1003 do_sys_ioctl = 1004 kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0; 1005 1006 /* 1007 * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is 1008 * unsupported, kvm_hyperv_expand_features() checks for that. 1009 */ 1010 assert(do_sys_ioctl || cs->kvm_state); 1011 1012 /* 1013 * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with 1014 * -E2BIG, however, it doesn't report back the right size. Keep increasing 1015 * it and re-trying until we succeed. 1016 */ 1017 while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) { 1018 max++; 1019 } 1020 1021 /* 1022 * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before 1023 * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the 1024 * information early, just check for the capability and set the bit 1025 * manually. 1026 */ 1027 if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state, 1028 KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) { 1029 for (i = 0; i < cpuid->nent; i++) { 1030 if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) { 1031 cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED; 1032 } 1033 } 1034 } 1035 1036 return cpuid; 1037 } 1038 1039 /* 1040 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature 1041 * leaves from KVM_CAP_HYPERV* and present MSRs data. 

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}
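
/*
 * Host-side Hyper-V CPUID data is queried once and kept in hv_cpuid_cache;
 * both the KVM_GET_SUPPORTED_HV_CPUID path and the legacy fallback above
 * feed the same cache, so hv_cpuid_get_host() is cheap to call repeatedly.
 */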
static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            /*
             * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
             * before KVM context is created but this is only done when
             * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
             * KVM_CAP_HYPERV_CPUID.
             */
            assert(cs->kvm_state);

            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}

static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}

/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return false;
        }
        deps &= ~(1ull << dep_feat);
    }

    return true;
}

static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    return r;
}

/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and that the configuration is sane
 * (all required dependencies are included). Also, this takes care of
 * 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int feat;

    if (!hyperv_enabled(cpu)) {
        return true;
    }

    /*
     * When kvm_hyperv_expand_features is called at CPU feature expansion
     * time per-CPU kvm_state is not available yet so we can only proceed
     * when KVM_CAP_SYS_HYPERV_CPUID is supported.
     */
    if (!cs->kvm_state &&
        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID)) {
        return true;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_ver_id_build =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_ver_id_major =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
        cpu->hyperv_ver_id_minor =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
        cpu->hyperv_ver_id_sp =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_ver_id_sb =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
        cpu->hyperv_ver_id_sn =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);

        /*
         * Mark feature as enabled in 'cpu->hyperv_features' as
         * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
         */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            if (hyperv_feature_supported(cs, feat)) {
                cpu->hyperv_features |= BIT(feat);
            }
        }
    } else {
        /* Check features availability and dependencies */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            /* If the feature was not requested skip it. */
            if (!hyperv_feat_enabled(cpu, feat)) {
                continue;
            }

            /* Check if the feature is supported by KVM */
            if (!hyperv_feature_supported(cs, feat)) {
                error_setg(errp, "Hyper-V %s is not supported by kernel",
                           kvm_hyperv_properties[feat].desc);
                return false;
            }

            /* Check dependencies */
            if (!hv_feature_check_deps(cpu, feat, &local_err)) {
                error_propagate(errp, local_err);
                return false;
            }
        }
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        return false;
    }

    return true;
}

/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t cpuid_i = 0;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
        HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_ver_id_build;
    c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
        cpu->hyperv_ver_id_minor;
    c->ecx = cpu->hyperv_ver_id_sp;
    c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
        (cpu->hyperv_ver_id_sn & 0xffffff);

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Unconditionally required with any Hyper-V enlightenment */
    c->eax |= HV_HYPERCALL_AVAILABLE;

    /* SynIC and Vmbus devices require messages/signals hypercalls */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only) {
        c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
        c->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
            HV_NO_NONARCH_CORESHARING;
    }
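
    /*
     * Leaf HV_CPUID_IMPLEMENT_LIMITS (0x40000005): maximum virtual processor
     * count and implementation limits, taken from cpu->hv_max_vps and
     * cpu->hyperv_limits[] (filled from the host in hv-passthrough mode).
     */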
    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint32_t function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = cpu->hyperv_nested[0];
    }

    return cpuid_i;
}

static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

/* Checks that the exposed eVMCS version range is supported by KVM */
static bool evmcs_version_supported(uint16_t evmcs_version,
                                    uint16_t supported_evmcs_version)
{
    uint8_t min_version = evmcs_version & 0xff;
    uint8_t max_version = evmcs_version >> 8;
    uint8_t min_supported_version = supported_evmcs_version & 0xff;
    uint8_t max_supported_version = supported_evmcs_version >> 8;

    return (min_version >= min_supported_version) &&
           (max_version <= max_supported_version);
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)

static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration,"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[1];
        } msr_data = {
            .info.nmsrs = 1,
            .entries[0].index = HV_X64_MSR_VP_INDEX,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
        if (ret < 0) {
            return ret;
        }
        assert(ret == 1);

        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }
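
    /*
     * Two SynIC flavours exist: KVM_CAP_HYPERV_SYNIC keeps the synthetic
     * interrupt controller entirely inside KVM, while KVM_CAP_HYPERV_SYNIC2
     * is used when QEMU also instantiates its own SynIC object (via
     * hyperv_x86_synic_add() below) so that userspace VMBus-style devices
     * can post messages and signal events.
     */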
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
        uint16_t supported_evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&supported_evmcs_version);

        /*
         * KVM is required to support EVMCS ver.1. as that's what 'hv-evmcs'
         * option sets. Note: we hardcode the maximum supported eVMCS version
         * to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
         * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
         * to be added.
         */
        if (ret < 0) {
            error_report("Hyper-V %s is not supported by kernel",
                         kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
            error_report("eVMCS version range [%d..%d] is not supported by "
                         "kernel (supported: [%d..%d])", evmcs_version & 0xff,
                         evmcs_version >> 8, supported_evmcs_version & 0xff,
                         supported_evmcs_version >> 8);
            return -ENOTSUP;
        }

        cpu->hyperv_nested[0] = evmcs_version;
    }

    if (cpu->hyperv_enforce_cpuid) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
        if (ret < 0) {
            error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
                         strerror(-ret));
            return ret;
        }
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100
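
/*
 * With KVM_CAP_XSAVE2 the capability's return value (has_xsave2) is the
 * size KVM needs for the XSAVE area, rounded up to a 4K multiple below;
 * without it, the fixed-size struct kvm_xsave is used.
 */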
static void kvm_init_xsave(CPUX86State *env)
{
    if (has_xsave2) {
        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
    } else if (has_xsave) {
        env->xsave_buf_len = sizeof(struct kvm_xsave);
    } else {
        return;
    }

    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
    memset(env->xsave_buf, 0, env->xsave_buf_len);
    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
           env->xsave_buf_len);
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } cpuid_data;
    /*
     * The kernel defines these structs with padding fields so there
     * should be no extra padding in our cpuid_data struct.
     */
    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
                      sizeof(struct kvm_cpuid2) +
                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int max_nested_state_len;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /* vcpu's TSC frequency is either specified by user, or following
     * the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record in env->tsc_khz,
     * so that vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;

    /*
     * kvm_hyperv_expand_features() is called here for the second time in case
     * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
     * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
     * check which Hyper-V enlightenments are supported and which are not, we
     * can still proceed and check/expand Hyper-V enlightenments here so legacy
     * behavior is preserved.
     */
    if (!kvm_hyperv_expand_features(cpu, &local_err)) {
        error_report_err(local_err);
        return -ENOSYS;
    }

    if (hyperv_enabled(cpu)) {
        r = hyperv_init_vcpu(cpu);
        if (r) {
            return r;
        }

        cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
        c->edx = env->features[FEAT_KVM_HINTS];
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    if (cpu->kvm_pv_enforce_cpuid) {
        r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
        if (r < 0) {
            fprintf(stderr,
                    "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s",
                    strerror(-r));
            abort();
        }
    }

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (env->nr_dies < 2) {
                break;
            }
            /* fallthrough */
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                if (i == 0x1f && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        case 0x7:
        case 0x12:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (j > 1 && (c->eax & 0xf) != 1) {
                    break;
                }

                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x12,ecx:0x%x)\n", j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        case 0x14:
        case 0x1d:
        case 0x1e: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
                        unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
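
    /*
     * MSR_IA32_FEATURE_CONTROL is only present when at least one of VMX,
     * SMX or SGX is exposed to the guest, and LMCE is likewise gated by a
     * feature-control bit; detect those cases here so the MSR is only
     * accessed when it actually exists.
     */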
cpuid_find_entry(&cpuid_data.cpuid, 1, 0); 1978 if (c) { 1979 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) || 1980 !!(c->ecx & CPUID_EXT_SMX); 1981 } 1982 1983 c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0); 1984 if (c && (c->ebx & CPUID_7_0_EBX_SGX)) { 1985 has_msr_feature_control = true; 1986 } 1987 1988 if (env->mcg_cap & MCG_LMCE_P) { 1989 has_msr_mcg_ext_ctl = has_msr_feature_control = true; 1990 } 1991 1992 if (!env->user_tsc_khz) { 1993 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) && 1994 invtsc_mig_blocker == NULL) { 1995 error_setg(&invtsc_mig_blocker, 1996 "State blocked by non-migratable CPU device" 1997 " (invtsc flag)"); 1998 r = migrate_add_blocker(invtsc_mig_blocker, &local_err); 1999 if (r < 0) { 2000 error_report_err(local_err); 2001 return r; 2002 } 2003 } 2004 } 2005 2006 if (cpu->vmware_cpuid_freq 2007 /* Guests depend on 0x40000000 to detect this feature, so only expose 2008 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */ 2009 && cpu->expose_kvm 2010 && kvm_base == KVM_CPUID_SIGNATURE 2011 /* TSC clock must be stable and known for this feature. */ 2012 && tsc_is_stable_and_known(env)) { 2013 2014 c = &cpuid_data.entries[cpuid_i++]; 2015 c->function = KVM_CPUID_SIGNATURE | 0x10; 2016 c->eax = env->tsc_khz; 2017 c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */ 2018 c->ecx = c->edx = 0; 2019 2020 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0); 2021 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10); 2022 } 2023 2024 cpuid_data.cpuid.nent = cpuid_i; 2025 2026 cpuid_data.cpuid.padding = 0; 2027 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data); 2028 if (r) { 2029 goto fail; 2030 } 2031 kvm_init_xsave(env); 2032 2033 max_nested_state_len = kvm_max_nested_state_length(); 2034 if (max_nested_state_len > 0) { 2035 assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data)); 2036 2037 if (cpu_has_vmx(env) || cpu_has_svm(env)) { 2038 struct kvm_vmx_nested_state_hdr *vmx_hdr; 2039 2040 env->nested_state = g_malloc0(max_nested_state_len); 2041 env->nested_state->size = max_nested_state_len; 2042 2043 if (cpu_has_vmx(env)) { 2044 env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX; 2045 vmx_hdr = &env->nested_state->hdr.vmx; 2046 vmx_hdr->vmxon_pa = -1ull; 2047 vmx_hdr->vmcs12_pa = -1ull; 2048 } else { 2049 env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM; 2050 } 2051 } 2052 } 2053 2054 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE); 2055 2056 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) { 2057 has_msr_tsc_aux = false; 2058 } 2059 2060 kvm_init_msrs(cpu); 2061 2062 return 0; 2063 2064 fail: 2065 migrate_del_blocker(invtsc_mig_blocker); 2066 2067 return r; 2068 } 2069 2070 int kvm_arch_destroy_vcpu(CPUState *cs) 2071 { 2072 X86CPU *cpu = X86_CPU(cs); 2073 CPUX86State *env = &cpu->env; 2074 2075 if (cpu->kvm_msr_buf) { 2076 g_free(cpu->kvm_msr_buf); 2077 cpu->kvm_msr_buf = NULL; 2078 } 2079 2080 if (env->nested_state) { 2081 g_free(env->nested_state); 2082 env->nested_state = NULL; 2083 } 2084 2085 qemu_del_vm_change_state_handler(cpu->vmsentry); 2086 2087 return 0; 2088 } 2089 2090 void kvm_arch_reset_vcpu(X86CPU *cpu) 2091 { 2092 CPUX86State *env = &cpu->env; 2093 2094 env->xcr0 = 1; 2095 if (kvm_irqchip_in_kernel()) { 2096 env->mp_state = cpu_is_bsp(cpu) ? 
KVM_MP_STATE_RUNNABLE : 2097 KVM_MP_STATE_UNINITIALIZED; 2098 } else { 2099 env->mp_state = KVM_MP_STATE_RUNNABLE; 2100 } 2101 2102 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 2103 int i; 2104 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { 2105 env->msr_hv_synic_sint[i] = HV_SINT_MASKED; 2106 } 2107 2108 hyperv_x86_synic_reset(cpu); 2109 } 2110 /* enabled by default */ 2111 env->poll_control_msr = 1; 2112 2113 sev_es_set_reset_vector(CPU(cpu)); 2114 } 2115 2116 void kvm_arch_do_init_vcpu(X86CPU *cpu) 2117 { 2118 CPUX86State *env = &cpu->env; 2119 2120 /* APs get directly into wait-for-SIPI state. */ 2121 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) { 2122 env->mp_state = KVM_MP_STATE_INIT_RECEIVED; 2123 } 2124 } 2125 2126 static int kvm_get_supported_feature_msrs(KVMState *s) 2127 { 2128 int ret = 0; 2129 2130 if (kvm_feature_msrs != NULL) { 2131 return 0; 2132 } 2133 2134 if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) { 2135 return 0; 2136 } 2137 2138 struct kvm_msr_list msr_list; 2139 2140 msr_list.nmsrs = 0; 2141 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list); 2142 if (ret < 0 && ret != -E2BIG) { 2143 error_report("Fetch KVM feature MSR list failed: %s", 2144 strerror(-ret)); 2145 return ret; 2146 } 2147 2148 assert(msr_list.nmsrs > 0); 2149 kvm_feature_msrs = (struct kvm_msr_list *) \ 2150 g_malloc0(sizeof(msr_list) + 2151 msr_list.nmsrs * sizeof(msr_list.indices[0])); 2152 2153 kvm_feature_msrs->nmsrs = msr_list.nmsrs; 2154 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs); 2155 2156 if (ret < 0) { 2157 error_report("Fetch KVM feature MSR list failed: %s", 2158 strerror(-ret)); 2159 g_free(kvm_feature_msrs); 2160 kvm_feature_msrs = NULL; 2161 return ret; 2162 } 2163 2164 return 0; 2165 } 2166 2167 static int kvm_get_supported_msrs(KVMState *s) 2168 { 2169 int ret = 0; 2170 struct kvm_msr_list msr_list, *kvm_msr_list; 2171 2172 /* 2173 * Obtain MSR list from KVM. These are the MSRs that we must 2174 * save/restore. 2175 */ 2176 msr_list.nmsrs = 0; 2177 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list); 2178 if (ret < 0 && ret != -E2BIG) { 2179 return ret; 2180 } 2181 /* 2182 * Old kernel modules had a bug and could write beyond the provided 2183 * memory. Allocate at least a safe amount of 1K. 
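 * (The call above with nmsrs = 0 is expected to fail with E2BIG while
 * filling in the required nmsrs count; the call below then fetches the
 * actual index list into the properly sized buffer.)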
2184 */ 2185 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) + 2186 msr_list.nmsrs * 2187 sizeof(msr_list.indices[0]))); 2188 2189 kvm_msr_list->nmsrs = msr_list.nmsrs; 2190 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); 2191 if (ret >= 0) { 2192 int i; 2193 2194 for (i = 0; i < kvm_msr_list->nmsrs; i++) { 2195 switch (kvm_msr_list->indices[i]) { 2196 case MSR_STAR: 2197 has_msr_star = true; 2198 break; 2199 case MSR_VM_HSAVE_PA: 2200 has_msr_hsave_pa = true; 2201 break; 2202 case MSR_TSC_AUX: 2203 has_msr_tsc_aux = true; 2204 break; 2205 case MSR_TSC_ADJUST: 2206 has_msr_tsc_adjust = true; 2207 break; 2208 case MSR_IA32_TSCDEADLINE: 2209 has_msr_tsc_deadline = true; 2210 break; 2211 case MSR_IA32_SMBASE: 2212 has_msr_smbase = true; 2213 break; 2214 case MSR_SMI_COUNT: 2215 has_msr_smi_count = true; 2216 break; 2217 case MSR_IA32_MISC_ENABLE: 2218 has_msr_misc_enable = true; 2219 break; 2220 case MSR_IA32_BNDCFGS: 2221 has_msr_bndcfgs = true; 2222 break; 2223 case MSR_IA32_XSS: 2224 has_msr_xss = true; 2225 break; 2226 case MSR_IA32_UMWAIT_CONTROL: 2227 has_msr_umwait = true; 2228 break; 2229 case HV_X64_MSR_CRASH_CTL: 2230 has_msr_hv_crash = true; 2231 break; 2232 case HV_X64_MSR_RESET: 2233 has_msr_hv_reset = true; 2234 break; 2235 case HV_X64_MSR_VP_INDEX: 2236 has_msr_hv_vpindex = true; 2237 break; 2238 case HV_X64_MSR_VP_RUNTIME: 2239 has_msr_hv_runtime = true; 2240 break; 2241 case HV_X64_MSR_SCONTROL: 2242 has_msr_hv_synic = true; 2243 break; 2244 case HV_X64_MSR_STIMER0_CONFIG: 2245 has_msr_hv_stimer = true; 2246 break; 2247 case HV_X64_MSR_TSC_FREQUENCY: 2248 has_msr_hv_frequencies = true; 2249 break; 2250 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 2251 has_msr_hv_reenlightenment = true; 2252 break; 2253 case MSR_IA32_SPEC_CTRL: 2254 has_msr_spec_ctrl = true; 2255 break; 2256 case MSR_AMD64_TSC_RATIO: 2257 has_tsc_scale_msr = true; 2258 break; 2259 case MSR_IA32_TSX_CTRL: 2260 has_msr_tsx_ctrl = true; 2261 break; 2262 case MSR_VIRT_SSBD: 2263 has_msr_virt_ssbd = true; 2264 break; 2265 case MSR_IA32_ARCH_CAPABILITIES: 2266 has_msr_arch_capabs = true; 2267 break; 2268 case MSR_IA32_CORE_CAPABILITY: 2269 has_msr_core_capabs = true; 2270 break; 2271 case MSR_IA32_PERF_CAPABILITIES: 2272 has_msr_perf_capabs = true; 2273 break; 2274 case MSR_IA32_VMX_VMFUNC: 2275 has_msr_vmx_vmfunc = true; 2276 break; 2277 case MSR_IA32_UCODE_REV: 2278 has_msr_ucode_rev = true; 2279 break; 2280 case MSR_IA32_VMX_PROCBASED_CTLS2: 2281 has_msr_vmx_procbased_ctls2 = true; 2282 break; 2283 case MSR_IA32_PKRS: 2284 has_msr_pkrs = true; 2285 break; 2286 } 2287 } 2288 } 2289 2290 g_free(kvm_msr_list); 2291 2292 return ret; 2293 } 2294 2295 static Notifier smram_machine_done; 2296 static KVMMemoryListener smram_listener; 2297 static AddressSpace smram_address_space; 2298 static MemoryRegion smram_as_root; 2299 static MemoryRegion smram_as_mem; 2300 2301 static void register_smram_listener(Notifier *n, void *unused) 2302 { 2303 MemoryRegion *smram = 2304 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 2305 2306 /* Outer container... */ 2307 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull); 2308 memory_region_set_enabled(&smram_as_root, true); 2309 2310 /* ... with two regions inside: normal system memory with low 2311 * priority, and... 
2312 */ 2313 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram", 2314 get_system_memory(), 0, ~0ull); 2315 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0); 2316 memory_region_set_enabled(&smram_as_mem, true); 2317 2318 if (smram) { 2319 /* ... SMRAM with higher priority */ 2320 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10); 2321 memory_region_set_enabled(smram, true); 2322 } 2323 2324 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM"); 2325 kvm_memory_listener_register(kvm_state, &smram_listener, 2326 &smram_address_space, 1, "kvm-smram"); 2327 } 2328 2329 int kvm_arch_init(MachineState *ms, KVMState *s) 2330 { 2331 uint64_t identity_base = 0xfffbc000; 2332 uint64_t shadow_mem; 2333 int ret; 2334 struct utsname utsname; 2335 Error *local_err = NULL; 2336 2337 /* 2338 * Initialize SEV context, if required 2339 * 2340 * If no memory encryption is requested (ms->cgs == NULL) this is 2341 * a no-op. 2342 * 2343 * It's also a no-op if a non-SEV confidential guest support 2344 * mechanism is selected. SEV is the only mechanism available to 2345 * select on x86 at present, so this doesn't arise, but if new 2346 * mechanisms are supported in future (e.g. TDX), they'll need 2347 * their own initialization either here or elsewhere. 2348 */ 2349 ret = sev_kvm_init(ms->cgs, &local_err); 2350 if (ret < 0) { 2351 error_report_err(local_err); 2352 return ret; 2353 } 2354 2355 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) { 2356 error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM"); 2357 return -ENOTSUP; 2358 } 2359 2360 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE); 2361 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS); 2362 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2); 2363 has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0; 2364 2365 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX); 2366 2367 has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD); 2368 if (has_exception_payload) { 2369 ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true); 2370 if (ret < 0) { 2371 error_report("kvm: Failed to enable exception payload cap: %s", 2372 strerror(-ret)); 2373 return ret; 2374 } 2375 } 2376 2377 ret = kvm_get_supported_msrs(s); 2378 if (ret < 0) { 2379 return ret; 2380 } 2381 2382 kvm_get_supported_feature_msrs(s); 2383 2384 uname(&utsname); 2385 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0; 2386 2387 /* 2388 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. 2389 * In order to use vm86 mode, an EPT identity map and a TSS are needed. 2390 * Since these must be part of guest physical memory, we need to allocate 2391 * them, both by setting their start addresses in the kernel and by 2392 * creating a corresponding e820 entry. We need 4 pages before the BIOS. 2393 * 2394 * Older KVM versions may not support setting the identity map base. In 2395 * that case we need to stick with the default, i.e. a 256K maximum BIOS 2396 * size. 2397 */ 2398 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) { 2399 /* Allows up to 16M BIOSes. */ 2400 identity_base = 0xfeffc000; 2401 2402 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); 2403 if (ret < 0) { 2404 return ret; 2405 } 2406 } 2407 2408 /* Set TSS base one page after EPT identity map. 
*/ 2409 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000); 2410 if (ret < 0) { 2411 return ret; 2412 } 2413 2414 /* Tell fw_cfg to notify the BIOS to reserve the range. */ 2415 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED); 2416 if (ret < 0) { 2417 fprintf(stderr, "e820_add_entry() table is full\n"); 2418 return ret; 2419 } 2420 2421 shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort); 2422 if (shadow_mem != -1) { 2423 shadow_mem /= 4096; 2424 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); 2425 if (ret < 0) { 2426 return ret; 2427 } 2428 } 2429 2430 if (kvm_check_extension(s, KVM_CAP_X86_SMM) && 2431 object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) && 2432 x86_machine_is_smm_enabled(X86_MACHINE(ms))) { 2433 smram_machine_done.notify = register_smram_listener; 2434 qemu_add_machine_init_done_notifier(&smram_machine_done); 2435 } 2436 2437 if (enable_cpu_pm) { 2438 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS); 2439 int ret; 2440 2441 /* Workaround for a kernel header with a typo. TODO: fix header and drop. */ 2442 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT) 2443 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL 2444 #endif 2445 if (disable_exits) { 2446 disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT | 2447 KVM_X86_DISABLE_EXITS_HLT | 2448 KVM_X86_DISABLE_EXITS_PAUSE | 2449 KVM_X86_DISABLE_EXITS_CSTATE); 2450 } 2451 2452 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0, 2453 disable_exits); 2454 if (ret < 0) { 2455 error_report("kvm: guest stopping CPU not supported: %s", 2456 strerror(-ret)); 2457 } 2458 } 2459 2460 if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) { 2461 X86MachineState *x86ms = X86_MACHINE(ms); 2462 2463 if (x86ms->bus_lock_ratelimit > 0) { 2464 ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT); 2465 if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) { 2466 error_report("kvm: bus lock detection unsupported"); 2467 return -ENOTSUP; 2468 } 2469 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0, 2470 KVM_BUS_LOCK_DETECTION_EXIT); 2471 if (ret < 0) { 2472 error_report("kvm: Failed to enable bus lock detection cap: %s", 2473 strerror(-ret)); 2474 return ret; 2475 } 2476 ratelimit_init(&bus_lock_ratelimit_ctrl); 2477 ratelimit_set_speed(&bus_lock_ratelimit_ctrl, 2478 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME); 2479 } 2480 } 2481 2482 return 0; 2483 } 2484 2485 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs) 2486 { 2487 lhs->selector = rhs->selector; 2488 lhs->base = rhs->base; 2489 lhs->limit = rhs->limit; 2490 lhs->type = 3; 2491 lhs->present = 1; 2492 lhs->dpl = 3; 2493 lhs->db = 0; 2494 lhs->s = 1; 2495 lhs->l = 0; 2496 lhs->g = 0; 2497 lhs->avl = 0; 2498 lhs->unusable = 0; 2499 } 2500 2501 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs) 2502 { 2503 unsigned flags = rhs->flags; 2504 lhs->selector = rhs->selector; 2505 lhs->base = rhs->base; 2506 lhs->limit = rhs->limit; 2507 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15; 2508 lhs->present = (flags & DESC_P_MASK) != 0; 2509 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3; 2510 lhs->db = (flags >> DESC_B_SHIFT) & 1; 2511 lhs->s = (flags & DESC_S_MASK) != 0; 2512 lhs->l = (flags >> DESC_L_SHIFT) & 1; 2513 lhs->g = (flags & DESC_G_MASK) != 0; 2514 lhs->avl = (flags & DESC_AVL_MASK) != 0; 2515 lhs->unusable = !lhs->present; 2516 lhs->padding = 0; 2517 } 2518 2519 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs) 2520 {
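/* Repack KVM's exploded segment fields into QEMU's descriptor flags word;
 * a segment that KVM marks unusable reads back as not present. */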
2521 lhs->selector = rhs->selector; 2522 lhs->base = rhs->base; 2523 lhs->limit = rhs->limit; 2524 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) | 2525 ((rhs->present && !rhs->unusable) * DESC_P_MASK) | 2526 (rhs->dpl << DESC_DPL_SHIFT) | 2527 (rhs->db << DESC_B_SHIFT) | 2528 (rhs->s * DESC_S_MASK) | 2529 (rhs->l << DESC_L_SHIFT) | 2530 (rhs->g * DESC_G_MASK) | 2531 (rhs->avl * DESC_AVL_MASK); 2532 } 2533 2534 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set) 2535 { 2536 if (set) { 2537 *kvm_reg = *qemu_reg; 2538 } else { 2539 *qemu_reg = *kvm_reg; 2540 } 2541 } 2542 2543 static int kvm_getput_regs(X86CPU *cpu, int set) 2544 { 2545 CPUX86State *env = &cpu->env; 2546 struct kvm_regs regs; 2547 int ret = 0; 2548 2549 if (!set) { 2550 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs); 2551 if (ret < 0) { 2552 return ret; 2553 } 2554 } 2555 2556 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set); 2557 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set); 2558 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set); 2559 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set); 2560 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set); 2561 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set); 2562 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set); 2563 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set); 2564 #ifdef TARGET_X86_64 2565 kvm_getput_reg(&regs.r8, &env->regs[8], set); 2566 kvm_getput_reg(&regs.r9, &env->regs[9], set); 2567 kvm_getput_reg(&regs.r10, &env->regs[10], set); 2568 kvm_getput_reg(&regs.r11, &env->regs[11], set); 2569 kvm_getput_reg(&regs.r12, &env->regs[12], set); 2570 kvm_getput_reg(&regs.r13, &env->regs[13], set); 2571 kvm_getput_reg(&regs.r14, &env->regs[14], set); 2572 kvm_getput_reg(&regs.r15, &env->regs[15], set); 2573 #endif 2574 2575 kvm_getput_reg(&regs.rflags, &env->eflags, set); 2576 kvm_getput_reg(&regs.rip, &env->eip, set); 2577 2578 if (set) { 2579 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs); 2580 } 2581 2582 return ret; 2583 } 2584 2585 static int kvm_put_fpu(X86CPU *cpu) 2586 { 2587 CPUX86State *env = &cpu->env; 2588 struct kvm_fpu fpu; 2589 int i; 2590 2591 memset(&fpu, 0, sizeof fpu); 2592 fpu.fsw = env->fpus & ~(7 << 11); 2593 fpu.fsw |= (env->fpstt & 7) << 11; 2594 fpu.fcw = env->fpuc; 2595 fpu.last_opcode = env->fpop; 2596 fpu.last_ip = env->fpip; 2597 fpu.last_dp = env->fpdp; 2598 for (i = 0; i < 8; ++i) { 2599 fpu.ftwx |= (!env->fptags[i]) << i; 2600 } 2601 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs); 2602 for (i = 0; i < CPU_NB_REGS; i++) { 2603 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0)); 2604 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1)); 2605 } 2606 fpu.mxcsr = env->mxcsr; 2607 2608 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu); 2609 } 2610 2611 static int kvm_put_xsave(X86CPU *cpu) 2612 { 2613 CPUX86State *env = &cpu->env; 2614 void *xsave = env->xsave_buf; 2615 2616 if (!has_xsave) { 2617 return kvm_put_fpu(cpu); 2618 } 2619 x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len); 2620 2621 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave); 2622 } 2623 2624 static int kvm_put_xcrs(X86CPU *cpu) 2625 { 2626 CPUX86State *env = &cpu->env; 2627 struct kvm_xcrs xcrs = {}; 2628 2629 if (!has_xcrs) { 2630 return 0; 2631 } 2632 2633 xcrs.nr_xcrs = 1; 2634 xcrs.flags = 0; 2635 xcrs.xcrs[0].xcr = 0; 2636 xcrs.xcrs[0].value = env->xcr0; 2637 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs); 2638 } 2639 2640 static int kvm_put_sregs(X86CPU *cpu) 2641 { 2642 CPUX86State *env = &cpu->env; 2643 struct kvm_sregs sregs; 2644 2645 /* 2646 * The interrupt_bitmap is ignored because
KVM_SET_SREGS is 2647 * always followed by KVM_SET_VCPU_EVENTS. 2648 */ 2649 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap)); 2650 2651 if ((env->eflags & VM_MASK)) { 2652 set_v8086_seg(&sregs.cs, &env->segs[R_CS]); 2653 set_v8086_seg(&sregs.ds, &env->segs[R_DS]); 2654 set_v8086_seg(&sregs.es, &env->segs[R_ES]); 2655 set_v8086_seg(&sregs.fs, &env->segs[R_FS]); 2656 set_v8086_seg(&sregs.gs, &env->segs[R_GS]); 2657 set_v8086_seg(&sregs.ss, &env->segs[R_SS]); 2658 } else { 2659 set_seg(&sregs.cs, &env->segs[R_CS]); 2660 set_seg(&sregs.ds, &env->segs[R_DS]); 2661 set_seg(&sregs.es, &env->segs[R_ES]); 2662 set_seg(&sregs.fs, &env->segs[R_FS]); 2663 set_seg(&sregs.gs, &env->segs[R_GS]); 2664 set_seg(&sregs.ss, &env->segs[R_SS]); 2665 } 2666 2667 set_seg(&sregs.tr, &env->tr); 2668 set_seg(&sregs.ldt, &env->ldt); 2669 2670 sregs.idt.limit = env->idt.limit; 2671 sregs.idt.base = env->idt.base; 2672 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); 2673 sregs.gdt.limit = env->gdt.limit; 2674 sregs.gdt.base = env->gdt.base; 2675 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); 2676 2677 sregs.cr0 = env->cr[0]; 2678 sregs.cr2 = env->cr[2]; 2679 sregs.cr3 = env->cr[3]; 2680 sregs.cr4 = env->cr[4]; 2681 2682 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); 2683 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); 2684 2685 sregs.efer = env->efer; 2686 2687 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); 2688 } 2689 2690 static int kvm_put_sregs2(X86CPU *cpu) 2691 { 2692 CPUX86State *env = &cpu->env; 2693 struct kvm_sregs2 sregs; 2694 int i; 2695 2696 sregs.flags = 0; 2697 2698 if ((env->eflags & VM_MASK)) { 2699 set_v8086_seg(&sregs.cs, &env->segs[R_CS]); 2700 set_v8086_seg(&sregs.ds, &env->segs[R_DS]); 2701 set_v8086_seg(&sregs.es, &env->segs[R_ES]); 2702 set_v8086_seg(&sregs.fs, &env->segs[R_FS]); 2703 set_v8086_seg(&sregs.gs, &env->segs[R_GS]); 2704 set_v8086_seg(&sregs.ss, &env->segs[R_SS]); 2705 } else { 2706 set_seg(&sregs.cs, &env->segs[R_CS]); 2707 set_seg(&sregs.ds, &env->segs[R_DS]); 2708 set_seg(&sregs.es, &env->segs[R_ES]); 2709 set_seg(&sregs.fs, &env->segs[R_FS]); 2710 set_seg(&sregs.gs, &env->segs[R_GS]); 2711 set_seg(&sregs.ss, &env->segs[R_SS]); 2712 } 2713 2714 set_seg(&sregs.tr, &env->tr); 2715 set_seg(&sregs.ldt, &env->ldt); 2716 2717 sregs.idt.limit = env->idt.limit; 2718 sregs.idt.base = env->idt.base; 2719 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); 2720 sregs.gdt.limit = env->gdt.limit; 2721 sregs.gdt.base = env->gdt.base; 2722 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); 2723 2724 sregs.cr0 = env->cr[0]; 2725 sregs.cr2 = env->cr[2]; 2726 sregs.cr3 = env->cr[3]; 2727 sregs.cr4 = env->cr[4]; 2728 2729 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); 2730 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); 2731 2732 sregs.efer = env->efer; 2733 2734 if (env->pdptrs_valid) { 2735 for (i = 0; i < 4; i++) { 2736 sregs.pdptrs[i] = env->pdptrs[i]; 2737 } 2738 sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 2739 } 2740 2741 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs); 2742 } 2743 2744 2745 static void kvm_msr_buf_reset(X86CPU *cpu) 2746 { 2747 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE); 2748 } 2749 2750 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value) 2751 { 2752 struct kvm_msrs *msrs = cpu->kvm_msr_buf; 2753 void *limit = ((void *)msrs) + MSR_BUF_SIZE; 2754 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs]; 2755 2756 assert((void *)(entry + 1) <= limit); 2757 2758 entry->index = 
index; 2759 entry->reserved = 0; 2760 entry->data = value; 2761 msrs->nmsrs++; 2762 } 2763 2764 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value) 2765 { 2766 kvm_msr_buf_reset(cpu); 2767 kvm_msr_entry_add(cpu, index, value); 2768 2769 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); 2770 } 2771 2772 void kvm_put_apicbase(X86CPU *cpu, uint64_t value) 2773 { 2774 int ret; 2775 2776 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value); 2777 assert(ret == 1); 2778 } 2779 2780 static int kvm_put_tscdeadline_msr(X86CPU *cpu) 2781 { 2782 CPUX86State *env = &cpu->env; 2783 int ret; 2784 2785 if (!has_msr_tsc_deadline) { 2786 return 0; 2787 } 2788 2789 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline); 2790 if (ret < 0) { 2791 return ret; 2792 } 2793 2794 assert(ret == 1); 2795 return 0; 2796 } 2797 2798 /* 2799 * Provide a separate write service for the feature control MSR in order to 2800 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done 2801 * before writing any other state because forcibly leaving nested mode 2802 * invalidates the VCPU state. 2803 */ 2804 static int kvm_put_msr_feature_control(X86CPU *cpu) 2805 { 2806 int ret; 2807 2808 if (!has_msr_feature_control) { 2809 return 0; 2810 } 2811 2812 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL, 2813 cpu->env.msr_ia32_feature_control); 2814 if (ret < 0) { 2815 return ret; 2816 } 2817 2818 assert(ret == 1); 2819 return 0; 2820 } 2821 2822 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features) 2823 { 2824 uint32_t default1, can_be_one, can_be_zero; 2825 uint32_t must_be_one; 2826 2827 switch (index) { 2828 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 2829 default1 = 0x00000016; 2830 break; 2831 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 2832 default1 = 0x0401e172; 2833 break; 2834 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 2835 default1 = 0x000011ff; 2836 break; 2837 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 2838 default1 = 0x00036dff; 2839 break; 2840 case MSR_IA32_VMX_PROCBASED_CTLS2: 2841 default1 = 0; 2842 break; 2843 default: 2844 abort(); 2845 } 2846 2847 /* If a feature bit is set, the control can be either set or clear. 2848 * Otherwise the value is limited to either 0 or 1 by default1. 2849 */ 2850 can_be_one = features | default1; 2851 can_be_zero = features | ~default1; 2852 must_be_one = ~can_be_zero; 2853 2854 /* 2855 * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one). 2856 * Bit 32:63 -> 1 if the control bit can be one. 2857 */ 2858 return must_be_one | (((uint64_t)can_be_one) << 32); 2859 } 2860 2861 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f) 2862 { 2863 uint64_t kvm_vmx_basic = 2864 kvm_arch_get_supported_msr_feature(kvm_state, 2865 MSR_IA32_VMX_BASIC); 2866 2867 if (!kvm_vmx_basic) { 2868 /* If the kernel doesn't support VMX feature (kvm_intel.nested=0), 2869 * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail. 2870 */ 2871 return; 2872 } 2873 2874 uint64_t kvm_vmx_misc = 2875 kvm_arch_get_supported_msr_feature(kvm_state, 2876 MSR_IA32_VMX_MISC); 2877 uint64_t kvm_vmx_ept_vpid = 2878 kvm_arch_get_supported_msr_feature(kvm_state, 2879 MSR_IA32_VMX_EPT_VPID_CAP); 2880 2881 /* 2882 * If the guest is 64-bit, a value of 1 is allowed for the host address 2883 * space size vmexit control. 2884 */ 2885 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM 2886 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0; 2887 2888 /* 2889 * Bits 0-30, 32-44 and 50-53 come from the host. 
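 * (For MSR_IA32_VMX_BASIC these are the VMCS revision identifier, the
 * VMXON/VMCS region size and the VMCS memory type, matching the three
 * masks applied below.)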
KVM should 2890 * not change them for backwards compatibility. 2891 */ 2892 uint64_t fixed_vmx_basic = kvm_vmx_basic & 2893 (MSR_VMX_BASIC_VMCS_REVISION_MASK | 2894 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK | 2895 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK); 2896 2897 /* 2898 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can 2899 * change in the future but are always zero for now, clear them to be 2900 * future proof. Bits 32-63 in theory could change, though KVM does 2901 * not support dual-monitor treatment and probably never will; mask 2902 * them out as well. 2903 */ 2904 uint64_t fixed_vmx_misc = kvm_vmx_misc & 2905 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK | 2906 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK); 2907 2908 /* 2909 * EPT memory types should not change either, so we do not bother 2910 * adding features for them. 2911 */ 2912 uint64_t fixed_vmx_ept_mask = 2913 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ? 2914 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0); 2915 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask; 2916 2917 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 2918 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 2919 f[FEAT_VMX_PROCBASED_CTLS])); 2920 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS, 2921 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS, 2922 f[FEAT_VMX_PINBASED_CTLS])); 2923 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS, 2924 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS, 2925 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit); 2926 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS, 2927 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS, 2928 f[FEAT_VMX_ENTRY_CTLS])); 2929 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2, 2930 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2, 2931 f[FEAT_VMX_SECONDARY_CTLS])); 2932 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP, 2933 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid); 2934 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC, 2935 f[FEAT_VMX_BASIC] | fixed_vmx_basic); 2936 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC, 2937 f[FEAT_VMX_MISC] | fixed_vmx_misc); 2938 if (has_msr_vmx_vmfunc) { 2939 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]); 2940 } 2941 2942 /* 2943 * Just to be safe, write these with constant values. The CRn_FIXED1 2944 * MSRs are generated by KVM based on the vCPU's CPUID. 2945 */ 2946 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0, 2947 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK); 2948 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0, 2949 CR4_VMXE_MASK); 2950 2951 if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { 2952 /* TSC multiplier (0x2032). */ 2953 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32); 2954 } else { 2955 /* Preemption timer (0x482E). 
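 * (MSR_IA32_VMX_VMCS_ENUM advertises the highest VMCS field index
 * available to the guest; only the index bits of the full field
 * encoding are stored here.)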
*/ 2956 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E); 2957 } 2958 } 2959 2960 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f) 2961 { 2962 uint64_t kvm_perf_cap = 2963 kvm_arch_get_supported_msr_feature(kvm_state, 2964 MSR_IA32_PERF_CAPABILITIES); 2965 2966 if (kvm_perf_cap) { 2967 kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES, 2968 kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]); 2969 } 2970 } 2971 2972 static int kvm_buf_set_msrs(X86CPU *cpu) 2973 { 2974 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); 2975 if (ret < 0) { 2976 return ret; 2977 } 2978 2979 if (ret < cpu->kvm_msr_buf->nmsrs) { 2980 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; 2981 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, 2982 (uint32_t)e->index, (uint64_t)e->data); 2983 } 2984 2985 assert(ret == cpu->kvm_msr_buf->nmsrs); 2986 return 0; 2987 } 2988 2989 static void kvm_init_msrs(X86CPU *cpu) 2990 { 2991 CPUX86State *env = &cpu->env; 2992 2993 kvm_msr_buf_reset(cpu); 2994 if (has_msr_arch_capabs) { 2995 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, 2996 env->features[FEAT_ARCH_CAPABILITIES]); 2997 } 2998 2999 if (has_msr_core_capabs) { 3000 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY, 3001 env->features[FEAT_CORE_CAPABILITY]); 3002 } 3003 3004 if (has_msr_perf_capabs && cpu->enable_pmu) { 3005 kvm_msr_entry_add_perf(cpu, env->features); 3006 } 3007 3008 if (has_msr_ucode_rev) { 3009 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); 3010 } 3011 3012 /* 3013 * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but 3014 * all kernels with MSR features should have them. 3015 */ 3016 if (kvm_feature_msrs && cpu_has_vmx(env)) { 3017 kvm_msr_entry_add_vmx(cpu, env->features); 3018 } 3019 3020 assert(kvm_buf_set_msrs(cpu) == 0); 3021 } 3022 3023 static int kvm_put_msrs(X86CPU *cpu, int level) 3024 { 3025 CPUX86State *env = &cpu->env; 3026 int i; 3027 3028 kvm_msr_buf_reset(cpu); 3029 3030 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs); 3031 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); 3032 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); 3033 kvm_msr_entry_add(cpu, MSR_PAT, env->pat); 3034 if (has_msr_star) { 3035 kvm_msr_entry_add(cpu, MSR_STAR, env->star); 3036 } 3037 if (has_msr_hsave_pa) { 3038 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave); 3039 } 3040 if (has_msr_tsc_aux) { 3041 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux); 3042 } 3043 if (has_msr_tsc_adjust) { 3044 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust); 3045 } 3046 if (has_msr_misc_enable) { 3047 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 3048 env->msr_ia32_misc_enable); 3049 } 3050 if (has_msr_smbase) { 3051 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase); 3052 } 3053 if (has_msr_smi_count) { 3054 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count); 3055 } 3056 if (has_msr_pkrs) { 3057 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs); 3058 } 3059 if (has_msr_bndcfgs) { 3060 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs); 3061 } 3062 if (has_msr_xss) { 3063 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); 3064 } 3065 if (has_msr_umwait) { 3066 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait); 3067 } 3068 if (has_msr_spec_ctrl) { 3069 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); 3070 } 3071 if (has_tsc_scale_msr) { 3072 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr); 3073 } 3074 3075 if (has_msr_tsx_ctrl) 
{ 3076 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl); 3077 } 3078 if (has_msr_virt_ssbd) { 3079 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd); 3080 } 3081 3082 #ifdef TARGET_X86_64 3083 if (lm_capable_kernel) { 3084 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); 3085 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); 3086 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); 3087 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); 3088 } 3089 #endif 3090 3091 /* 3092 * The following MSRs have side effects on the guest or are too heavy 3093 * for normal writeback. Limit them to reset or full state updates. 3094 */ 3095 if (level >= KVM_PUT_RESET_STATE) { 3096 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); 3097 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); 3098 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); 3099 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { 3100 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr); 3101 } 3102 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { 3103 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); 3104 } 3105 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { 3106 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); 3107 } 3108 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { 3109 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); 3110 } 3111 3112 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { 3113 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr); 3114 } 3115 3116 if (has_architectural_pmu_version > 0) { 3117 if (has_architectural_pmu_version > 1) { 3118 /* Stop the counter. */ 3119 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); 3120 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); 3121 } 3122 3123 /* Set the counter values. */ 3124 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { 3125 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 3126 env->msr_fixed_counters[i]); 3127 } 3128 for (i = 0; i < num_architectural_pmu_gp_counters; i++) { 3129 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 3130 env->msr_gp_counters[i]); 3131 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 3132 env->msr_gp_evtsel[i]); 3133 } 3134 if (has_architectural_pmu_version > 1) { 3135 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 3136 env->msr_global_status); 3137 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 3138 env->msr_global_ovf_ctrl); 3139 3140 /* Now start the PMU. 
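 * (The counter values were written while the global controls were
 * zeroed above; restoring the control registers re-enables counting.)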
*/ 3141 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 3142 env->msr_fixed_ctr_ctrl); 3143 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 3144 env->msr_global_ctrl); 3145 } 3146 } 3147 /* 3148 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add, 3149 * only sync them to KVM on the first cpu 3150 */ 3151 if (current_cpu == first_cpu) { 3152 if (has_msr_hv_hypercall) { 3153 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 3154 env->msr_hv_guest_os_id); 3155 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 3156 env->msr_hv_hypercall); 3157 } 3158 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { 3159 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 3160 env->msr_hv_tsc); 3161 } 3162 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { 3163 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 3164 env->msr_hv_reenlightenment_control); 3165 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 3166 env->msr_hv_tsc_emulation_control); 3167 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 3168 env->msr_hv_tsc_emulation_status); 3169 } 3170 } 3171 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { 3172 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 3173 env->msr_hv_vapic); 3174 } 3175 if (has_msr_hv_crash) { 3176 int j; 3177 3178 for (j = 0; j < HV_CRASH_PARAMS; j++) 3179 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 3180 env->msr_hv_crash_params[j]); 3181 3182 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY); 3183 } 3184 if (has_msr_hv_runtime) { 3185 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); 3186 } 3187 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) 3188 && hv_vpindex_settable) { 3189 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX, 3190 hyperv_vp_index(CPU(cpu))); 3191 } 3192 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 3193 int j; 3194 3195 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION); 3196 3197 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 3198 env->msr_hv_synic_control); 3199 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 3200 env->msr_hv_synic_evt_page); 3201 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 3202 env->msr_hv_synic_msg_page); 3203 3204 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) { 3205 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j, 3206 env->msr_hv_synic_sint[j]); 3207 } 3208 } 3209 if (has_msr_hv_stimer) { 3210 int j; 3211 3212 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) { 3213 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2, 3214 env->msr_hv_stimer_config[j]); 3215 } 3216 3217 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) { 3218 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2, 3219 env->msr_hv_stimer_count[j]); 3220 } 3221 } 3222 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { 3223 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits); 3224 3225 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype); 3226 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]); 3227 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]); 3228 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]); 3229 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]); 3230 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]); 3231 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]); 3232 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]); 3233 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]); 3234 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 
env->mtrr_fixed[8]); 3235 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]); 3236 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]); 3237 for (i = 0; i < MSR_MTRRcap_VCNT; i++) { 3238 /* The CPU GPs if we write to a bit above the physical limit of 3239 * the host CPU (and KVM emulates that) 3240 */ 3241 uint64_t mask = env->mtrr_var[i].mask; 3242 mask &= phys_mask; 3243 3244 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 3245 env->mtrr_var[i].base); 3246 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask); 3247 } 3248 } 3249 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { 3250 int addr_num = kvm_arch_get_supported_cpuid(kvm_state, 3251 0x14, 1, R_EAX) & 0x7; 3252 3253 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 3254 env->msr_rtit_ctrl); 3255 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 3256 env->msr_rtit_status); 3257 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 3258 env->msr_rtit_output_base); 3259 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 3260 env->msr_rtit_output_mask); 3261 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 3262 env->msr_rtit_cr3_match); 3263 for (i = 0; i < addr_num; i++) { 3264 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 3265 env->msr_rtit_addrs[i]); 3266 } 3267 } 3268 3269 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { 3270 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 3271 env->msr_ia32_sgxlepubkeyhash[0]); 3272 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 3273 env->msr_ia32_sgxlepubkeyhash[1]); 3274 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 3275 env->msr_ia32_sgxlepubkeyhash[2]); 3276 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 3277 env->msr_ia32_sgxlepubkeyhash[3]); 3278 } 3279 3280 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { 3281 kvm_msr_entry_add(cpu, MSR_IA32_XFD, 3282 env->msr_xfd); 3283 kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 3284 env->msr_xfd_err); 3285 } 3286 3287 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see 3288 * kvm_put_msr_feature_control. */ 3289 } 3290 3291 if (env->mcg_cap) { 3292 int i; 3293 3294 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status); 3295 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl); 3296 if (has_msr_mcg_ext_ctl) { 3297 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl); 3298 } 3299 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { 3300 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]); 3301 } 3302 } 3303 3304 return kvm_buf_set_msrs(cpu); 3305 } 3306 3307 3308 static int kvm_get_fpu(X86CPU *cpu) 3309 { 3310 CPUX86State *env = &cpu->env; 3311 struct kvm_fpu fpu; 3312 int i, ret; 3313 3314 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu); 3315 if (ret < 0) { 3316 return ret; 3317 } 3318 3319 env->fpstt = (fpu.fsw >> 11) & 7; 3320 env->fpus = fpu.fsw; 3321 env->fpuc = fpu.fcw; 3322 env->fpop = fpu.last_opcode; 3323 env->fpip = fpu.last_ip; 3324 env->fpdp = fpu.last_dp; 3325 for (i = 0; i < 8; ++i) { 3326 env->fptags[i] = !((fpu.ftwx >> i) & 1); 3327 } 3328 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs); 3329 for (i = 0; i < CPU_NB_REGS; i++) { 3330 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]); 3331 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]); 3332 } 3333 env->mxcsr = fpu.mxcsr; 3334 3335 return 0; 3336 } 3337 3338 static int kvm_get_xsave(X86CPU *cpu) 3339 { 3340 CPUX86State *env = &cpu->env; 3341 void *xsave = env->xsave_buf; 3342 int type, ret; 3343 3344 if (!has_xsave) { 3345 return kvm_get_fpu(cpu); 3346 } 3347 3348 type = has_xsave2 ? 
KVM_GET_XSAVE2 : KVM_GET_XSAVE; 3349 ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave); 3350 if (ret < 0) { 3351 return ret; 3352 } 3353 x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len); 3354 3355 return 0; 3356 } 3357 3358 static int kvm_get_xcrs(X86CPU *cpu) 3359 { 3360 CPUX86State *env = &cpu->env; 3361 int i, ret; 3362 struct kvm_xcrs xcrs; 3363 3364 if (!has_xcrs) { 3365 return 0; 3366 } 3367 3368 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs); 3369 if (ret < 0) { 3370 return ret; 3371 } 3372 3373 for (i = 0; i < xcrs.nr_xcrs; i++) { 3374 /* Only support xcr0 now */ 3375 if (xcrs.xcrs[i].xcr == 0) { 3376 env->xcr0 = xcrs.xcrs[i].value; 3377 break; 3378 } 3379 } 3380 return 0; 3381 } 3382 3383 static int kvm_get_sregs(X86CPU *cpu) 3384 { 3385 CPUX86State *env = &cpu->env; 3386 struct kvm_sregs sregs; 3387 int ret; 3388 3389 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); 3390 if (ret < 0) { 3391 return ret; 3392 } 3393 3394 /* 3395 * The interrupt_bitmap is ignored because KVM_GET_SREGS is 3396 * always preceded by KVM_GET_VCPU_EVENTS. 3397 */ 3398 3399 get_seg(&env->segs[R_CS], &sregs.cs); 3400 get_seg(&env->segs[R_DS], &sregs.ds); 3401 get_seg(&env->segs[R_ES], &sregs.es); 3402 get_seg(&env->segs[R_FS], &sregs.fs); 3403 get_seg(&env->segs[R_GS], &sregs.gs); 3404 get_seg(&env->segs[R_SS], &sregs.ss); 3405 3406 get_seg(&env->tr, &sregs.tr); 3407 get_seg(&env->ldt, &sregs.ldt); 3408 3409 env->idt.limit = sregs.idt.limit; 3410 env->idt.base = sregs.idt.base; 3411 env->gdt.limit = sregs.gdt.limit; 3412 env->gdt.base = sregs.gdt.base; 3413 3414 env->cr[0] = sregs.cr0; 3415 env->cr[2] = sregs.cr2; 3416 env->cr[3] = sregs.cr3; 3417 env->cr[4] = sregs.cr4; 3418 3419 env->efer = sregs.efer; 3420 3421 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ 3422 x86_update_hflags(env); 3423 3424 return 0; 3425 } 3426 3427 static int kvm_get_sregs2(X86CPU *cpu) 3428 { 3429 CPUX86State *env = &cpu->env; 3430 struct kvm_sregs2 sregs; 3431 int i, ret; 3432 3433 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs); 3434 if (ret < 0) { 3435 return ret; 3436 } 3437 3438 get_seg(&env->segs[R_CS], &sregs.cs); 3439 get_seg(&env->segs[R_DS], &sregs.ds); 3440 get_seg(&env->segs[R_ES], &sregs.es); 3441 get_seg(&env->segs[R_FS], &sregs.fs); 3442 get_seg(&env->segs[R_GS], &sregs.gs); 3443 get_seg(&env->segs[R_SS], &sregs.ss); 3444 3445 get_seg(&env->tr, &sregs.tr); 3446 get_seg(&env->ldt, &sregs.ldt); 3447 3448 env->idt.limit = sregs.idt.limit; 3449 env->idt.base = sregs.idt.base; 3450 env->gdt.limit = sregs.gdt.limit; 3451 env->gdt.base = sregs.gdt.base; 3452 3453 env->cr[0] = sregs.cr0; 3454 env->cr[2] = sregs.cr2; 3455 env->cr[3] = sregs.cr3; 3456 env->cr[4] = sregs.cr4; 3457 3458 env->efer = sregs.efer; 3459 3460 env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 3461 3462 if (env->pdptrs_valid) { 3463 for (i = 0; i < 4; i++) { 3464 env->pdptrs[i] = sregs.pdptrs[i]; 3465 } 3466 } 3467 3468 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ 3469 x86_update_hflags(env); 3470 3471 return 0; 3472 } 3473 3474 static int kvm_get_msrs(X86CPU *cpu) 3475 { 3476 CPUX86State *env = &cpu->env; 3477 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries; 3478 int ret, i; 3479 uint64_t mtrr_top_bits; 3480 3481 kvm_msr_buf_reset(cpu); 3482 3483 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0); 3484 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0); 3485 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0); 3486 kvm_msr_entry_add(cpu, MSR_PAT, 0); 3487 if 
(has_msr_star) { 3488 kvm_msr_entry_add(cpu, MSR_STAR, 0); 3489 } 3490 if (has_msr_hsave_pa) { 3491 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0); 3492 } 3493 if (has_msr_tsc_aux) { 3494 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0); 3495 } 3496 if (has_msr_tsc_adjust) { 3497 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0); 3498 } 3499 if (has_msr_tsc_deadline) { 3500 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0); 3501 } 3502 if (has_msr_misc_enable) { 3503 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0); 3504 } 3505 if (has_msr_smbase) { 3506 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0); 3507 } 3508 if (has_msr_smi_count) { 3509 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0); 3510 } 3511 if (has_msr_feature_control) { 3512 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0); 3513 } 3514 if (has_msr_pkrs) { 3515 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0); 3516 } 3517 if (has_msr_bndcfgs) { 3518 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0); 3519 } 3520 if (has_msr_xss) { 3521 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0); 3522 } 3523 if (has_msr_umwait) { 3524 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0); 3525 } 3526 if (has_msr_spec_ctrl) { 3527 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0); 3528 } 3529 if (has_tsc_scale_msr) { 3530 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0); 3531 } 3532 3533 if (has_msr_tsx_ctrl) { 3534 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0); 3535 } 3536 if (has_msr_virt_ssbd) { 3537 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0); 3538 } 3539 if (!env->tsc_valid) { 3540 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0); 3541 env->tsc_valid = !runstate_is_running(); 3542 } 3543 3544 #ifdef TARGET_X86_64 3545 if (lm_capable_kernel) { 3546 kvm_msr_entry_add(cpu, MSR_CSTAR, 0); 3547 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0); 3548 kvm_msr_entry_add(cpu, MSR_FMASK, 0); 3549 kvm_msr_entry_add(cpu, MSR_LSTAR, 0); 3550 } 3551 #endif 3552 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); 3553 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0); 3554 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { 3555 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0); 3556 } 3557 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { 3558 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0); 3559 } 3560 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { 3561 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0); 3562 } 3563 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { 3564 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); 3565 } 3566 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { 3567 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); 3568 } 3569 if (has_architectural_pmu_version > 0) { 3570 if (has_architectural_pmu_version > 1) { 3571 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); 3572 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); 3573 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); 3574 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); 3575 } 3576 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { 3577 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); 3578 } 3579 for (i = 0; i < num_architectural_pmu_gp_counters; i++) { 3580 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0); 3581 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); 3582 } 3583 } 3584 3585 if (env->mcg_cap) { 3586 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0); 3587 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0); 3588 if (has_msr_mcg_ext_ctl) { 3589 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0); 3590 } 3591 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { 3592 kvm_msr_entry_add(cpu, MSR_MC0_CTL 
+ i, 0); 3593 } 3594 } 3595 3596 if (has_msr_hv_hypercall) { 3597 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0); 3598 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0); 3599 } 3600 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { 3601 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0); 3602 } 3603 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { 3604 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0); 3605 } 3606 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { 3607 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0); 3608 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0); 3609 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0); 3610 } 3611 if (has_msr_hv_crash) { 3612 int j; 3613 3614 for (j = 0; j < HV_CRASH_PARAMS; j++) { 3615 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0); 3616 } 3617 } 3618 if (has_msr_hv_runtime) { 3619 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0); 3620 } 3621 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 3622 uint32_t msr; 3623 3624 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0); 3625 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0); 3626 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0); 3627 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) { 3628 kvm_msr_entry_add(cpu, msr, 0); 3629 } 3630 } 3631 if (has_msr_hv_stimer) { 3632 uint32_t msr; 3633 3634 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT; 3635 msr++) { 3636 kvm_msr_entry_add(cpu, msr, 0); 3637 } 3638 } 3639 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { 3640 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0); 3641 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0); 3642 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0); 3643 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0); 3644 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0); 3645 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0); 3646 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0); 3647 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0); 3648 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0); 3649 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0); 3650 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0); 3651 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0); 3652 for (i = 0; i < MSR_MTRRcap_VCNT; i++) { 3653 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0); 3654 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0); 3655 } 3656 } 3657 3658 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { 3659 int addr_num = 3660 kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7; 3661 3662 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0); 3663 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0); 3664 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0); 3665 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0); 3666 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0); 3667 for (i = 0; i < addr_num; i++) { 3668 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0); 3669 } 3670 } 3671 3672 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { 3673 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0); 3674 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0); 3675 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0); 3676 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0); 3677 } 3678 3679 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { 3680 kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0); 3681 kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0); 3682 } 3683 3684 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); 3685 if (ret < 0) { 3686 return ret; 3687 } 3688 3689 if (ret < cpu->kvm_msr_buf->nmsrs) { 3690 
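/* KVM_GET_MSRS returns the number of MSRs successfully read; on a short
 * count, entry 'ret' is the first one the kernel rejected. */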
struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; 3691 error_report("error: failed to get MSR 0x%" PRIx32, 3692 (uint32_t)e->index); 3693 } 3694 3695 assert(ret == cpu->kvm_msr_buf->nmsrs); 3696 /* 3697 * MTRR masks: Each mask consists of 5 parts 3698 * a 10..0: must be zero 3699 * b 11 : valid bit 3700 * c n-1..12: actual mask bits 3701 * d 51..n: reserved must be zero 3702 * e 63..52: reserved must be zero 3703 * 3704 * 'n' is the number of physical bits supported by the CPU and is 3705 * apparently always <= 52. We know our 'n' but don't know what 3706 * the destination's 'n' is; it might be smaller, in which case 3707 * it masks (c) on loading. It might be larger, in which case 3708 * we fill 'd' so that d..c is consistent irrespective of the 'n' 3709 * we're migrating to. 3710 */ 3711 3712 if (cpu->fill_mtrr_mask) { 3713 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52); 3714 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS); 3715 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits); 3716 } else { 3717 mtrr_top_bits = 0; 3718 } 3719 3720 for (i = 0; i < ret; i++) { 3721 uint32_t index = msrs[i].index; 3722 switch (index) { 3723 case MSR_IA32_SYSENTER_CS: 3724 env->sysenter_cs = msrs[i].data; 3725 break; 3726 case MSR_IA32_SYSENTER_ESP: 3727 env->sysenter_esp = msrs[i].data; 3728 break; 3729 case MSR_IA32_SYSENTER_EIP: 3730 env->sysenter_eip = msrs[i].data; 3731 break; 3732 case MSR_PAT: 3733 env->pat = msrs[i].data; 3734 break; 3735 case MSR_STAR: 3736 env->star = msrs[i].data; 3737 break; 3738 #ifdef TARGET_X86_64 3739 case MSR_CSTAR: 3740 env->cstar = msrs[i].data; 3741 break; 3742 case MSR_KERNELGSBASE: 3743 env->kernelgsbase = msrs[i].data; 3744 break; 3745 case MSR_FMASK: 3746 env->fmask = msrs[i].data; 3747 break; 3748 case MSR_LSTAR: 3749 env->lstar = msrs[i].data; 3750 break; 3751 #endif 3752 case MSR_IA32_TSC: 3753 env->tsc = msrs[i].data; 3754 break; 3755 case MSR_TSC_AUX: 3756 env->tsc_aux = msrs[i].data; 3757 break; 3758 case MSR_TSC_ADJUST: 3759 env->tsc_adjust = msrs[i].data; 3760 break; 3761 case MSR_IA32_TSCDEADLINE: 3762 env->tsc_deadline = msrs[i].data; 3763 break; 3764 case MSR_VM_HSAVE_PA: 3765 env->vm_hsave = msrs[i].data; 3766 break; 3767 case MSR_KVM_SYSTEM_TIME: 3768 env->system_time_msr = msrs[i].data; 3769 break; 3770 case MSR_KVM_WALL_CLOCK: 3771 env->wall_clock_msr = msrs[i].data; 3772 break; 3773 case MSR_MCG_STATUS: 3774 env->mcg_status = msrs[i].data; 3775 break; 3776 case MSR_MCG_CTL: 3777 env->mcg_ctl = msrs[i].data; 3778 break; 3779 case MSR_MCG_EXT_CTL: 3780 env->mcg_ext_ctl = msrs[i].data; 3781 break; 3782 case MSR_IA32_MISC_ENABLE: 3783 env->msr_ia32_misc_enable = msrs[i].data; 3784 break; 3785 case MSR_IA32_SMBASE: 3786 env->smbase = msrs[i].data; 3787 break; 3788 case MSR_SMI_COUNT: 3789 env->msr_smi_count = msrs[i].data; 3790 break; 3791 case MSR_IA32_FEATURE_CONTROL: 3792 env->msr_ia32_feature_control = msrs[i].data; 3793 break; 3794 case MSR_IA32_BNDCFGS: 3795 env->msr_bndcfgs = msrs[i].data; 3796 break; 3797 case MSR_IA32_XSS: 3798 env->xss = msrs[i].data; 3799 break; 3800 case MSR_IA32_UMWAIT_CONTROL: 3801 env->umwait = msrs[i].data; 3802 break; 3803 case MSR_IA32_PKRS: 3804 env->pkrs = msrs[i].data; 3805 break; 3806 default: 3807 if (msrs[i].index >= MSR_MC0_CTL && 3808 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { 3809 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; 3810 } 3811 break; 3812 case MSR_KVM_ASYNC_PF_EN: 3813 env->async_pf_en_msr = msrs[i].data; 3814 break; 3815 case
MSR_KVM_ASYNC_PF_INT: 3816 env->async_pf_int_msr = msrs[i].data; 3817 break; 3818 case MSR_KVM_PV_EOI_EN: 3819 env->pv_eoi_en_msr = msrs[i].data; 3820 break; 3821 case MSR_KVM_STEAL_TIME: 3822 env->steal_time_msr = msrs[i].data; 3823 break; 3824 case MSR_KVM_POLL_CONTROL: { 3825 env->poll_control_msr = msrs[i].data; 3826 break; 3827 } 3828 case MSR_CORE_PERF_FIXED_CTR_CTRL: 3829 env->msr_fixed_ctr_ctrl = msrs[i].data; 3830 break; 3831 case MSR_CORE_PERF_GLOBAL_CTRL: 3832 env->msr_global_ctrl = msrs[i].data; 3833 break; 3834 case MSR_CORE_PERF_GLOBAL_STATUS: 3835 env->msr_global_status = msrs[i].data; 3836 break; 3837 case MSR_CORE_PERF_GLOBAL_OVF_CTRL: 3838 env->msr_global_ovf_ctrl = msrs[i].data; 3839 break; 3840 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1: 3841 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data; 3842 break; 3843 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1: 3844 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data; 3845 break; 3846 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: 3847 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; 3848 break; 3849 case HV_X64_MSR_HYPERCALL: 3850 env->msr_hv_hypercall = msrs[i].data; 3851 break; 3852 case HV_X64_MSR_GUEST_OS_ID: 3853 env->msr_hv_guest_os_id = msrs[i].data; 3854 break; 3855 case HV_X64_MSR_APIC_ASSIST_PAGE: 3856 env->msr_hv_vapic = msrs[i].data; 3857 break; 3858 case HV_X64_MSR_REFERENCE_TSC: 3859 env->msr_hv_tsc = msrs[i].data; 3860 break; 3861 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3862 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data; 3863 break; 3864 case HV_X64_MSR_VP_RUNTIME: 3865 env->msr_hv_runtime = msrs[i].data; 3866 break; 3867 case HV_X64_MSR_SCONTROL: 3868 env->msr_hv_synic_control = msrs[i].data; 3869 break; 3870 case HV_X64_MSR_SIEFP: 3871 env->msr_hv_synic_evt_page = msrs[i].data; 3872 break; 3873 case HV_X64_MSR_SIMP: 3874 env->msr_hv_synic_msg_page = msrs[i].data; 3875 break; 3876 case HV_X64_MSR_SINT0 ... 
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                msrs[i].data;
            break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
            env->msr_hv_reenlightenment_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
            env->msr_hv_tsc_emulation_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
            env->msr_hv_tsc_emulation_status = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
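        /*
         * Variable-range MTRRs arrive as base/mask pairs: odd MSR
         * indices are the PhysMask half, which also receives the
         * mtrr_top_bits fix-up computed above.
         */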
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        case MSR_IA32_SPEC_CTRL:
            env->spec_ctrl = msrs[i].data;
            break;
        case MSR_AMD64_TSC_RATIO:
            env->amd_tsc_scale_msr = msrs[i].data;
            break;
        case MSR_IA32_TSX_CTRL:
            env->tsx_ctrl = msrs[i].data;
            break;
        case MSR_VIRT_SSBD:
            env->virt_ssbd = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CTL:
            env->msr_rtit_ctrl = msrs[i].data;
            break;
        case MSR_IA32_RTIT_STATUS:
            env->msr_rtit_status = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_BASE:
            env->msr_rtit_output_base = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_MASK:
            env->msr_rtit_output_mask = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CR3_MATCH:
            env->msr_rtit_cr3_match = msrs[i].data;
            break;
        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
            break;
        case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
            env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
                msrs[i].data;
            break;
        case MSR_IA32_XFD:
            env->msr_xfd = msrs[i].data;
            break;
        case MSR_IA32_XFD_ERR:
            env->msr_xfd_err = msrs[i].data;
            break;
        }
    }

    return 0;
}

static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}
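
/*
 * Push pending/injected exception, NMI, SMI and SIPI state to the
 * kernel.  Exception payloads are only transferred when the kernel
 * advertised support for them (has_exception_payload).
 */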
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.flags = 0;

    if (has_exception_payload) {
        events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = env->exception_pending;
        events.exception_has_payload = env->exception_has_payload;
        events.exception_payload = env->exception_payload;
    }
    events.exception.nr = env->exception_nr;
    events.exception.injected = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        /* Stop SMI delivery on old machine types to avoid a reboot
         * on an inward migration of an old VM.
         */
        if (!cpu->kvm_no_smi_migration) {
            events.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
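
/*
 * The read-back counterpart of kvm_put_vcpu_events(): fold the event
 * state KVM reports back into env and cs->interrupt_request.
 */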
4194 */ 4195 if (reinject_trap || 4196 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) { 4197 ret = kvm_update_guest_debug(cs, reinject_trap); 4198 } 4199 return ret; 4200 } 4201 4202 static int kvm_put_debugregs(X86CPU *cpu) 4203 { 4204 CPUX86State *env = &cpu->env; 4205 struct kvm_debugregs dbgregs; 4206 int i; 4207 4208 if (!kvm_has_debugregs()) { 4209 return 0; 4210 } 4211 4212 memset(&dbgregs, 0, sizeof(dbgregs)); 4213 for (i = 0; i < 4; i++) { 4214 dbgregs.db[i] = env->dr[i]; 4215 } 4216 dbgregs.dr6 = env->dr[6]; 4217 dbgregs.dr7 = env->dr[7]; 4218 dbgregs.flags = 0; 4219 4220 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs); 4221 } 4222 4223 static int kvm_get_debugregs(X86CPU *cpu) 4224 { 4225 CPUX86State *env = &cpu->env; 4226 struct kvm_debugregs dbgregs; 4227 int i, ret; 4228 4229 if (!kvm_has_debugregs()) { 4230 return 0; 4231 } 4232 4233 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs); 4234 if (ret < 0) { 4235 return ret; 4236 } 4237 for (i = 0; i < 4; i++) { 4238 env->dr[i] = dbgregs.db[i]; 4239 } 4240 env->dr[4] = env->dr[6] = dbgregs.dr6; 4241 env->dr[5] = env->dr[7] = dbgregs.dr7; 4242 4243 return 0; 4244 } 4245 4246 static int kvm_put_nested_state(X86CPU *cpu) 4247 { 4248 CPUX86State *env = &cpu->env; 4249 int max_nested_state_len = kvm_max_nested_state_length(); 4250 4251 if (!env->nested_state) { 4252 return 0; 4253 } 4254 4255 /* 4256 * Copy flags that are affected by reset from env->hflags and env->hflags2. 4257 */ 4258 if (env->hflags & HF_GUEST_MASK) { 4259 env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE; 4260 } else { 4261 env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE; 4262 } 4263 4264 /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */ 4265 if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) { 4266 env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET; 4267 } else { 4268 env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET; 4269 } 4270 4271 assert(env->nested_state->size <= max_nested_state_len); 4272 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state); 4273 } 4274 4275 static int kvm_get_nested_state(X86CPU *cpu) 4276 { 4277 CPUX86State *env = &cpu->env; 4278 int max_nested_state_len = kvm_max_nested_state_length(); 4279 int ret; 4280 4281 if (!env->nested_state) { 4282 return 0; 4283 } 4284 4285 /* 4286 * It is possible that migration restored a smaller size into 4287 * nested_state->hdr.size than what our kernel support. 4288 * We preserve migration origin nested_state->hdr.size for 4289 * call to KVM_SET_NESTED_STATE but wish that our next call 4290 * to KVM_GET_NESTED_STATE will use max size our kernel support. 4291 */ 4292 env->nested_state->size = max_nested_state_len; 4293 4294 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state); 4295 if (ret < 0) { 4296 return ret; 4297 } 4298 4299 /* 4300 * Copy flags that are affected by reset to env->hflags and env->hflags2. 
4301 */ 4302 if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) { 4303 env->hflags |= HF_GUEST_MASK; 4304 } else { 4305 env->hflags &= ~HF_GUEST_MASK; 4306 } 4307 4308 /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */ 4309 if (cpu_has_svm(env)) { 4310 if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) { 4311 env->hflags2 |= HF2_GIF_MASK; 4312 } else { 4313 env->hflags2 &= ~HF2_GIF_MASK; 4314 } 4315 } 4316 4317 return ret; 4318 } 4319 4320 int kvm_arch_put_registers(CPUState *cpu, int level) 4321 { 4322 X86CPU *x86_cpu = X86_CPU(cpu); 4323 int ret; 4324 4325 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); 4326 4327 /* must be before kvm_put_nested_state so that EFER.SVME is set */ 4328 ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu); 4329 if (ret < 0) { 4330 return ret; 4331 } 4332 4333 if (level >= KVM_PUT_RESET_STATE) { 4334 ret = kvm_put_nested_state(x86_cpu); 4335 if (ret < 0) { 4336 return ret; 4337 } 4338 4339 ret = kvm_put_msr_feature_control(x86_cpu); 4340 if (ret < 0) { 4341 return ret; 4342 } 4343 } 4344 4345 if (level == KVM_PUT_FULL_STATE) { 4346 /* We don't check for kvm_arch_set_tsc_khz() errors here, 4347 * because TSC frequency mismatch shouldn't abort migration, 4348 * unless the user explicitly asked for a more strict TSC 4349 * setting (e.g. using an explicit "tsc-freq" option). 4350 */ 4351 kvm_arch_set_tsc_khz(cpu); 4352 } 4353 4354 ret = kvm_getput_regs(x86_cpu, 1); 4355 if (ret < 0) { 4356 return ret; 4357 } 4358 ret = kvm_put_xsave(x86_cpu); 4359 if (ret < 0) { 4360 return ret; 4361 } 4362 ret = kvm_put_xcrs(x86_cpu); 4363 if (ret < 0) { 4364 return ret; 4365 } 4366 /* must be before kvm_put_msrs */ 4367 ret = kvm_inject_mce_oldstyle(x86_cpu); 4368 if (ret < 0) { 4369 return ret; 4370 } 4371 ret = kvm_put_msrs(x86_cpu, level); 4372 if (ret < 0) { 4373 return ret; 4374 } 4375 ret = kvm_put_vcpu_events(x86_cpu, level); 4376 if (ret < 0) { 4377 return ret; 4378 } 4379 if (level >= KVM_PUT_RESET_STATE) { 4380 ret = kvm_put_mp_state(x86_cpu); 4381 if (ret < 0) { 4382 return ret; 4383 } 4384 } 4385 4386 ret = kvm_put_tscdeadline_msr(x86_cpu); 4387 if (ret < 0) { 4388 return ret; 4389 } 4390 ret = kvm_put_debugregs(x86_cpu); 4391 if (ret < 0) { 4392 return ret; 4393 } 4394 /* must be last */ 4395 ret = kvm_guest_debug_workarounds(x86_cpu); 4396 if (ret < 0) { 4397 return ret; 4398 } 4399 return 0; 4400 } 4401 4402 int kvm_arch_get_registers(CPUState *cs) 4403 { 4404 X86CPU *cpu = X86_CPU(cs); 4405 int ret; 4406 4407 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs)); 4408 4409 ret = kvm_get_vcpu_events(cpu); 4410 if (ret < 0) { 4411 goto out; 4412 } 4413 /* 4414 * KVM_GET_MPSTATE can modify CS and RIP, call it before 4415 * KVM_GET_REGS and KVM_GET_SREGS. 4416 */ 4417 ret = kvm_get_mp_state(cpu); 4418 if (ret < 0) { 4419 goto out; 4420 } 4421 ret = kvm_getput_regs(cpu, 0); 4422 if (ret < 0) { 4423 goto out; 4424 } 4425 ret = kvm_get_xsave(cpu); 4426 if (ret < 0) { 4427 goto out; 4428 } 4429 ret = kvm_get_xcrs(cpu); 4430 if (ret < 0) { 4431 goto out; 4432 } 4433 ret = has_sregs2 ? 
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    /* must be before kvm_put_nested_state so that EFER.SVME is set */
    ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_nested_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }

        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_nested_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}
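
/*
 * Runs on the vcpu thread immediately before KVM_RUN: inject pending
 * NMI/SMI, and, for a userspace irqchip, either inject a pending
 * external interrupt or request an interrupt-window exit, mirroring
 * the current TPR into run->cr8 on the way in.
 */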
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts.
         */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

static void kvm_rate_limit_on_bus_lock(void)
{
    uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);

    if (delay_ns) {
        g_usleep(delay_ns / SCALE_US);
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    if (run->flags & KVM_RUN_X86_BUS_LOCK) {
        kvm_rate_limit_on_bus_lock();
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}
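
/*
 * Process interrupt_request bits that must be handled outside of
 * KVM_RUN on the vcpu thread.  The return value is non-zero when the
 * vcpu should stay halted rather than re-enter the guest.
 */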
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_nr == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
        return -EINVAL;
    }
    if (int3 != 0xcc) {
        return 0;
    }
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}
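
/*
 * x86 has exactly four debug-address registers, so at most four
 * hardware breakpoints/watchpoints can be armed at once; watchpoints
 * must be naturally aligned to their 1/2/4/8-byte length.
 */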
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == EXCP01_DB) {
        if (arch_info->dr6 & DR6_BS) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_nr == -1);

        /* pass to guest */
        kvm_queue_exception(env, arch_info->exception,
                            arch_info->exception == EXCP01_DB,
                            arch_info->dr6);
        env->has_error_code = 0;
    }

    return ret;
}
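
/*
 * Build the DR7 image handed to KVM: 0x0600 sets GE plus the
 * always-one bit, each armed breakpoint gets a global-enable bit in
 * the low byte, and its 2-bit type/length codes are packed in 4-bit
 * groups starting at bit 16 (per the SDM's DR7 layout).
 */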
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

static bool has_sgx_provisioning;

static bool __kvm_enable_sgx_provisioning(KVMState *s)
{
    int fd, ret;

    if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
        return false;
    }

    fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
    if (fd < 0) {
        return false;
    }

    ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
    if (ret) {
        error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
        exit(1);
    }
    close(fd);
    return true;
}

bool kvm_enable_sgx_provisioning(KVMState *s)
{
    return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on older Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    case KVM_EXIT_X86_BUS_LOCK:
        /* already handled in kvm_arch_post_run */
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
         * MSI routes for signaling interrupts to the local apics.
         */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
        kvm_irqchip_commit_route_changes(&c);
    }
}
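
/*
 * With a split irqchip only the local APICs live in the kernel; the
 * IOAPIC, PIC and PIT stay in QEMU.  The final argument (24) asks KVM
 * to reserve that many routes for the userspace IOAPIC's pins,
 * matching IOAPIC_NUM_PINS above.
 */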
int kvm_arch_irqchip_create(KVMState *s)
{
    int ret;

    if (kvm_kernel_irqchip_split()) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}
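
/*
 * KVM_FEATURE_MSI_EXT_DEST_ID lets a guest target more than 255 CPUs
 * without interrupt remapping by carrying extra destination-ID bits
 * in the otherwise-unused MSI address bits 11-5; relocate those bits
 * into the high half of the address for the kernel.
 */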
uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
{
    CPUX86State *env;
    uint64_t ext_id;

    if (!first_cpu) {
        return address;
    }
    env = &X86_CPU(first_cpu)->env;
    if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
        return address;
    }

    /*
     * If the remappable format bit is set, or the upper bits are
     * already set in address_hi, or the low extended bits aren't
     * there anyway, do nothing.
     */
    ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
    if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
        return address;
    }

    address &= ~ext_id;
    address |= ext_id << 35;
    return address;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);

        if (class->int_remap) {
            int ret;
            MSIMessage src, dst;

            src.address = route->u.msi.address_hi;
            src.address <<= VTD_MSI_ADDR_HI_SHIFT;
            src.address |= route->u.msi.address_lo;
            src.data = route->u.msi.data;

            ret = class->int_remap(iommu, &src, &dst, dev ?
                                   pci_requester_id(dev) :
                                   X86_IOMMU_SID_INVALID);
            if (ret) {
                trace_kvm_x86_fixup_msi_error(route->gsi);
                return 1;
            }

            /*
             * Handle untranslated compatibility-format interrupts with
             * the extended destination ID in the low bits 11-5.
             */
            dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);

            route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
            route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
            route->u.msi.data = dst.data;
            return 0;
        }
    }

    address = kvm_swizzle_msi_ext_dest_id(address);
    route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
    route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
    return 0;
}

typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);
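
/*
 * IEC (interrupt entry cache) invalidation callback: re-read the
 * current MSI/MSI-X message of every tracked PCI device route and
 * push the refreshed routing into the kernel in a single commit.
 */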
static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSIX is disabled for the device, or the
             * specific message was masked out.  Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for the split
         * kernel irqchip mode, while we only track PCI devices here.
         */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* The first time a route is added, register ourselves on the
         * IOMMU's IEC notifier list if needed.
         */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;

    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

bool kvm_has_waitpkg(void)
{
    return has_msr_umwait;
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return !sev_es_enabled();
}

#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025

void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
{
    KVMState *s = kvm_state;
    uint64_t supported;

    mask &= XSTATE_DYNAMIC_MASK;
    if (!mask) {
        return;
    }
    /*
     * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
     * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
     * about them already because they are not supported features.
     */
    supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
    supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
    mask &= supported;

    while (mask) {
        int bit = ctz64(mask);
        int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
        if (rc) {
            /*
             * Older kernel versions (<5.17) do not support
             * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return
             * any dynamic feature from kvm_arch_get_supported_cpuid.
             */
            warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
                        "for feature bit %d", bit);
        }
        mask &= ~BIT_ULL(bit);
    }
}