1 #include "qemu/osdep.h" 2 #include "cpu.h" 3 #include "exec/exec-all.h" 4 #include "hw/isa/isa.h" 5 #include "migration/cpu.h" 6 #include "kvm/hyperv.h" 7 #include "hw/i386/x86.h" 8 #include "kvm/kvm_i386.h" 9 #include "hw/xen/xen.h" 10 11 #include "sysemu/kvm.h" 12 #include "sysemu/kvm_xen.h" 13 #include "sysemu/tcg.h" 14 15 #include "qemu/error-report.h" 16 17 static const VMStateDescription vmstate_segment = { 18 .name = "segment", 19 .version_id = 1, 20 .minimum_version_id = 1, 21 .fields = (const VMStateField[]) { 22 VMSTATE_UINT32(selector, SegmentCache), 23 VMSTATE_UINTTL(base, SegmentCache), 24 VMSTATE_UINT32(limit, SegmentCache), 25 VMSTATE_UINT32(flags, SegmentCache), 26 VMSTATE_END_OF_LIST() 27 } 28 }; 29 30 #define VMSTATE_SEGMENT(_field, _state) { \ 31 .name = (stringify(_field)), \ 32 .size = sizeof(SegmentCache), \ 33 .vmsd = &vmstate_segment, \ 34 .flags = VMS_STRUCT, \ 35 .offset = offsetof(_state, _field) \ 36 + type_check(SegmentCache,typeof_field(_state, _field)) \ 37 } 38 39 #define VMSTATE_SEGMENT_ARRAY(_field, _state, _n) \ 40 VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache) 41 42 static const VMStateDescription vmstate_xmm_reg = { 43 .name = "xmm_reg", 44 .version_id = 1, 45 .minimum_version_id = 1, 46 .fields = (const VMStateField[]) { 47 VMSTATE_UINT64(ZMM_Q(0), ZMMReg), 48 VMSTATE_UINT64(ZMM_Q(1), ZMMReg), 49 VMSTATE_END_OF_LIST() 50 } 51 }; 52 53 #define VMSTATE_XMM_REGS(_field, _state, _start) \ 54 VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \ 55 vmstate_xmm_reg, ZMMReg) 56 57 /* YMMH format is the same as XMM, but for bits 128-255 */ 58 static const VMStateDescription vmstate_ymmh_reg = { 59 .name = "ymmh_reg", 60 .version_id = 1, 61 .minimum_version_id = 1, 62 .fields = (const VMStateField[]) { 63 VMSTATE_UINT64(ZMM_Q(2), ZMMReg), 64 VMSTATE_UINT64(ZMM_Q(3), ZMMReg), 65 VMSTATE_END_OF_LIST() 66 } 67 }; 68 69 #define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v) \ 70 VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v, \ 71 vmstate_ymmh_reg, ZMMReg) 72 73 static const VMStateDescription vmstate_zmmh_reg = { 74 .name = "zmmh_reg", 75 .version_id = 1, 76 .minimum_version_id = 1, 77 .fields = (const VMStateField[]) { 78 VMSTATE_UINT64(ZMM_Q(4), ZMMReg), 79 VMSTATE_UINT64(ZMM_Q(5), ZMMReg), 80 VMSTATE_UINT64(ZMM_Q(6), ZMMReg), 81 VMSTATE_UINT64(ZMM_Q(7), ZMMReg), 82 VMSTATE_END_OF_LIST() 83 } 84 }; 85 86 #define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start) \ 87 VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \ 88 vmstate_zmmh_reg, ZMMReg) 89 90 #ifdef TARGET_X86_64 91 static const VMStateDescription vmstate_hi16_zmm_reg = { 92 .name = "hi16_zmm_reg", 93 .version_id = 1, 94 .minimum_version_id = 1, 95 .fields = (const VMStateField[]) { 96 VMSTATE_UINT64(ZMM_Q(0), ZMMReg), 97 VMSTATE_UINT64(ZMM_Q(1), ZMMReg), 98 VMSTATE_UINT64(ZMM_Q(2), ZMMReg), 99 VMSTATE_UINT64(ZMM_Q(3), ZMMReg), 100 VMSTATE_UINT64(ZMM_Q(4), ZMMReg), 101 VMSTATE_UINT64(ZMM_Q(5), ZMMReg), 102 VMSTATE_UINT64(ZMM_Q(6), ZMMReg), 103 VMSTATE_UINT64(ZMM_Q(7), ZMMReg), 104 VMSTATE_END_OF_LIST() 105 } 106 }; 107 108 #define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start) \ 109 VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \ 110 vmstate_hi16_zmm_reg, ZMMReg) 111 #endif 112 113 static const VMStateDescription vmstate_bnd_regs = { 114 .name = "bnd_regs", 115 .version_id = 1, 116 .minimum_version_id = 1, 117 .fields = (const VMStateField[]) { 118 VMSTATE_UINT64(lb, BNDReg), 119 VMSTATE_UINT64(ub, BNDReg), 120 VMSTATE_END_OF_LIST() 121 } 122 }; 123 124 #define VMSTATE_BND_REGS(_field, _state, _n) \ 125 VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg) 126 127 static const VMStateDescription vmstate_mtrr_var = { 128 .name = "mtrr_var", 129 .version_id = 1, 130 .minimum_version_id = 1, 131 .fields = (const VMStateField[]) { 132 VMSTATE_UINT64(base, MTRRVar), 133 VMSTATE_UINT64(mask, MTRRVar), 134 VMSTATE_END_OF_LIST() 135 } 136 }; 137 138 #define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \ 139 VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar) 140 141 static const VMStateDescription vmstate_lbr_records_var = { 142 .name = "lbr_records_var", 143 .version_id = 1, 144 .minimum_version_id = 1, 145 .fields = (const VMStateField[]) { 146 VMSTATE_UINT64(from, LBREntry), 147 VMSTATE_UINT64(to, LBREntry), 148 VMSTATE_UINT64(info, LBREntry), 149 VMSTATE_END_OF_LIST() 150 } 151 }; 152 153 #define VMSTATE_LBR_VARS(_field, _state, _n, _v) \ 154 VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_lbr_records_var, \ 155 LBREntry) 156 157 typedef struct x86_FPReg_tmp { 158 FPReg *parent; 159 uint64_t tmp_mant; 160 uint16_t tmp_exp; 161 } x86_FPReg_tmp; 162 163 static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f) 164 { 165 CPU_LDoubleU temp; 166 167 temp.d = f; 168 *pmant = temp.l.lower; 169 *pexp = temp.l.upper; 170 } 171 172 static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) 173 { 174 CPU_LDoubleU temp; 175 176 temp.l.upper = upper; 177 temp.l.lower = mant; 178 return temp.d; 179 } 180 181 static int fpreg_pre_save(void *opaque) 182 { 183 x86_FPReg_tmp *tmp = opaque; 184 185 /* we save the real CPU data (in case of MMX usage only 'mant' 186 contains the MMX register */ 187 cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d); 188 189 return 0; 190 } 191 192 static int fpreg_post_load(void *opaque, int version) 193 { 194 x86_FPReg_tmp *tmp = opaque; 195 196 tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp); 197 return 0; 198 } 199 200 static const VMStateDescription vmstate_fpreg_tmp = { 201 .name = "fpreg_tmp", 202 .post_load = fpreg_post_load, 203 .pre_save = fpreg_pre_save, 204 .fields = (const VMStateField[]) { 205 VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp), 206 VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp), 207 VMSTATE_END_OF_LIST() 208 } 209 }; 210 211 static const VMStateDescription vmstate_fpreg = { 212 .name = "fpreg", 213 .fields = (const VMStateField[]) { 214 VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp), 215 VMSTATE_END_OF_LIST() 216 } 217 }; 218 219 static int cpu_pre_save(void *opaque) 220 { 221 X86CPU *cpu = opaque; 222 CPUX86State *env = &cpu->env; 223 int i; 224 env->v_tpr = env->int_ctl & V_TPR_MASK; 225 /* FPU */ 226 env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; 227 env->fptag_vmstate = 0; 228 for(i = 0; i < 8; i++) { 229 env->fptag_vmstate |= ((!env->fptags[i]) << i); 230 } 231 232 env->fpregs_format_vmstate = 0; 233 234 /* 235 * Real mode guest segments register DPL should be zero. 236 * Older KVM version were setting it wrongly. 237 * Fixing it will allow live migration to host with unrestricted guest 238 * support (otherwise the migration will fail with invalid guest state 239 * error). 240 */ 241 if (!(env->cr[0] & CR0_PE_MASK) && 242 (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) { 243 env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK); 244 env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK); 245 env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK); 246 env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK); 247 env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK); 248 env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK); 249 } 250 251 #ifdef CONFIG_KVM 252 /* 253 * In case vCPU may have enabled VMX, we need to make sure kernel have 254 * required capabilities in order to perform migration correctly: 255 * 256 * 1) We must be able to extract vCPU nested-state from KVM. 257 * 258 * 2) In case vCPU is running in guest-mode and it has a pending exception, 259 * we must be able to determine if it's in a pending or injected state. 260 * Note that in case KVM don't have required capability to do so, 261 * a pending/injected exception will always appear as an 262 * injected exception. 263 */ 264 if (kvm_enabled() && cpu_vmx_maybe_enabled(env) && 265 (!env->nested_state || 266 (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) && 267 env->exception_injected))) { 268 error_report("Guest maybe enabled nested virtualization but kernel " 269 "does not support required capabilities to save vCPU " 270 "nested state"); 271 return -EINVAL; 272 } 273 #endif 274 275 /* 276 * When vCPU is running L2 and exception is still pending, 277 * it can potentially be intercepted by L1 hypervisor. 278 * In contrast to an injected exception which cannot be 279 * intercepted anymore. 280 * 281 * Furthermore, when a L2 exception is intercepted by L1 282 * hypervisor, its exception payload (CR2/DR6 on #PF/#DB) 283 * should not be set yet in the respective vCPU register. 284 * Thus, in case an exception is pending, it is 285 * important to save the exception payload separately. 286 * 287 * Therefore, if an exception is not in a pending state 288 * or vCPU is not in guest-mode, it is not important to 289 * distinguish between a pending and injected exception 290 * and we don't need to store separately the exception payload. 291 * 292 * In order to preserve better backwards-compatible migration, 293 * convert a pending exception to an injected exception in 294 * case it is not important to distinguish between them 295 * as described above. 296 */ 297 if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) { 298 env->exception_pending = 0; 299 env->exception_injected = 1; 300 301 if (env->exception_has_payload) { 302 if (env->exception_nr == EXCP01_DB) { 303 env->dr[6] = env->exception_payload; 304 } else if (env->exception_nr == EXCP0E_PAGE) { 305 env->cr[2] = env->exception_payload; 306 } 307 } 308 } 309 310 return 0; 311 } 312 313 static int cpu_post_load(void *opaque, int version_id) 314 { 315 X86CPU *cpu = opaque; 316 CPUState *cs = CPU(cpu); 317 CPUX86State *env = &cpu->env; 318 int i; 319 320 if (env->tsc_khz && env->user_tsc_khz && 321 env->tsc_khz != env->user_tsc_khz) { 322 error_report("Mismatch between user-specified TSC frequency and " 323 "migrated TSC frequency"); 324 return -EINVAL; 325 } 326 327 if (env->fpregs_format_vmstate) { 328 error_report("Unsupported old non-softfloat CPU state"); 329 return -EINVAL; 330 } 331 /* 332 * Real mode guest segments register DPL should be zero. 333 * Older KVM version were setting it wrongly. 334 * Fixing it will allow live migration from such host that don't have 335 * restricted guest support to a host with unrestricted guest support 336 * (otherwise the migration will fail with invalid guest state 337 * error). 338 */ 339 if (!(env->cr[0] & CR0_PE_MASK) && 340 (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) { 341 env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK); 342 env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK); 343 env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK); 344 env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK); 345 env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK); 346 env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK); 347 } 348 349 /* Older versions of QEMU incorrectly used CS.DPL as the CPL when 350 * running under KVM. This is wrong for conforming code segments. 351 * Luckily, in our implementation the CPL field of hflags is redundant 352 * and we can get the right value from the SS descriptor privilege level. 353 */ 354 env->hflags &= ~HF_CPL_MASK; 355 env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 356 357 #ifdef CONFIG_KVM 358 if ((env->hflags & HF_GUEST_MASK) && 359 (!env->nested_state || 360 !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) { 361 error_report("vCPU set in guest-mode inconsistent with " 362 "migrated kernel nested state"); 363 return -EINVAL; 364 } 365 #endif 366 367 /* 368 * There are cases that we can get valid exception_nr with both 369 * exception_pending and exception_injected being cleared. 370 * This can happen in one of the following scenarios: 371 * 1) Source is older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support. 372 * 2) Source is running on kernel without KVM_CAP_EXCEPTION_PAYLOAD support. 373 * 3) "cpu/exception_info" subsection not sent because there is no exception 374 * pending or guest wasn't running L2 (See comment in cpu_pre_save()). 375 * 376 * In those cases, we can just deduce that a valid exception_nr means 377 * we can treat the exception as already injected. 378 */ 379 if ((env->exception_nr != -1) && 380 !env->exception_pending && !env->exception_injected) { 381 env->exception_injected = 1; 382 } 383 384 env->fpstt = (env->fpus_vmstate >> 11) & 7; 385 env->fpus = env->fpus_vmstate & ~0x3800; 386 env->fptag_vmstate ^= 0xff; 387 for(i = 0; i < 8; i++) { 388 env->fptags[i] = (env->fptag_vmstate >> i) & 1; 389 } 390 if (tcg_enabled()) { 391 target_ulong dr7; 392 update_fp_status(env); 393 update_mxcsr_status(env); 394 395 cpu_breakpoint_remove_all(cs, BP_CPU); 396 cpu_watchpoint_remove_all(cs, BP_CPU); 397 398 /* Indicate all breakpoints disabled, as they are, then 399 let the helper re-enable them. */ 400 dr7 = env->dr[7]; 401 env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK); 402 cpu_x86_update_dr7(env, dr7); 403 } 404 tlb_flush(cs); 405 return 0; 406 } 407 408 static bool async_pf_msr_needed(void *opaque) 409 { 410 X86CPU *cpu = opaque; 411 412 return cpu->env.async_pf_en_msr != 0; 413 } 414 415 static bool async_pf_int_msr_needed(void *opaque) 416 { 417 X86CPU *cpu = opaque; 418 419 return cpu->env.async_pf_int_msr != 0; 420 } 421 422 static bool pv_eoi_msr_needed(void *opaque) 423 { 424 X86CPU *cpu = opaque; 425 426 return cpu->env.pv_eoi_en_msr != 0; 427 } 428 429 static bool steal_time_msr_needed(void *opaque) 430 { 431 X86CPU *cpu = opaque; 432 433 return cpu->env.steal_time_msr != 0; 434 } 435 436 static bool exception_info_needed(void *opaque) 437 { 438 X86CPU *cpu = opaque; 439 CPUX86State *env = &cpu->env; 440 441 /* 442 * It is important to save exception-info only in case 443 * we need to distinguish between a pending and injected 444 * exception. Which is only required in case there is a 445 * pending exception and vCPU is running L2. 446 * For more info, refer to comment in cpu_pre_save(). 447 */ 448 return env->exception_pending && (env->hflags & HF_GUEST_MASK); 449 } 450 451 static const VMStateDescription vmstate_exception_info = { 452 .name = "cpu/exception_info", 453 .version_id = 1, 454 .minimum_version_id = 1, 455 .needed = exception_info_needed, 456 .fields = (const VMStateField[]) { 457 VMSTATE_UINT8(env.exception_pending, X86CPU), 458 VMSTATE_UINT8(env.exception_injected, X86CPU), 459 VMSTATE_UINT8(env.exception_has_payload, X86CPU), 460 VMSTATE_UINT64(env.exception_payload, X86CPU), 461 VMSTATE_END_OF_LIST() 462 } 463 }; 464 465 /* Poll control MSR enabled by default */ 466 static bool poll_control_msr_needed(void *opaque) 467 { 468 X86CPU *cpu = opaque; 469 470 return cpu->env.poll_control_msr != 1; 471 } 472 473 static const VMStateDescription vmstate_steal_time_msr = { 474 .name = "cpu/steal_time_msr", 475 .version_id = 1, 476 .minimum_version_id = 1, 477 .needed = steal_time_msr_needed, 478 .fields = (const VMStateField[]) { 479 VMSTATE_UINT64(env.steal_time_msr, X86CPU), 480 VMSTATE_END_OF_LIST() 481 } 482 }; 483 484 static const VMStateDescription vmstate_async_pf_msr = { 485 .name = "cpu/async_pf_msr", 486 .version_id = 1, 487 .minimum_version_id = 1, 488 .needed = async_pf_msr_needed, 489 .fields = (const VMStateField[]) { 490 VMSTATE_UINT64(env.async_pf_en_msr, X86CPU), 491 VMSTATE_END_OF_LIST() 492 } 493 }; 494 495 static const VMStateDescription vmstate_async_pf_int_msr = { 496 .name = "cpu/async_pf_int_msr", 497 .version_id = 1, 498 .minimum_version_id = 1, 499 .needed = async_pf_int_msr_needed, 500 .fields = (const VMStateField[]) { 501 VMSTATE_UINT64(env.async_pf_int_msr, X86CPU), 502 VMSTATE_END_OF_LIST() 503 } 504 }; 505 506 static const VMStateDescription vmstate_pv_eoi_msr = { 507 .name = "cpu/async_pv_eoi_msr", 508 .version_id = 1, 509 .minimum_version_id = 1, 510 .needed = pv_eoi_msr_needed, 511 .fields = (const VMStateField[]) { 512 VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU), 513 VMSTATE_END_OF_LIST() 514 } 515 }; 516 517 static const VMStateDescription vmstate_poll_control_msr = { 518 .name = "cpu/poll_control_msr", 519 .version_id = 1, 520 .minimum_version_id = 1, 521 .needed = poll_control_msr_needed, 522 .fields = (const VMStateField[]) { 523 VMSTATE_UINT64(env.poll_control_msr, X86CPU), 524 VMSTATE_END_OF_LIST() 525 } 526 }; 527 528 static bool fpop_ip_dp_needed(void *opaque) 529 { 530 X86CPU *cpu = opaque; 531 CPUX86State *env = &cpu->env; 532 533 return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0; 534 } 535 536 static const VMStateDescription vmstate_fpop_ip_dp = { 537 .name = "cpu/fpop_ip_dp", 538 .version_id = 1, 539 .minimum_version_id = 1, 540 .needed = fpop_ip_dp_needed, 541 .fields = (const VMStateField[]) { 542 VMSTATE_UINT16(env.fpop, X86CPU), 543 VMSTATE_UINT64(env.fpip, X86CPU), 544 VMSTATE_UINT64(env.fpdp, X86CPU), 545 VMSTATE_END_OF_LIST() 546 } 547 }; 548 549 static bool tsc_adjust_needed(void *opaque) 550 { 551 X86CPU *cpu = opaque; 552 CPUX86State *env = &cpu->env; 553 554 return env->tsc_adjust != 0; 555 } 556 557 static const VMStateDescription vmstate_msr_tsc_adjust = { 558 .name = "cpu/msr_tsc_adjust", 559 .version_id = 1, 560 .minimum_version_id = 1, 561 .needed = tsc_adjust_needed, 562 .fields = (const VMStateField[]) { 563 VMSTATE_UINT64(env.tsc_adjust, X86CPU), 564 VMSTATE_END_OF_LIST() 565 } 566 }; 567 568 static bool msr_smi_count_needed(void *opaque) 569 { 570 X86CPU *cpu = opaque; 571 CPUX86State *env = &cpu->env; 572 573 return cpu->migrate_smi_count && env->msr_smi_count != 0; 574 } 575 576 static const VMStateDescription vmstate_msr_smi_count = { 577 .name = "cpu/msr_smi_count", 578 .version_id = 1, 579 .minimum_version_id = 1, 580 .needed = msr_smi_count_needed, 581 .fields = (const VMStateField[]) { 582 VMSTATE_UINT64(env.msr_smi_count, X86CPU), 583 VMSTATE_END_OF_LIST() 584 } 585 }; 586 587 static bool tscdeadline_needed(void *opaque) 588 { 589 X86CPU *cpu = opaque; 590 CPUX86State *env = &cpu->env; 591 592 return env->tsc_deadline != 0; 593 } 594 595 static const VMStateDescription vmstate_msr_tscdeadline = { 596 .name = "cpu/msr_tscdeadline", 597 .version_id = 1, 598 .minimum_version_id = 1, 599 .needed = tscdeadline_needed, 600 .fields = (const VMStateField[]) { 601 VMSTATE_UINT64(env.tsc_deadline, X86CPU), 602 VMSTATE_END_OF_LIST() 603 } 604 }; 605 606 static bool misc_enable_needed(void *opaque) 607 { 608 X86CPU *cpu = opaque; 609 CPUX86State *env = &cpu->env; 610 611 return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT; 612 } 613 614 static bool feature_control_needed(void *opaque) 615 { 616 X86CPU *cpu = opaque; 617 CPUX86State *env = &cpu->env; 618 619 return env->msr_ia32_feature_control != 0; 620 } 621 622 static const VMStateDescription vmstate_msr_ia32_misc_enable = { 623 .name = "cpu/msr_ia32_misc_enable", 624 .version_id = 1, 625 .minimum_version_id = 1, 626 .needed = misc_enable_needed, 627 .fields = (const VMStateField[]) { 628 VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU), 629 VMSTATE_END_OF_LIST() 630 } 631 }; 632 633 static const VMStateDescription vmstate_msr_ia32_feature_control = { 634 .name = "cpu/msr_ia32_feature_control", 635 .version_id = 1, 636 .minimum_version_id = 1, 637 .needed = feature_control_needed, 638 .fields = (const VMStateField[]) { 639 VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU), 640 VMSTATE_END_OF_LIST() 641 } 642 }; 643 644 static bool pmu_enable_needed(void *opaque) 645 { 646 X86CPU *cpu = opaque; 647 CPUX86State *env = &cpu->env; 648 int i; 649 650 if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl || 651 env->msr_global_status || env->msr_global_ovf_ctrl) { 652 return true; 653 } 654 for (i = 0; i < MAX_FIXED_COUNTERS; i++) { 655 if (env->msr_fixed_counters[i]) { 656 return true; 657 } 658 } 659 for (i = 0; i < MAX_GP_COUNTERS; i++) { 660 if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) { 661 return true; 662 } 663 } 664 665 return false; 666 } 667 668 static const VMStateDescription vmstate_msr_architectural_pmu = { 669 .name = "cpu/msr_architectural_pmu", 670 .version_id = 1, 671 .minimum_version_id = 1, 672 .needed = pmu_enable_needed, 673 .fields = (const VMStateField[]) { 674 VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU), 675 VMSTATE_UINT64(env.msr_global_ctrl, X86CPU), 676 VMSTATE_UINT64(env.msr_global_status, X86CPU), 677 VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU), 678 VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS), 679 VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS), 680 VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS), 681 VMSTATE_END_OF_LIST() 682 } 683 }; 684 685 static bool mpx_needed(void *opaque) 686 { 687 X86CPU *cpu = opaque; 688 CPUX86State *env = &cpu->env; 689 unsigned int i; 690 691 for (i = 0; i < 4; i++) { 692 if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) { 693 return true; 694 } 695 } 696 697 if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) { 698 return true; 699 } 700 701 return !!env->msr_bndcfgs; 702 } 703 704 static const VMStateDescription vmstate_mpx = { 705 .name = "cpu/mpx", 706 .version_id = 1, 707 .minimum_version_id = 1, 708 .needed = mpx_needed, 709 .fields = (const VMStateField[]) { 710 VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4), 711 VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU), 712 VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU), 713 VMSTATE_UINT64(env.msr_bndcfgs, X86CPU), 714 VMSTATE_END_OF_LIST() 715 } 716 }; 717 718 static bool hyperv_hypercall_enable_needed(void *opaque) 719 { 720 X86CPU *cpu = opaque; 721 CPUX86State *env = &cpu->env; 722 723 return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0; 724 } 725 726 static const VMStateDescription vmstate_msr_hyperv_hypercall = { 727 .name = "cpu/msr_hyperv_hypercall", 728 .version_id = 1, 729 .minimum_version_id = 1, 730 .needed = hyperv_hypercall_enable_needed, 731 .fields = (const VMStateField[]) { 732 VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU), 733 VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU), 734 VMSTATE_END_OF_LIST() 735 } 736 }; 737 738 static bool hyperv_vapic_enable_needed(void *opaque) 739 { 740 X86CPU *cpu = opaque; 741 CPUX86State *env = &cpu->env; 742 743 return env->msr_hv_vapic != 0; 744 } 745 746 static const VMStateDescription vmstate_msr_hyperv_vapic = { 747 .name = "cpu/msr_hyperv_vapic", 748 .version_id = 1, 749 .minimum_version_id = 1, 750 .needed = hyperv_vapic_enable_needed, 751 .fields = (const VMStateField[]) { 752 VMSTATE_UINT64(env.msr_hv_vapic, X86CPU), 753 VMSTATE_END_OF_LIST() 754 } 755 }; 756 757 static bool hyperv_time_enable_needed(void *opaque) 758 { 759 X86CPU *cpu = opaque; 760 CPUX86State *env = &cpu->env; 761 762 return env->msr_hv_tsc != 0; 763 } 764 765 static const VMStateDescription vmstate_msr_hyperv_time = { 766 .name = "cpu/msr_hyperv_time", 767 .version_id = 1, 768 .minimum_version_id = 1, 769 .needed = hyperv_time_enable_needed, 770 .fields = (const VMStateField[]) { 771 VMSTATE_UINT64(env.msr_hv_tsc, X86CPU), 772 VMSTATE_END_OF_LIST() 773 } 774 }; 775 776 static bool hyperv_crash_enable_needed(void *opaque) 777 { 778 X86CPU *cpu = opaque; 779 CPUX86State *env = &cpu->env; 780 int i; 781 782 for (i = 0; i < HV_CRASH_PARAMS; i++) { 783 if (env->msr_hv_crash_params[i]) { 784 return true; 785 } 786 } 787 return false; 788 } 789 790 static const VMStateDescription vmstate_msr_hyperv_crash = { 791 .name = "cpu/msr_hyperv_crash", 792 .version_id = 1, 793 .minimum_version_id = 1, 794 .needed = hyperv_crash_enable_needed, 795 .fields = (const VMStateField[]) { 796 VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS), 797 VMSTATE_END_OF_LIST() 798 } 799 }; 800 801 static bool hyperv_runtime_enable_needed(void *opaque) 802 { 803 X86CPU *cpu = opaque; 804 CPUX86State *env = &cpu->env; 805 806 if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) { 807 return false; 808 } 809 810 return env->msr_hv_runtime != 0; 811 } 812 813 static const VMStateDescription vmstate_msr_hyperv_runtime = { 814 .name = "cpu/msr_hyperv_runtime", 815 .version_id = 1, 816 .minimum_version_id = 1, 817 .needed = hyperv_runtime_enable_needed, 818 .fields = (const VMStateField[]) { 819 VMSTATE_UINT64(env.msr_hv_runtime, X86CPU), 820 VMSTATE_END_OF_LIST() 821 } 822 }; 823 824 static bool hyperv_synic_enable_needed(void *opaque) 825 { 826 X86CPU *cpu = opaque; 827 CPUX86State *env = &cpu->env; 828 int i; 829 830 if (env->msr_hv_synic_control != 0 || 831 env->msr_hv_synic_evt_page != 0 || 832 env->msr_hv_synic_msg_page != 0) { 833 return true; 834 } 835 836 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { 837 if (env->msr_hv_synic_sint[i] != 0) { 838 return true; 839 } 840 } 841 842 return false; 843 } 844 845 static int hyperv_synic_post_load(void *opaque, int version_id) 846 { 847 X86CPU *cpu = opaque; 848 hyperv_x86_synic_update(cpu); 849 return 0; 850 } 851 852 static const VMStateDescription vmstate_msr_hyperv_synic = { 853 .name = "cpu/msr_hyperv_synic", 854 .version_id = 1, 855 .minimum_version_id = 1, 856 .needed = hyperv_synic_enable_needed, 857 .post_load = hyperv_synic_post_load, 858 .fields = (const VMStateField[]) { 859 VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU), 860 VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU), 861 VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU), 862 VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT), 863 VMSTATE_END_OF_LIST() 864 } 865 }; 866 867 static bool hyperv_stimer_enable_needed(void *opaque) 868 { 869 X86CPU *cpu = opaque; 870 CPUX86State *env = &cpu->env; 871 int i; 872 873 for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) { 874 if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) { 875 return true; 876 } 877 } 878 return false; 879 } 880 881 static const VMStateDescription vmstate_msr_hyperv_stimer = { 882 .name = "cpu/msr_hyperv_stimer", 883 .version_id = 1, 884 .minimum_version_id = 1, 885 .needed = hyperv_stimer_enable_needed, 886 .fields = (const VMStateField[]) { 887 VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU, 888 HV_STIMER_COUNT), 889 VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT), 890 VMSTATE_END_OF_LIST() 891 } 892 }; 893 894 static bool hyperv_reenlightenment_enable_needed(void *opaque) 895 { 896 X86CPU *cpu = opaque; 897 CPUX86State *env = &cpu->env; 898 899 return env->msr_hv_reenlightenment_control != 0 || 900 env->msr_hv_tsc_emulation_control != 0 || 901 env->msr_hv_tsc_emulation_status != 0; 902 } 903 904 static int hyperv_reenlightenment_post_load(void *opaque, int version_id) 905 { 906 X86CPU *cpu = opaque; 907 CPUX86State *env = &cpu->env; 908 909 /* 910 * KVM doesn't fully support re-enlightenment notifications so we need to 911 * make sure TSC frequency doesn't change upon migration. 912 */ 913 if ((env->msr_hv_reenlightenment_control & HV_REENLIGHTENMENT_ENABLE_BIT) && 914 !env->user_tsc_khz) { 915 error_report("Guest enabled re-enlightenment notifications, " 916 "'tsc-frequency=' has to be specified"); 917 return -EINVAL; 918 } 919 920 return 0; 921 } 922 923 static const VMStateDescription vmstate_msr_hyperv_reenlightenment = { 924 .name = "cpu/msr_hyperv_reenlightenment", 925 .version_id = 1, 926 .minimum_version_id = 1, 927 .needed = hyperv_reenlightenment_enable_needed, 928 .post_load = hyperv_reenlightenment_post_load, 929 .fields = (const VMStateField[]) { 930 VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU), 931 VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU), 932 VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU), 933 VMSTATE_END_OF_LIST() 934 } 935 }; 936 937 static bool avx512_needed(void *opaque) 938 { 939 X86CPU *cpu = opaque; 940 CPUX86State *env = &cpu->env; 941 unsigned int i; 942 943 for (i = 0; i < NB_OPMASK_REGS; i++) { 944 if (env->opmask_regs[i]) { 945 return true; 946 } 947 } 948 949 for (i = 0; i < CPU_NB_REGS; i++) { 950 #define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field)) 951 if (ENV_XMM(i, 4) || ENV_XMM(i, 6) || 952 ENV_XMM(i, 5) || ENV_XMM(i, 7)) { 953 return true; 954 } 955 #ifdef TARGET_X86_64 956 if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) || 957 ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) || 958 ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) || 959 ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) { 960 return true; 961 } 962 #endif 963 } 964 965 return false; 966 } 967 968 static const VMStateDescription vmstate_avx512 = { 969 .name = "cpu/avx512", 970 .version_id = 1, 971 .minimum_version_id = 1, 972 .needed = avx512_needed, 973 .fields = (const VMStateField[]) { 974 VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS), 975 VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0), 976 #ifdef TARGET_X86_64 977 VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16), 978 #endif 979 VMSTATE_END_OF_LIST() 980 } 981 }; 982 983 static bool xss_needed(void *opaque) 984 { 985 X86CPU *cpu = opaque; 986 CPUX86State *env = &cpu->env; 987 988 return env->xss != 0; 989 } 990 991 static const VMStateDescription vmstate_xss = { 992 .name = "cpu/xss", 993 .version_id = 1, 994 .minimum_version_id = 1, 995 .needed = xss_needed, 996 .fields = (const VMStateField[]) { 997 VMSTATE_UINT64(env.xss, X86CPU), 998 VMSTATE_END_OF_LIST() 999 } 1000 }; 1001 1002 static bool umwait_needed(void *opaque) 1003 { 1004 X86CPU *cpu = opaque; 1005 CPUX86State *env = &cpu->env; 1006 1007 return env->umwait != 0; 1008 } 1009 1010 static const VMStateDescription vmstate_umwait = { 1011 .name = "cpu/umwait", 1012 .version_id = 1, 1013 .minimum_version_id = 1, 1014 .needed = umwait_needed, 1015 .fields = (const VMStateField[]) { 1016 VMSTATE_UINT32(env.umwait, X86CPU), 1017 VMSTATE_END_OF_LIST() 1018 } 1019 }; 1020 1021 static bool pkru_needed(void *opaque) 1022 { 1023 X86CPU *cpu = opaque; 1024 CPUX86State *env = &cpu->env; 1025 1026 return env->pkru != 0; 1027 } 1028 1029 static const VMStateDescription vmstate_pkru = { 1030 .name = "cpu/pkru", 1031 .version_id = 1, 1032 .minimum_version_id = 1, 1033 .needed = pkru_needed, 1034 .fields = (const VMStateField[]){ 1035 VMSTATE_UINT32(env.pkru, X86CPU), 1036 VMSTATE_END_OF_LIST() 1037 } 1038 }; 1039 1040 static bool pkrs_needed(void *opaque) 1041 { 1042 X86CPU *cpu = opaque; 1043 CPUX86State *env = &cpu->env; 1044 1045 return env->pkrs != 0; 1046 } 1047 1048 static const VMStateDescription vmstate_pkrs = { 1049 .name = "cpu/pkrs", 1050 .version_id = 1, 1051 .minimum_version_id = 1, 1052 .needed = pkrs_needed, 1053 .fields = (const VMStateField[]){ 1054 VMSTATE_UINT32(env.pkrs, X86CPU), 1055 VMSTATE_END_OF_LIST() 1056 } 1057 }; 1058 1059 static bool tsc_khz_needed(void *opaque) 1060 { 1061 X86CPU *cpu = opaque; 1062 CPUX86State *env = &cpu->env; 1063 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); 1064 X86MachineClass *x86mc = X86_MACHINE_CLASS(mc); 1065 return env->tsc_khz && x86mc->save_tsc_khz; 1066 } 1067 1068 static const VMStateDescription vmstate_tsc_khz = { 1069 .name = "cpu/tsc_khz", 1070 .version_id = 1, 1071 .minimum_version_id = 1, 1072 .needed = tsc_khz_needed, 1073 .fields = (const VMStateField[]) { 1074 VMSTATE_INT64(env.tsc_khz, X86CPU), 1075 VMSTATE_END_OF_LIST() 1076 } 1077 }; 1078 1079 #ifdef CONFIG_KVM 1080 1081 static bool vmx_vmcs12_needed(void *opaque) 1082 { 1083 struct kvm_nested_state *nested_state = opaque; 1084 return (nested_state->size > 1085 offsetof(struct kvm_nested_state, data.vmx[0].vmcs12)); 1086 } 1087 1088 static const VMStateDescription vmstate_vmx_vmcs12 = { 1089 .name = "cpu/kvm_nested_state/vmx/vmcs12", 1090 .version_id = 1, 1091 .minimum_version_id = 1, 1092 .needed = vmx_vmcs12_needed, 1093 .fields = (const VMStateField[]) { 1094 VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12, 1095 struct kvm_nested_state, 1096 KVM_STATE_NESTED_VMX_VMCS_SIZE), 1097 VMSTATE_END_OF_LIST() 1098 } 1099 }; 1100 1101 static bool vmx_shadow_vmcs12_needed(void *opaque) 1102 { 1103 struct kvm_nested_state *nested_state = opaque; 1104 return (nested_state->size > 1105 offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12)); 1106 } 1107 1108 static const VMStateDescription vmstate_vmx_shadow_vmcs12 = { 1109 .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12", 1110 .version_id = 1, 1111 .minimum_version_id = 1, 1112 .needed = vmx_shadow_vmcs12_needed, 1113 .fields = (const VMStateField[]) { 1114 VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12, 1115 struct kvm_nested_state, 1116 KVM_STATE_NESTED_VMX_VMCS_SIZE), 1117 VMSTATE_END_OF_LIST() 1118 } 1119 }; 1120 1121 static bool vmx_nested_state_needed(void *opaque) 1122 { 1123 struct kvm_nested_state *nested_state = opaque; 1124 1125 return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX && 1126 nested_state->hdr.vmx.vmxon_pa != -1ull); 1127 } 1128 1129 static const VMStateDescription vmstate_vmx_nested_state = { 1130 .name = "cpu/kvm_nested_state/vmx", 1131 .version_id = 1, 1132 .minimum_version_id = 1, 1133 .needed = vmx_nested_state_needed, 1134 .fields = (const VMStateField[]) { 1135 VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state), 1136 VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state), 1137 VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state), 1138 VMSTATE_END_OF_LIST() 1139 }, 1140 .subsections = (const VMStateDescription * const []) { 1141 &vmstate_vmx_vmcs12, 1142 &vmstate_vmx_shadow_vmcs12, 1143 NULL, 1144 } 1145 }; 1146 1147 static bool svm_nested_state_needed(void *opaque) 1148 { 1149 struct kvm_nested_state *nested_state = opaque; 1150 1151 /* 1152 * HF_GUEST_MASK and HF2_GIF_MASK are already serialized 1153 * via hflags and hflags2, all that's left is the opaque 1154 * nested state blob. 1155 */ 1156 return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM && 1157 nested_state->size > offsetof(struct kvm_nested_state, data)); 1158 } 1159 1160 static const VMStateDescription vmstate_svm_nested_state = { 1161 .name = "cpu/kvm_nested_state/svm", 1162 .version_id = 1, 1163 .minimum_version_id = 1, 1164 .needed = svm_nested_state_needed, 1165 .fields = (const VMStateField[]) { 1166 VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state), 1167 VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12, 1168 struct kvm_nested_state, 1169 KVM_STATE_NESTED_SVM_VMCB_SIZE), 1170 VMSTATE_END_OF_LIST() 1171 } 1172 }; 1173 1174 static bool nested_state_needed(void *opaque) 1175 { 1176 X86CPU *cpu = opaque; 1177 CPUX86State *env = &cpu->env; 1178 1179 return (env->nested_state && 1180 (vmx_nested_state_needed(env->nested_state) || 1181 svm_nested_state_needed(env->nested_state))); 1182 } 1183 1184 static int nested_state_post_load(void *opaque, int version_id) 1185 { 1186 X86CPU *cpu = opaque; 1187 CPUX86State *env = &cpu->env; 1188 struct kvm_nested_state *nested_state = env->nested_state; 1189 int min_nested_state_len = offsetof(struct kvm_nested_state, data); 1190 int max_nested_state_len = kvm_max_nested_state_length(); 1191 1192 /* 1193 * If our kernel don't support setting nested state 1194 * and we have received nested state from migration stream, 1195 * we need to fail migration 1196 */ 1197 if (max_nested_state_len <= 0) { 1198 error_report("Received nested state when kernel cannot restore it"); 1199 return -EINVAL; 1200 } 1201 1202 /* 1203 * Verify that the size of received nested_state struct 1204 * at least cover required header and is not larger 1205 * than the max size that our kernel support 1206 */ 1207 if (nested_state->size < min_nested_state_len) { 1208 error_report("Received nested state size less than min: " 1209 "len=%d, min=%d", 1210 nested_state->size, min_nested_state_len); 1211 return -EINVAL; 1212 } 1213 if (nested_state->size > max_nested_state_len) { 1214 error_report("Received unsupported nested state size: " 1215 "nested_state->size=%d, max=%d", 1216 nested_state->size, max_nested_state_len); 1217 return -EINVAL; 1218 } 1219 1220 /* Verify format is valid */ 1221 if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) && 1222 (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) { 1223 error_report("Received invalid nested state format: %d", 1224 nested_state->format); 1225 return -EINVAL; 1226 } 1227 1228 return 0; 1229 } 1230 1231 static const VMStateDescription vmstate_kvm_nested_state = { 1232 .name = "cpu/kvm_nested_state", 1233 .version_id = 1, 1234 .minimum_version_id = 1, 1235 .fields = (const VMStateField[]) { 1236 VMSTATE_U16(flags, struct kvm_nested_state), 1237 VMSTATE_U16(format, struct kvm_nested_state), 1238 VMSTATE_U32(size, struct kvm_nested_state), 1239 VMSTATE_END_OF_LIST() 1240 }, 1241 .subsections = (const VMStateDescription * const []) { 1242 &vmstate_vmx_nested_state, 1243 &vmstate_svm_nested_state, 1244 NULL 1245 } 1246 }; 1247 1248 static const VMStateDescription vmstate_nested_state = { 1249 .name = "cpu/nested_state", 1250 .version_id = 1, 1251 .minimum_version_id = 1, 1252 .needed = nested_state_needed, 1253 .post_load = nested_state_post_load, 1254 .fields = (const VMStateField[]) { 1255 VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU, 1256 vmstate_kvm_nested_state, 1257 struct kvm_nested_state), 1258 VMSTATE_END_OF_LIST() 1259 } 1260 }; 1261 1262 static bool xen_vcpu_needed(void *opaque) 1263 { 1264 return (xen_mode == XEN_EMULATE); 1265 } 1266 1267 static const VMStateDescription vmstate_xen_vcpu = { 1268 .name = "cpu/xen_vcpu", 1269 .version_id = 1, 1270 .minimum_version_id = 1, 1271 .needed = xen_vcpu_needed, 1272 .fields = (const VMStateField[]) { 1273 VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU), 1274 VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU), 1275 VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU), 1276 VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU), 1277 VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU), 1278 VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS), 1279 VMSTATE_UINT64(env.xen_singleshot_timer_ns, X86CPU), 1280 VMSTATE_UINT64(env.xen_periodic_timer_period, X86CPU), 1281 VMSTATE_END_OF_LIST() 1282 } 1283 }; 1284 #endif 1285 1286 static bool mcg_ext_ctl_needed(void *opaque) 1287 { 1288 X86CPU *cpu = opaque; 1289 CPUX86State *env = &cpu->env; 1290 return cpu->enable_lmce && env->mcg_ext_ctl; 1291 } 1292 1293 static const VMStateDescription vmstate_mcg_ext_ctl = { 1294 .name = "cpu/mcg_ext_ctl", 1295 .version_id = 1, 1296 .minimum_version_id = 1, 1297 .needed = mcg_ext_ctl_needed, 1298 .fields = (const VMStateField[]) { 1299 VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU), 1300 VMSTATE_END_OF_LIST() 1301 } 1302 }; 1303 1304 static bool spec_ctrl_needed(void *opaque) 1305 { 1306 X86CPU *cpu = opaque; 1307 CPUX86State *env = &cpu->env; 1308 1309 return env->spec_ctrl != 0; 1310 } 1311 1312 static const VMStateDescription vmstate_spec_ctrl = { 1313 .name = "cpu/spec_ctrl", 1314 .version_id = 1, 1315 .minimum_version_id = 1, 1316 .needed = spec_ctrl_needed, 1317 .fields = (const VMStateField[]){ 1318 VMSTATE_UINT64(env.spec_ctrl, X86CPU), 1319 VMSTATE_END_OF_LIST() 1320 } 1321 }; 1322 1323 1324 static bool amd_tsc_scale_msr_needed(void *opaque) 1325 { 1326 X86CPU *cpu = opaque; 1327 CPUX86State *env = &cpu->env; 1328 1329 return (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE); 1330 } 1331 1332 static const VMStateDescription amd_tsc_scale_msr_ctrl = { 1333 .name = "cpu/amd_tsc_scale_msr", 1334 .version_id = 1, 1335 .minimum_version_id = 1, 1336 .needed = amd_tsc_scale_msr_needed, 1337 .fields = (const VMStateField[]){ 1338 VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU), 1339 VMSTATE_END_OF_LIST() 1340 } 1341 }; 1342 1343 1344 static bool intel_pt_enable_needed(void *opaque) 1345 { 1346 X86CPU *cpu = opaque; 1347 CPUX86State *env = &cpu->env; 1348 int i; 1349 1350 if (env->msr_rtit_ctrl || env->msr_rtit_status || 1351 env->msr_rtit_output_base || env->msr_rtit_output_mask || 1352 env->msr_rtit_cr3_match) { 1353 return true; 1354 } 1355 1356 for (i = 0; i < MAX_RTIT_ADDRS; i++) { 1357 if (env->msr_rtit_addrs[i]) { 1358 return true; 1359 } 1360 } 1361 1362 return false; 1363 } 1364 1365 static const VMStateDescription vmstate_msr_intel_pt = { 1366 .name = "cpu/intel_pt", 1367 .version_id = 1, 1368 .minimum_version_id = 1, 1369 .needed = intel_pt_enable_needed, 1370 .fields = (const VMStateField[]) { 1371 VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU), 1372 VMSTATE_UINT64(env.msr_rtit_status, X86CPU), 1373 VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU), 1374 VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU), 1375 VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU), 1376 VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS), 1377 VMSTATE_END_OF_LIST() 1378 } 1379 }; 1380 1381 static bool virt_ssbd_needed(void *opaque) 1382 { 1383 X86CPU *cpu = opaque; 1384 CPUX86State *env = &cpu->env; 1385 1386 return env->virt_ssbd != 0; 1387 } 1388 1389 static const VMStateDescription vmstate_msr_virt_ssbd = { 1390 .name = "cpu/virt_ssbd", 1391 .version_id = 1, 1392 .minimum_version_id = 1, 1393 .needed = virt_ssbd_needed, 1394 .fields = (const VMStateField[]){ 1395 VMSTATE_UINT64(env.virt_ssbd, X86CPU), 1396 VMSTATE_END_OF_LIST() 1397 } 1398 }; 1399 1400 static bool svm_npt_needed(void *opaque) 1401 { 1402 X86CPU *cpu = opaque; 1403 CPUX86State *env = &cpu->env; 1404 1405 return !!(env->hflags2 & HF2_NPT_MASK); 1406 } 1407 1408 static const VMStateDescription vmstate_svm_npt = { 1409 .name = "cpu/svn_npt", 1410 .version_id = 1, 1411 .minimum_version_id = 1, 1412 .needed = svm_npt_needed, 1413 .fields = (const VMStateField[]){ 1414 VMSTATE_UINT64(env.nested_cr3, X86CPU), 1415 VMSTATE_UINT32(env.nested_pg_mode, X86CPU), 1416 VMSTATE_END_OF_LIST() 1417 } 1418 }; 1419 1420 static bool svm_guest_needed(void *opaque) 1421 { 1422 X86CPU *cpu = opaque; 1423 CPUX86State *env = &cpu->env; 1424 1425 return tcg_enabled() && env->int_ctl; 1426 } 1427 1428 static const VMStateDescription vmstate_svm_guest = { 1429 .name = "cpu/svm_guest", 1430 .version_id = 1, 1431 .minimum_version_id = 1, 1432 .needed = svm_guest_needed, 1433 .fields = (const VMStateField[]){ 1434 VMSTATE_UINT32(env.int_ctl, X86CPU), 1435 VMSTATE_END_OF_LIST() 1436 } 1437 }; 1438 1439 #ifndef TARGET_X86_64 1440 static bool intel_efer32_needed(void *opaque) 1441 { 1442 X86CPU *cpu = opaque; 1443 CPUX86State *env = &cpu->env; 1444 1445 return env->efer != 0; 1446 } 1447 1448 static const VMStateDescription vmstate_efer32 = { 1449 .name = "cpu/efer32", 1450 .version_id = 1, 1451 .minimum_version_id = 1, 1452 .needed = intel_efer32_needed, 1453 .fields = (const VMStateField[]) { 1454 VMSTATE_UINT64(env.efer, X86CPU), 1455 VMSTATE_END_OF_LIST() 1456 } 1457 }; 1458 #endif 1459 1460 static bool msr_tsx_ctrl_needed(void *opaque) 1461 { 1462 X86CPU *cpu = opaque; 1463 CPUX86State *env = &cpu->env; 1464 1465 return env->features[FEAT_ARCH_CAPABILITIES] & ARCH_CAP_TSX_CTRL_MSR; 1466 } 1467 1468 static const VMStateDescription vmstate_msr_tsx_ctrl = { 1469 .name = "cpu/msr_tsx_ctrl", 1470 .version_id = 1, 1471 .minimum_version_id = 1, 1472 .needed = msr_tsx_ctrl_needed, 1473 .fields = (const VMStateField[]) { 1474 VMSTATE_UINT32(env.tsx_ctrl, X86CPU), 1475 VMSTATE_END_OF_LIST() 1476 } 1477 }; 1478 1479 static bool intel_sgx_msrs_needed(void *opaque) 1480 { 1481 X86CPU *cpu = opaque; 1482 CPUX86State *env = &cpu->env; 1483 1484 return !!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC); 1485 } 1486 1487 static const VMStateDescription vmstate_msr_intel_sgx = { 1488 .name = "cpu/intel_sgx", 1489 .version_id = 1, 1490 .minimum_version_id = 1, 1491 .needed = intel_sgx_msrs_needed, 1492 .fields = (const VMStateField[]) { 1493 VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4), 1494 VMSTATE_END_OF_LIST() 1495 } 1496 }; 1497 1498 static bool pdptrs_needed(void *opaque) 1499 { 1500 X86CPU *cpu = opaque; 1501 CPUX86State *env = &cpu->env; 1502 return env->pdptrs_valid; 1503 } 1504 1505 static int pdptrs_post_load(void *opaque, int version_id) 1506 { 1507 X86CPU *cpu = opaque; 1508 CPUX86State *env = &cpu->env; 1509 env->pdptrs_valid = true; 1510 return 0; 1511 } 1512 1513 1514 static const VMStateDescription vmstate_pdptrs = { 1515 .name = "cpu/pdptrs", 1516 .version_id = 1, 1517 .minimum_version_id = 1, 1518 .needed = pdptrs_needed, 1519 .post_load = pdptrs_post_load, 1520 .fields = (const VMStateField[]) { 1521 VMSTATE_UINT64_ARRAY(env.pdptrs, X86CPU, 4), 1522 VMSTATE_END_OF_LIST() 1523 } 1524 }; 1525 1526 static bool xfd_msrs_needed(void *opaque) 1527 { 1528 X86CPU *cpu = opaque; 1529 CPUX86State *env = &cpu->env; 1530 1531 return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD); 1532 } 1533 1534 static const VMStateDescription vmstate_msr_xfd = { 1535 .name = "cpu/msr_xfd", 1536 .version_id = 1, 1537 .minimum_version_id = 1, 1538 .needed = xfd_msrs_needed, 1539 .fields = (const VMStateField[]) { 1540 VMSTATE_UINT64(env.msr_xfd, X86CPU), 1541 VMSTATE_UINT64(env.msr_xfd_err, X86CPU), 1542 VMSTATE_END_OF_LIST() 1543 } 1544 }; 1545 1546 static bool msr_hwcr_needed(void *opaque) 1547 { 1548 X86CPU *cpu = opaque; 1549 CPUX86State *env = &cpu->env; 1550 1551 return env->msr_hwcr != 0; 1552 } 1553 1554 static const VMStateDescription vmstate_msr_hwcr = { 1555 .name = "cpu/msr_hwcr", 1556 .version_id = 1, 1557 .minimum_version_id = 1, 1558 .needed = msr_hwcr_needed, 1559 .fields = (VMStateField[]) { 1560 VMSTATE_UINT64(env.msr_hwcr, X86CPU), 1561 VMSTATE_END_OF_LIST() 1562 } 1563 }; 1564 1565 #ifdef TARGET_X86_64 1566 static bool intel_fred_msrs_needed(void *opaque) 1567 { 1568 X86CPU *cpu = opaque; 1569 CPUX86State *env = &cpu->env; 1570 1571 return !!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED); 1572 } 1573 1574 static const VMStateDescription vmstate_msr_fred = { 1575 .name = "cpu/fred", 1576 .version_id = 1, 1577 .minimum_version_id = 1, 1578 .needed = intel_fred_msrs_needed, 1579 .fields = (VMStateField[]) { 1580 VMSTATE_UINT64(env.fred_rsp0, X86CPU), 1581 VMSTATE_UINT64(env.fred_rsp1, X86CPU), 1582 VMSTATE_UINT64(env.fred_rsp2, X86CPU), 1583 VMSTATE_UINT64(env.fred_rsp3, X86CPU), 1584 VMSTATE_UINT64(env.fred_stklvls, X86CPU), 1585 VMSTATE_UINT64(env.fred_ssp1, X86CPU), 1586 VMSTATE_UINT64(env.fred_ssp2, X86CPU), 1587 VMSTATE_UINT64(env.fred_ssp3, X86CPU), 1588 VMSTATE_UINT64(env.fred_config, X86CPU), 1589 VMSTATE_END_OF_LIST() 1590 } 1591 }; 1592 1593 static bool amx_xtile_needed(void *opaque) 1594 { 1595 X86CPU *cpu = opaque; 1596 CPUX86State *env = &cpu->env; 1597 1598 return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE); 1599 } 1600 1601 static const VMStateDescription vmstate_amx_xtile = { 1602 .name = "cpu/intel_amx_xtile", 1603 .version_id = 1, 1604 .minimum_version_id = 1, 1605 .needed = amx_xtile_needed, 1606 .fields = (const VMStateField[]) { 1607 VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64), 1608 VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192), 1609 VMSTATE_END_OF_LIST() 1610 } 1611 }; 1612 #endif 1613 1614 static bool arch_lbr_needed(void *opaque) 1615 { 1616 X86CPU *cpu = opaque; 1617 CPUX86State *env = &cpu->env; 1618 1619 return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR); 1620 } 1621 1622 static const VMStateDescription vmstate_arch_lbr = { 1623 .name = "cpu/arch_lbr", 1624 .version_id = 1, 1625 .minimum_version_id = 1, 1626 .needed = arch_lbr_needed, 1627 .fields = (const VMStateField[]) { 1628 VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU), 1629 VMSTATE_UINT64(env.msr_lbr_depth, X86CPU), 1630 VMSTATE_LBR_VARS(env.lbr_records, X86CPU, ARCH_LBR_NR_ENTRIES, 1), 1631 VMSTATE_END_OF_LIST() 1632 } 1633 }; 1634 1635 static bool triple_fault_needed(void *opaque) 1636 { 1637 X86CPU *cpu = opaque; 1638 CPUX86State *env = &cpu->env; 1639 1640 return env->triple_fault_pending; 1641 } 1642 1643 static const VMStateDescription vmstate_triple_fault = { 1644 .name = "cpu/triple_fault", 1645 .version_id = 1, 1646 .minimum_version_id = 1, 1647 .needed = triple_fault_needed, 1648 .fields = (const VMStateField[]) { 1649 VMSTATE_UINT8(env.triple_fault_pending, X86CPU), 1650 VMSTATE_END_OF_LIST() 1651 } 1652 }; 1653 1654 const VMStateDescription vmstate_x86_cpu = { 1655 .name = "cpu", 1656 .version_id = 12, 1657 .minimum_version_id = 11, 1658 .pre_save = cpu_pre_save, 1659 .post_load = cpu_post_load, 1660 .fields = (const VMStateField[]) { 1661 VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS), 1662 VMSTATE_UINTTL(env.eip, X86CPU), 1663 VMSTATE_UINTTL(env.eflags, X86CPU), 1664 VMSTATE_UINT32(env.hflags, X86CPU), 1665 /* FPU */ 1666 VMSTATE_UINT16(env.fpuc, X86CPU), 1667 VMSTATE_UINT16(env.fpus_vmstate, X86CPU), 1668 VMSTATE_UINT16(env.fptag_vmstate, X86CPU), 1669 VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU), 1670 1671 VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg), 1672 1673 VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6), 1674 VMSTATE_SEGMENT(env.ldt, X86CPU), 1675 VMSTATE_SEGMENT(env.tr, X86CPU), 1676 VMSTATE_SEGMENT(env.gdt, X86CPU), 1677 VMSTATE_SEGMENT(env.idt, X86CPU), 1678 1679 VMSTATE_UINT32(env.sysenter_cs, X86CPU), 1680 VMSTATE_UINTTL(env.sysenter_esp, X86CPU), 1681 VMSTATE_UINTTL(env.sysenter_eip, X86CPU), 1682 1683 VMSTATE_UINTTL(env.cr[0], X86CPU), 1684 VMSTATE_UINTTL(env.cr[2], X86CPU), 1685 VMSTATE_UINTTL(env.cr[3], X86CPU), 1686 VMSTATE_UINTTL(env.cr[4], X86CPU), 1687 VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8), 1688 /* MMU */ 1689 VMSTATE_INT32(env.a20_mask, X86CPU), 1690 /* XMM */ 1691 VMSTATE_UINT32(env.mxcsr, X86CPU), 1692 VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0), 1693 1694 #ifdef TARGET_X86_64 1695 VMSTATE_UINT64(env.efer, X86CPU), 1696 VMSTATE_UINT64(env.star, X86CPU), 1697 VMSTATE_UINT64(env.lstar, X86CPU), 1698 VMSTATE_UINT64(env.cstar, X86CPU), 1699 VMSTATE_UINT64(env.fmask, X86CPU), 1700 VMSTATE_UINT64(env.kernelgsbase, X86CPU), 1701 #endif 1702 VMSTATE_UINT32(env.smbase, X86CPU), 1703 1704 VMSTATE_UINT64(env.pat, X86CPU), 1705 VMSTATE_UINT32(env.hflags2, X86CPU), 1706 1707 VMSTATE_UINT64(env.vm_hsave, X86CPU), 1708 VMSTATE_UINT64(env.vm_vmcb, X86CPU), 1709 VMSTATE_UINT64(env.tsc_offset, X86CPU), 1710 VMSTATE_UINT64(env.intercept, X86CPU), 1711 VMSTATE_UINT16(env.intercept_cr_read, X86CPU), 1712 VMSTATE_UINT16(env.intercept_cr_write, X86CPU), 1713 VMSTATE_UINT16(env.intercept_dr_read, X86CPU), 1714 VMSTATE_UINT16(env.intercept_dr_write, X86CPU), 1715 VMSTATE_UINT32(env.intercept_exceptions, X86CPU), 1716 VMSTATE_UINT8(env.v_tpr, X86CPU), 1717 /* MTRRs */ 1718 VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11), 1719 VMSTATE_UINT64(env.mtrr_deftype, X86CPU), 1720 VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8), 1721 /* KVM-related states */ 1722 VMSTATE_INT32(env.interrupt_injected, X86CPU), 1723 VMSTATE_UINT32(env.mp_state, X86CPU), 1724 VMSTATE_UINT64(env.tsc, X86CPU), 1725 VMSTATE_INT32(env.exception_nr, X86CPU), 1726 VMSTATE_UINT8(env.soft_interrupt, X86CPU), 1727 VMSTATE_UINT8(env.nmi_injected, X86CPU), 1728 VMSTATE_UINT8(env.nmi_pending, X86CPU), 1729 VMSTATE_UINT8(env.has_error_code, X86CPU), 1730 VMSTATE_UINT32(env.sipi_vector, X86CPU), 1731 /* MCE */ 1732 VMSTATE_UINT64(env.mcg_cap, X86CPU), 1733 VMSTATE_UINT64(env.mcg_status, X86CPU), 1734 VMSTATE_UINT64(env.mcg_ctl, X86CPU), 1735 VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4), 1736 /* rdtscp */ 1737 VMSTATE_UINT64(env.tsc_aux, X86CPU), 1738 /* KVM pvclock msr */ 1739 VMSTATE_UINT64(env.system_time_msr, X86CPU), 1740 VMSTATE_UINT64(env.wall_clock_msr, X86CPU), 1741 /* XSAVE related fields */ 1742 VMSTATE_UINT64_V(env.xcr0, X86CPU, 12), 1743 VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12), 1744 VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12), 1745 VMSTATE_END_OF_LIST() 1746 /* The above list is not sorted /wrt version numbers, watch out! */ 1747 }, 1748 .subsections = (const VMStateDescription * const []) { 1749 &vmstate_exception_info, 1750 &vmstate_async_pf_msr, 1751 &vmstate_async_pf_int_msr, 1752 &vmstate_pv_eoi_msr, 1753 &vmstate_steal_time_msr, 1754 &vmstate_poll_control_msr, 1755 &vmstate_fpop_ip_dp, 1756 &vmstate_msr_tsc_adjust, 1757 &vmstate_msr_tscdeadline, 1758 &vmstate_msr_ia32_misc_enable, 1759 &vmstate_msr_ia32_feature_control, 1760 &vmstate_msr_architectural_pmu, 1761 &vmstate_mpx, 1762 &vmstate_msr_hyperv_hypercall, 1763 &vmstate_msr_hyperv_vapic, 1764 &vmstate_msr_hyperv_time, 1765 &vmstate_msr_hyperv_crash, 1766 &vmstate_msr_hyperv_runtime, 1767 &vmstate_msr_hyperv_synic, 1768 &vmstate_msr_hyperv_stimer, 1769 &vmstate_msr_hyperv_reenlightenment, 1770 &vmstate_avx512, 1771 &vmstate_xss, 1772 &vmstate_umwait, 1773 &vmstate_tsc_khz, 1774 &vmstate_msr_smi_count, 1775 &vmstate_pkru, 1776 &vmstate_pkrs, 1777 &vmstate_spec_ctrl, 1778 &amd_tsc_scale_msr_ctrl, 1779 &vmstate_mcg_ext_ctl, 1780 &vmstate_msr_intel_pt, 1781 &vmstate_msr_virt_ssbd, 1782 &vmstate_svm_npt, 1783 &vmstate_svm_guest, 1784 #ifndef TARGET_X86_64 1785 &vmstate_efer32, 1786 #endif 1787 #ifdef CONFIG_KVM 1788 &vmstate_nested_state, 1789 &vmstate_xen_vcpu, 1790 #endif 1791 &vmstate_msr_tsx_ctrl, 1792 &vmstate_msr_intel_sgx, 1793 &vmstate_pdptrs, 1794 &vmstate_msr_xfd, 1795 &vmstate_msr_hwcr, 1796 #ifdef TARGET_X86_64 1797 &vmstate_msr_fred, 1798 &vmstate_amx_xtile, 1799 #endif 1800 &vmstate_arch_lbr, 1801 &vmstate_triple_fault, 1802 NULL 1803 } 1804 }; 1805