/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
44 */ 45 46 47 #include <sys/cdefs.h> 48 __FBSDID("$FreeBSD$"); 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/kernel.h> 53 #include <sys/module.h> 54 #include <sys/sysctl.h> 55 #include <sys/kmem.h> 56 #include <sys/pcpu.h> 57 #include <sys/mutex.h> 58 #include <sys/proc.h> 59 #include <sys/rwlock.h> 60 #include <sys/sched.h> 61 #include <sys/systm.h> 62 #include <sys/sunddi.h> 63 #include <sys/hma.h> 64 #include <sys/archsystm.h> 65 66 #include <machine/md_var.h> 67 #include <x86/psl.h> 68 #include <x86/apicreg.h> 69 70 #include <machine/specialreg.h> 71 #include <machine/vmm.h> 72 #include <machine/vmm_dev.h> 73 #include <machine/vmparam.h> 74 #include <sys/vmm_instruction_emul.h> 75 #include <sys/vmm_vm.h> 76 #include <sys/vmm_gpt.h> 77 #include <sys/vmm_data.h> 78 79 #include "vmm_ioport.h" 80 #include "vmm_host.h" 81 #include "vmm_util.h" 82 #include "vatpic.h" 83 #include "vatpit.h" 84 #include "vhpet.h" 85 #include "vioapic.h" 86 #include "vlapic.h" 87 #include "vpmtmr.h" 88 #include "vrtc.h" 89 #include "vmm_stat.h" 90 #include "vmm_lapic.h" 91 92 #include "io/ppt.h" 93 #include "io/iommu.h" 94 95 struct vlapic; 96 97 /* Flags for vtc_status */ 98 #define VTCS_FPU_RESTORED 1 /* guest FPU restored, host FPU saved */ 99 #define VTCS_FPU_CTX_CRITICAL 2 /* in ctx where FPU restore cannot be lazy */ 100 101 typedef struct vm_thread_ctx { 102 struct vm *vtc_vm; 103 int vtc_vcpuid; 104 uint_t vtc_status; 105 enum vcpu_ustate vtc_ustate; 106 } vm_thread_ctx_t; 107 108 #define VMM_MTRR_VAR_MAX 10 109 #define VMM_MTRR_DEF_MASK \ 110 (MTRR_DEF_ENABLE | MTRR_DEF_FIXED_ENABLE | MTRR_DEF_TYPE) 111 #define VMM_MTRR_PHYSBASE_MASK (MTRR_PHYSBASE_PHYSBASE | MTRR_PHYSBASE_TYPE) 112 #define VMM_MTRR_PHYSMASK_MASK (MTRR_PHYSMASK_PHYSMASK | MTRR_PHYSMASK_VALID) 113 struct vm_mtrr { 114 uint64_t def_type; 115 uint64_t fixed4k[8]; 116 uint64_t fixed16k[2]; 117 uint64_t fixed64k; 118 struct { 119 uint64_t base; 120 uint64_t mask; 121 } var[VMM_MTRR_VAR_MAX]; 122 }; 123 124 /* 125 * Initialization: 126 * (a) allocated when vcpu is created 127 * (i) initialized when vcpu is created and when it is reinitialized 128 * (o) initialized the first time the vcpu is created 129 * (x) initialized before use 130 */ 131 struct vcpu { 132 /* (o) protects state, run_state, hostcpu, sipi_vector */ 133 kmutex_t lock; 134 135 enum vcpu_state state; /* (o) vcpu state */ 136 enum vcpu_run_state run_state; /* (i) vcpu init/sipi/run state */ 137 kcondvar_t vcpu_cv; /* (o) cpu waiter cv */ 138 kcondvar_t state_cv; /* (o) IDLE-transition cv */ 139 int hostcpu; /* (o) vcpu's current host cpu */ 140 int lastloccpu; /* (o) last host cpu localized to */ 141 int reqidle; /* (i) request vcpu to idle */ 142 bool reqconsist; /* (i) req. 
vcpu exit when consistent */ 143 struct vlapic *vlapic; /* (i) APIC device model */ 144 enum x2apic_state x2apic_state; /* (i) APIC mode */ 145 uint64_t exit_intinfo; /* (i) events pending at VM exit */ 146 uint64_t exc_pending; /* (i) exception pending */ 147 bool nmi_pending; /* (i) NMI pending */ 148 bool extint_pending; /* (i) INTR pending */ 149 150 uint8_t sipi_vector; /* (i) SIPI vector */ 151 hma_fpu_t *guestfpu; /* (a,i) guest fpu state */ 152 uint64_t guest_xcr0; /* (i) guest %xcr0 register */ 153 void *stats; /* (a,i) statistics */ 154 struct vm_exit exitinfo; /* (x) exit reason and collateral */ 155 uint64_t nextrip; /* (x) next instruction to execute */ 156 struct vie *vie_ctx; /* (x) instruction emulation context */ 157 vm_client_t *vmclient; /* (a) VM-system client */ 158 uint64_t tsc_offset; /* (x) vCPU TSC offset */ 159 struct vm_mtrr mtrr; /* (i) vcpu's MTRR */ 160 vcpu_cpuid_config_t cpuid_cfg; /* (x) cpuid configuration */ 161 162 enum vcpu_ustate ustate; /* (i) microstate for the vcpu */ 163 hrtime_t ustate_when; /* (i) time of last ustate change */ 164 uint64_t ustate_total[VU_MAX]; /* (o) total time spent in ustates */ 165 vm_thread_ctx_t vtc; /* (o) thread state for ctxops */ 166 struct ctxop *ctxop; /* (o) ctxop storage for vcpu */ 167 }; 168 169 #define vcpu_lock(v) mutex_enter(&((v)->lock)) 170 #define vcpu_unlock(v) mutex_exit(&((v)->lock)) 171 #define vcpu_assert_locked(v) ASSERT(MUTEX_HELD(&((v)->lock))) 172 173 struct mem_seg { 174 size_t len; 175 bool sysmem; 176 vm_object_t *object; 177 }; 178 #define VM_MAX_MEMSEGS 5 179 180 struct mem_map { 181 vm_paddr_t gpa; 182 size_t len; 183 vm_ooffset_t segoff; 184 int segid; 185 int prot; 186 int flags; 187 }; 188 #define VM_MAX_MEMMAPS 8 189 190 /* 191 * Initialization: 192 * (o) initialized the first time the VM is created 193 * (i) initialized when VM is created and when it is reinitialized 194 * (x) initialized before use 195 */ 196 struct vm { 197 void *cookie; /* (i) cpu-specific data */ 198 void *iommu; /* (x) iommu-specific data */ 199 struct vhpet *vhpet; /* (i) virtual HPET */ 200 struct vioapic *vioapic; /* (i) virtual ioapic */ 201 struct vatpic *vatpic; /* (i) virtual atpic */ 202 struct vatpit *vatpit; /* (i) virtual atpit */ 203 struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */ 204 struct vrtc *vrtc; /* (o) virtual RTC */ 205 volatile cpuset_t active_cpus; /* (i) active vcpus */ 206 volatile cpuset_t debug_cpus; /* (i) vcpus stopped for dbg */ 207 int suspend; /* (i) stop VM execution */ 208 volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */ 209 volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */ 210 struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */ 211 struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */ 212 struct vmspace *vmspace; /* (o) guest's address space */ 213 struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */ 214 /* The following describe the vm cpu topology */ 215 uint16_t sockets; /* (o) num of sockets */ 216 uint16_t cores; /* (o) num of cores/socket */ 217 uint16_t threads; /* (o) num of threads/core */ 218 uint16_t maxcpus; /* (o) max pluggable cpus */ 219 220 hrtime_t boot_hrtime; /* (i) hrtime at VM boot */ 221 222 /* TSC and TSC scaling related values */ 223 uint64_t tsc_offset; /* (i) VM-wide TSC offset */ 224 uint64_t guest_freq; /* (i) guest TSC Frequency */ 225 uint64_t freq_multiplier; /* (i) guest/host TSC Ratio */ 226 227 struct ioport_config ioports; /* (o) ioport handling */ 228 229 bool mem_transient; /* (o) 
alloc transient memory */ 230 bool is_paused; /* (i) instance is paused */ 231 }; 232 233 static int vmm_initialized; 234 static uint64_t vmm_host_freq; 235 236 237 static void 238 nullop_panic(void) 239 { 240 panic("null vmm operation call"); 241 } 242 243 /* Do not allow use of an un-set `ops` to do anything but panic */ 244 static struct vmm_ops vmm_ops_null = { 245 .init = (vmm_init_func_t)nullop_panic, 246 .cleanup = (vmm_cleanup_func_t)nullop_panic, 247 .resume = (vmm_resume_func_t)nullop_panic, 248 .vminit = (vmi_init_func_t)nullop_panic, 249 .vmrun = (vmi_run_func_t)nullop_panic, 250 .vmcleanup = (vmi_cleanup_func_t)nullop_panic, 251 .vmgetreg = (vmi_get_register_t)nullop_panic, 252 .vmsetreg = (vmi_set_register_t)nullop_panic, 253 .vmgetdesc = (vmi_get_desc_t)nullop_panic, 254 .vmsetdesc = (vmi_set_desc_t)nullop_panic, 255 .vmgetcap = (vmi_get_cap_t)nullop_panic, 256 .vmsetcap = (vmi_set_cap_t)nullop_panic, 257 .vlapic_init = (vmi_vlapic_init)nullop_panic, 258 .vlapic_cleanup = (vmi_vlapic_cleanup)nullop_panic, 259 .vmpause = (vmi_pause_t)nullop_panic, 260 .vmsavectx = (vmi_savectx)nullop_panic, 261 .vmrestorectx = (vmi_restorectx)nullop_panic, 262 .vmgetmsr = (vmi_get_msr_t)nullop_panic, 263 .vmsetmsr = (vmi_set_msr_t)nullop_panic, 264 .vmfreqratio = (vmi_freqratio_t)nullop_panic, 265 .fr_fracsize = 0, 266 .fr_intsize = 0, 267 }; 268 269 static struct vmm_ops *ops = &vmm_ops_null; 270 static vmm_pte_ops_t *pte_ops = NULL; 271 272 #define VMM_INIT() ((*ops->init)()) 273 #define VMM_CLEANUP() ((*ops->cleanup)()) 274 #define VMM_RESUME() ((*ops->resume)()) 275 276 #define VMINIT(vm) ((*ops->vminit)(vm)) 277 #define VMRUN(vmi, vcpu, rip) ((*ops->vmrun)(vmi, vcpu, rip)) 278 #define VMCLEANUP(vmi) ((*ops->vmcleanup)(vmi)) 279 280 #define VMGETREG(vmi, vcpu, num, rv) ((*ops->vmgetreg)(vmi, vcpu, num, rv)) 281 #define VMSETREG(vmi, vcpu, num, val) ((*ops->vmsetreg)(vmi, vcpu, num, val)) 282 #define VMGETDESC(vmi, vcpu, num, dsc) ((*ops->vmgetdesc)(vmi, vcpu, num, dsc)) 283 #define VMSETDESC(vmi, vcpu, num, dsc) ((*ops->vmsetdesc)(vmi, vcpu, num, dsc)) 284 #define VMGETCAP(vmi, vcpu, num, rv) ((*ops->vmgetcap)(vmi, vcpu, num, rv)) 285 #define VMSETCAP(vmi, vcpu, num, val) ((*ops->vmsetcap)(vmi, vcpu, num, val)) 286 #define VLAPIC_INIT(vmi, vcpu) ((*ops->vlapic_init)(vmi, vcpu)) 287 #define VLAPIC_CLEANUP(vmi, vlapic) ((*ops->vlapic_cleanup)(vmi, vlapic)) 288 289 #define fpu_start_emulating() load_cr0(rcr0() | CR0_TS) 290 #define fpu_stop_emulating() clts() 291 292 SDT_PROVIDER_DEFINE(vmm); 293 294 SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 295 NULL); 296 297 /* 298 * Halt the guest if all vcpus are executing a HLT instruction with 299 * interrupts disabled. 
300 */ 301 int halt_detection_enabled = 1; 302 303 /* Trap into hypervisor on all guest exceptions and reflect them back */ 304 int trace_guest_exceptions; 305 306 /* Trap WBINVD and ignore it */ 307 int trap_wbinvd = 1; 308 309 static void vm_free_memmap(struct vm *vm, int ident); 310 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm); 311 static void vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t); 312 static bool vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid); 313 static int vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector); 314 315 static void vmm_savectx(void *); 316 static void vmm_restorectx(void *); 317 static const struct ctxop_template vmm_ctxop_tpl = { 318 .ct_rev = CTXOP_TPL_REV, 319 .ct_save = vmm_savectx, 320 .ct_restore = vmm_restorectx, 321 }; 322 323 static uint64_t calc_tsc_offset(uint64_t base_host_tsc, uint64_t base_guest_tsc, 324 uint64_t mult); 325 static uint64_t calc_guest_tsc(uint64_t host_tsc, uint64_t mult, 326 uint64_t offset); 327 328 /* functions implemented in vmm_time_support.S */ 329 uint64_t calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz, 330 uint32_t frac_size); 331 uint64_t scale_tsc(uint64_t tsc, uint64_t multiplier, uint32_t frac_size); 332 333 #ifdef KTR 334 static const char * 335 vcpu_state2str(enum vcpu_state state) 336 { 337 338 switch (state) { 339 case VCPU_IDLE: 340 return ("idle"); 341 case VCPU_FROZEN: 342 return ("frozen"); 343 case VCPU_RUNNING: 344 return ("running"); 345 case VCPU_SLEEPING: 346 return ("sleeping"); 347 default: 348 return ("unknown"); 349 } 350 } 351 #endif 352 353 static void 354 vcpu_cleanup(struct vm *vm, int i, bool destroy) 355 { 356 struct vcpu *vcpu = &vm->vcpu[i]; 357 358 VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic); 359 if (destroy) { 360 vmm_stat_free(vcpu->stats); 361 362 vcpu_cpuid_cleanup(&vcpu->cpuid_cfg); 363 364 hma_fpu_free(vcpu->guestfpu); 365 vcpu->guestfpu = NULL; 366 367 vie_free(vcpu->vie_ctx); 368 vcpu->vie_ctx = NULL; 369 370 vmc_destroy(vcpu->vmclient); 371 vcpu->vmclient = NULL; 372 373 ctxop_free(vcpu->ctxop); 374 mutex_destroy(&vcpu->lock); 375 } 376 } 377 378 static void 379 vcpu_init(struct vm *vm, int vcpu_id, bool create) 380 { 381 struct vcpu *vcpu; 382 383 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, 384 ("vcpu_init: invalid vcpu %d", vcpu_id)); 385 386 vcpu = &vm->vcpu[vcpu_id]; 387 388 if (create) { 389 mutex_init(&vcpu->lock, NULL, MUTEX_ADAPTIVE, NULL); 390 391 vcpu->state = VCPU_IDLE; 392 vcpu->hostcpu = NOCPU; 393 vcpu->lastloccpu = NOCPU; 394 vcpu->guestfpu = hma_fpu_alloc(KM_SLEEP); 395 vcpu->stats = vmm_stat_alloc(); 396 vcpu->vie_ctx = vie_alloc(); 397 vcpu_cpuid_init(&vcpu->cpuid_cfg); 398 399 vcpu->ustate = VU_INIT; 400 vcpu->ustate_when = gethrtime(); 401 402 vcpu->vtc.vtc_vm = vm; 403 vcpu->vtc.vtc_vcpuid = vcpu_id; 404 vcpu->ctxop = ctxop_allocate(&vmm_ctxop_tpl, &vcpu->vtc); 405 } else { 406 vie_reset(vcpu->vie_ctx); 407 bzero(&vcpu->exitinfo, sizeof (vcpu->exitinfo)); 408 if (vcpu->ustate != VU_INIT) { 409 vcpu_ustate_change(vm, vcpu_id, VU_INIT); 410 } 411 bzero(&vcpu->mtrr, sizeof (vcpu->mtrr)); 412 } 413 414 vcpu->run_state = VRS_HALT; 415 vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id); 416 (void) vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED); 417 vcpu->reqidle = 0; 418 vcpu->exit_intinfo = 0; 419 vcpu->nmi_pending = false; 420 vcpu->extint_pending = false; 421 vcpu->exc_pending = 0; 422 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87; 423 (void) hma_fpu_init(vcpu->guestfpu); 424 vmm_stat_init(vcpu->stats); 425 
vcpu->tsc_offset = 0; 426 } 427 428 int 429 vcpu_trace_exceptions(struct vm *vm, int vcpuid) 430 { 431 return (trace_guest_exceptions); 432 } 433 434 int 435 vcpu_trap_wbinvd(struct vm *vm, int vcpuid) 436 { 437 return (trap_wbinvd); 438 } 439 440 struct vm_exit * 441 vm_exitinfo(struct vm *vm, int cpuid) 442 { 443 struct vcpu *vcpu; 444 445 if (cpuid < 0 || cpuid >= vm->maxcpus) 446 panic("vm_exitinfo: invalid cpuid %d", cpuid); 447 448 vcpu = &vm->vcpu[cpuid]; 449 450 return (&vcpu->exitinfo); 451 } 452 453 struct vie * 454 vm_vie_ctx(struct vm *vm, int cpuid) 455 { 456 if (cpuid < 0 || cpuid >= vm->maxcpus) 457 panic("vm_vie_ctx: invalid cpuid %d", cpuid); 458 459 return (vm->vcpu[cpuid].vie_ctx); 460 } 461 462 static int 463 vmm_init(void) 464 { 465 vmm_host_state_init(); 466 vmm_host_freq = unscalehrtime(NANOSEC); 467 468 if (vmm_is_intel()) { 469 ops = &vmm_ops_intel; 470 pte_ops = &ept_pte_ops; 471 } else if (vmm_is_svm()) { 472 ops = &vmm_ops_amd; 473 pte_ops = &rvi_pte_ops; 474 } else { 475 return (ENXIO); 476 } 477 478 return (VMM_INIT()); 479 } 480 481 int 482 vmm_mod_load() 483 { 484 int error; 485 486 VERIFY(vmm_initialized == 0); 487 488 error = vmm_init(); 489 if (error == 0) 490 vmm_initialized = 1; 491 492 return (error); 493 } 494 495 int 496 vmm_mod_unload() 497 { 498 int error; 499 500 VERIFY(vmm_initialized == 1); 501 502 error = VMM_CLEANUP(); 503 if (error) 504 return (error); 505 vmm_initialized = 0; 506 507 return (0); 508 } 509 510 /* 511 * Create a test IOMMU domain to see if the host system has necessary hardware 512 * and drivers to do so. 513 */ 514 bool 515 vmm_check_iommu(void) 516 { 517 void *domain; 518 const size_t arb_test_sz = (1UL << 32); 519 520 domain = iommu_create_domain(arb_test_sz); 521 if (domain == NULL) { 522 return (false); 523 } 524 iommu_destroy_domain(domain); 525 return (true); 526 } 527 528 static void 529 vm_init(struct vm *vm, bool create) 530 { 531 int i; 532 533 vm->cookie = VMINIT(vm); 534 vm->iommu = NULL; 535 vm->vioapic = vioapic_init(vm); 536 vm->vhpet = vhpet_init(vm); 537 vm->vatpic = vatpic_init(vm); 538 vm->vatpit = vatpit_init(vm); 539 vm->vpmtmr = vpmtmr_init(vm); 540 if (create) 541 vm->vrtc = vrtc_init(vm); 542 543 vm_inout_init(vm, &vm->ioports); 544 545 CPU_ZERO(&vm->active_cpus); 546 CPU_ZERO(&vm->debug_cpus); 547 548 vm->suspend = 0; 549 CPU_ZERO(&vm->suspended_cpus); 550 551 for (i = 0; i < vm->maxcpus; i++) 552 vcpu_init(vm, i, create); 553 554 /* 555 * Configure VM time-related data, including: 556 * - VM-wide TSC offset 557 * - boot_hrtime 558 * - guest_freq (same as host at boot time) 559 * - freq_multiplier (used for scaling) 560 * 561 * This data is configured such that the call to vm_init() represents 562 * the boot time (when the TSC(s) read 0). Each vCPU will have its own 563 * offset from this, which is altered if/when the guest writes to 564 * MSR_TSC. 565 * 566 * Further changes to this data may occur if userspace writes to the 567 * time data. 
568 */ 569 const uint64_t boot_tsc = rdtsc_offset(); 570 571 /* Convert the boot TSC reading to hrtime */ 572 vm->boot_hrtime = (hrtime_t)boot_tsc; 573 scalehrtime(&vm->boot_hrtime); 574 575 /* Guest frequency is the same as the host at boot time */ 576 vm->guest_freq = vmm_host_freq; 577 578 /* no scaling needed if guest_freq == host_freq */ 579 vm->freq_multiplier = VM_TSCM_NOSCALE; 580 581 /* configure VM-wide offset: initial guest TSC is 0 at boot */ 582 vm->tsc_offset = calc_tsc_offset(boot_tsc, 0, vm->freq_multiplier); 583 } 584 585 /* 586 * The default CPU topology is a single thread per package. 587 */ 588 uint_t cores_per_package = 1; 589 uint_t threads_per_core = 1; 590 591 int 592 vm_create(uint64_t flags, struct vm **retvm) 593 { 594 struct vm *vm; 595 struct vmspace *vmspace; 596 597 /* 598 * If vmm.ko could not be successfully initialized then don't attempt 599 * to create the virtual machine. 600 */ 601 if (!vmm_initialized) 602 return (ENXIO); 603 604 bool track_dirty = (flags & VCF_TRACK_DIRTY) != 0; 605 if (track_dirty && !pte_ops->vpeo_hw_ad_supported()) 606 return (ENOTSUP); 607 608 vmspace = vmspace_alloc(VM_MAXUSER_ADDRESS, pte_ops, track_dirty); 609 if (vmspace == NULL) 610 return (ENOMEM); 611 612 vm = kmem_zalloc(sizeof (struct vm), KM_SLEEP); 613 614 vm->vmspace = vmspace; 615 vm->mem_transient = (flags & VCF_RESERVOIR_MEM) == 0; 616 for (uint_t i = 0; i < VM_MAXCPU; i++) { 617 vm->vcpu[i].vmclient = vmspace_client_alloc(vmspace); 618 } 619 620 vm->sockets = 1; 621 vm->cores = cores_per_package; /* XXX backwards compatibility */ 622 vm->threads = threads_per_core; /* XXX backwards compatibility */ 623 vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */ 624 625 vm_init(vm, true); 626 627 *retvm = vm; 628 return (0); 629 } 630 631 void 632 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, 633 uint16_t *threads, uint16_t *maxcpus) 634 { 635 *sockets = vm->sockets; 636 *cores = vm->cores; 637 *threads = vm->threads; 638 *maxcpus = vm->maxcpus; 639 } 640 641 uint16_t 642 vm_get_maxcpus(struct vm *vm) 643 { 644 return (vm->maxcpus); 645 } 646 647 int 648 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, 649 uint16_t threads, uint16_t maxcpus) 650 { 651 if (maxcpus != 0) 652 return (EINVAL); /* XXX remove when supported */ 653 if ((sockets * cores * threads) > vm->maxcpus) 654 return (EINVAL); 655 /* XXX need to check sockets * cores * threads == vCPU, how? */ 656 vm->sockets = sockets; 657 vm->cores = cores; 658 vm->threads = threads; 659 vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */ 660 return (0); 661 } 662 663 static void 664 vm_cleanup(struct vm *vm, bool destroy) 665 { 666 struct mem_map *mm; 667 int i; 668 669 ppt_unassign_all(vm); 670 671 if (vm->iommu != NULL) 672 iommu_destroy_domain(vm->iommu); 673 674 /* 675 * Devices which attach their own ioport hooks should be cleaned up 676 * first so they can tear down those registrations. 677 */ 678 vpmtmr_cleanup(vm->vpmtmr); 679 680 vm_inout_cleanup(vm, &vm->ioports); 681 682 if (destroy) 683 vrtc_cleanup(vm->vrtc); 684 else 685 vrtc_reset(vm->vrtc); 686 687 vatpit_cleanup(vm->vatpit); 688 vhpet_cleanup(vm->vhpet); 689 vatpic_cleanup(vm->vatpic); 690 vioapic_cleanup(vm->vioapic); 691 692 for (i = 0; i < vm->maxcpus; i++) 693 vcpu_cleanup(vm, i, destroy); 694 695 VMCLEANUP(vm->cookie); 696 697 /* 698 * System memory is removed from the guest address space only when 699 * the VM is destroyed. This is because the mapping remains the same 700 * across VM reset. 
	 *
	 * Device memory can be relocated by the guest (e.g. using PCI BARs)
	 * so those mappings are removed on a VM reset.
	 */
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (destroy || !sysmem_mapping(vm, mm)) {
			vm_free_memmap(vm, i);
		} else {
			/*
			 * We need to reset the IOMMU flag so this mapping can
			 * be reused when a VM is rebooted. Since the IOMMU
			 * domain has already been destroyed we can just reset
			 * the flag here.
			 */
			mm->flags &= ~VM_MEMMAP_F_IOMMU;
		}
	}

	if (destroy) {
		for (i = 0; i < VM_MAX_MEMSEGS; i++)
			vm_free_memseg(vm, i);

		vmspace_destroy(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	kmem_free(vm, sizeof (*vm));
}

int
vm_reinit(struct vm *vm, uint64_t flags)
{
	/* A virtual machine can be reset only if all vcpus are suspended. */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) != 0) {
		if ((flags & VM_REINIT_F_FORCE_SUSPEND) == 0) {
			return (EBUSY);
		}

		/*
		 * Force the VM (and all its vCPUs) into a suspended state.
		 * This should be quick and easy, since the vm_reinit() call is
		 * made while holding the VM write lock, which requires holding
		 * all of the vCPUs in the VCPU_FROZEN state.
		 */
		(void) atomic_cmpset_int((uint_t *)&vm->suspend, 0,
		    VM_SUSPEND_RESET);
		for (uint_t i = 0; i < vm->maxcpus; i++) {
			struct vcpu *vcpu = &vm->vcpu[i];

			if (CPU_ISSET(i, &vm->suspended_cpus) ||
			    !CPU_ISSET(i, &vm->active_cpus)) {
				continue;
			}

			vcpu_lock(vcpu);
			VERIFY3U(vcpu->state, ==, VCPU_FROZEN);
			CPU_SET_ATOMIC(i, &vm->suspended_cpus);
			vcpu_unlock(vcpu);
		}

		VERIFY0(CPU_CMP(&vm->suspended_cpus, &vm->active_cpus));
	}

	vm_cleanup(vm, false);
	vm_init(vm, false);
	return (0);
}

bool
vm_is_paused(struct vm *vm)
{
	return (vm->is_paused);
}

int
vm_pause_instance(struct vm *vm)
{
	if (vm->is_paused) {
		return (EALREADY);
	}
	vm->is_paused = true;

	for (uint_t i = 0; i < vm->maxcpus; i++) {
		struct vcpu *vcpu = &vm->vcpu[i];

		if (!CPU_ISSET(i, &vm->active_cpus)) {
			continue;
		}
		vlapic_pause(vcpu->vlapic);

		/*
		 * vCPU-specific pause logic includes stashing any
		 * to-be-injected events in exit_intinfo where they can be
		 * accessed in a manner generic to the backend.
801 */ 802 ops->vmpause(vm->cookie, i); 803 } 804 vhpet_pause(vm->vhpet); 805 vatpit_pause(vm->vatpit); 806 vrtc_pause(vm->vrtc); 807 808 return (0); 809 } 810 811 int 812 vm_resume_instance(struct vm *vm) 813 { 814 if (!vm->is_paused) { 815 return (EALREADY); 816 } 817 vm->is_paused = false; 818 819 vrtc_resume(vm->vrtc); 820 vatpit_resume(vm->vatpit); 821 vhpet_resume(vm->vhpet); 822 for (uint_t i = 0; i < vm->maxcpus; i++) { 823 struct vcpu *vcpu = &vm->vcpu[i]; 824 825 if (!CPU_ISSET(i, &vm->active_cpus)) { 826 continue; 827 } 828 vlapic_resume(vcpu->vlapic); 829 } 830 831 return (0); 832 } 833 834 int 835 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa) 836 { 837 vm_object_t *obj; 838 839 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL) 840 return (ENOMEM); 841 else 842 return (0); 843 } 844 845 int 846 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len) 847 { 848 return (vmspace_unmap(vm->vmspace, gpa, len)); 849 } 850 851 /* 852 * Return 'true' if 'gpa' is allocated in the guest address space. 853 * 854 * This function is called in the context of a running vcpu which acts as 855 * an implicit lock on 'vm->mem_maps[]'. 856 */ 857 bool 858 vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa) 859 { 860 struct mem_map *mm; 861 int i; 862 863 #ifdef INVARIANTS 864 int hostcpu, state; 865 state = vcpu_get_state(vm, vcpuid, &hostcpu); 866 KASSERT(state == VCPU_RUNNING && hostcpu == curcpu, 867 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu)); 868 #endif 869 870 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 871 mm = &vm->mem_maps[i]; 872 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) 873 return (true); /* 'gpa' is sysmem or devmem */ 874 } 875 876 if (ppt_is_mmio(vm, gpa)) 877 return (true); /* 'gpa' is pci passthru mmio */ 878 879 return (false); 880 } 881 882 int 883 vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) 884 { 885 struct mem_seg *seg; 886 vm_object_t *obj; 887 888 if (ident < 0 || ident >= VM_MAX_MEMSEGS) 889 return (EINVAL); 890 891 if (len == 0 || (len & PAGE_MASK)) 892 return (EINVAL); 893 894 seg = &vm->mem_segs[ident]; 895 if (seg->object != NULL) { 896 if (seg->len == len && seg->sysmem == sysmem) 897 return (EEXIST); 898 else 899 return (EINVAL); 900 } 901 902 obj = vm_object_mem_allocate(len, vm->mem_transient); 903 if (obj == NULL) 904 return (ENOMEM); 905 906 seg->len = len; 907 seg->object = obj; 908 seg->sysmem = sysmem; 909 return (0); 910 } 911 912 int 913 vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, 914 vm_object_t **objptr) 915 { 916 struct mem_seg *seg; 917 918 if (ident < 0 || ident >= VM_MAX_MEMSEGS) 919 return (EINVAL); 920 921 seg = &vm->mem_segs[ident]; 922 if (len) 923 *len = seg->len; 924 if (sysmem) 925 *sysmem = seg->sysmem; 926 if (objptr) 927 *objptr = seg->object; 928 return (0); 929 } 930 931 void 932 vm_free_memseg(struct vm *vm, int ident) 933 { 934 struct mem_seg *seg; 935 936 KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, 937 ("%s: invalid memseg ident %d", __func__, ident)); 938 939 seg = &vm->mem_segs[ident]; 940 if (seg->object != NULL) { 941 vm_object_release(seg->object); 942 bzero(seg, sizeof (struct mem_seg)); 943 } 944 } 945 946 int 947 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, 948 size_t len, int prot, int flags) 949 { 950 struct mem_seg *seg; 951 struct mem_map *m, *map; 952 vm_ooffset_t last; 953 int i, error; 954 955 if (prot == 0 || (prot & ~(PROT_ALL)) != 0) 956 return (EINVAL); 957 958 if 
(flags & ~VM_MEMMAP_F_WIRED) 959 return (EINVAL); 960 961 if (segid < 0 || segid >= VM_MAX_MEMSEGS) 962 return (EINVAL); 963 964 seg = &vm->mem_segs[segid]; 965 if (seg->object == NULL) 966 return (EINVAL); 967 968 last = first + len; 969 if (first < 0 || first >= last || last > seg->len) 970 return (EINVAL); 971 972 if ((gpa | first | last) & PAGE_MASK) 973 return (EINVAL); 974 975 map = NULL; 976 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 977 m = &vm->mem_maps[i]; 978 if (m->len == 0) { 979 map = m; 980 break; 981 } 982 } 983 984 if (map == NULL) 985 return (ENOSPC); 986 987 error = vmspace_map(vm->vmspace, seg->object, first, gpa, len, prot); 988 if (error != 0) 989 return (EFAULT); 990 991 vm_object_reference(seg->object); 992 993 if ((flags & VM_MEMMAP_F_WIRED) != 0) { 994 error = vmspace_populate(vm->vmspace, gpa, len); 995 if (error != 0) { 996 VERIFY0(vmspace_unmap(vm->vmspace, gpa, len)); 997 return (EFAULT); 998 } 999 } 1000 1001 map->gpa = gpa; 1002 map->len = len; 1003 map->segoff = first; 1004 map->segid = segid; 1005 map->prot = prot; 1006 map->flags = flags; 1007 return (0); 1008 } 1009 1010 int 1011 vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) 1012 { 1013 struct mem_map *m; 1014 int i; 1015 1016 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 1017 m = &vm->mem_maps[i]; 1018 if (m->gpa == gpa && m->len == len && 1019 (m->flags & VM_MEMMAP_F_IOMMU) == 0) { 1020 vm_free_memmap(vm, i); 1021 return (0); 1022 } 1023 } 1024 1025 return (EINVAL); 1026 } 1027 1028 int 1029 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, 1030 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) 1031 { 1032 struct mem_map *mm, *mmnext; 1033 int i; 1034 1035 mmnext = NULL; 1036 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 1037 mm = &vm->mem_maps[i]; 1038 if (mm->len == 0 || mm->gpa < *gpa) 1039 continue; 1040 if (mmnext == NULL || mm->gpa < mmnext->gpa) 1041 mmnext = mm; 1042 } 1043 1044 if (mmnext != NULL) { 1045 *gpa = mmnext->gpa; 1046 if (segid) 1047 *segid = mmnext->segid; 1048 if (segoff) 1049 *segoff = mmnext->segoff; 1050 if (len) 1051 *len = mmnext->len; 1052 if (prot) 1053 *prot = mmnext->prot; 1054 if (flags) 1055 *flags = mmnext->flags; 1056 return (0); 1057 } else { 1058 return (ENOENT); 1059 } 1060 } 1061 1062 static void 1063 vm_free_memmap(struct vm *vm, int ident) 1064 { 1065 struct mem_map *mm; 1066 int error; 1067 1068 mm = &vm->mem_maps[ident]; 1069 if (mm->len) { 1070 error = vmspace_unmap(vm->vmspace, mm->gpa, mm->len); 1071 VERIFY0(error); 1072 bzero(mm, sizeof (struct mem_map)); 1073 } 1074 } 1075 1076 static __inline bool 1077 sysmem_mapping(struct vm *vm, struct mem_map *mm) 1078 { 1079 1080 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem) 1081 return (true); 1082 else 1083 return (false); 1084 } 1085 1086 vm_paddr_t 1087 vmm_sysmem_maxaddr(struct vm *vm) 1088 { 1089 struct mem_map *mm; 1090 vm_paddr_t maxaddr; 1091 int i; 1092 1093 maxaddr = 0; 1094 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 1095 mm = &vm->mem_maps[i]; 1096 if (sysmem_mapping(vm, mm)) { 1097 if (maxaddr < mm->gpa + mm->len) 1098 maxaddr = mm->gpa + mm->len; 1099 } 1100 } 1101 return (maxaddr); 1102 } 1103 1104 static void 1105 vm_iommu_modify(struct vm *vm, bool map) 1106 { 1107 int i, sz; 1108 vm_paddr_t gpa, hpa; 1109 struct mem_map *mm; 1110 vm_client_t *vmc; 1111 1112 sz = PAGE_SIZE; 1113 vmc = vmspace_client_alloc(vm->vmspace); 1114 1115 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 1116 mm = &vm->mem_maps[i]; 1117 if (!sysmem_mapping(vm, mm)) 1118 continue; 1119 1120 if (map) { 1121 
KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0, 1122 ("iommu map found invalid memmap %lx/%lx/%x", 1123 mm->gpa, mm->len, mm->flags)); 1124 if ((mm->flags & VM_MEMMAP_F_WIRED) == 0) 1125 continue; 1126 mm->flags |= VM_MEMMAP_F_IOMMU; 1127 } else { 1128 if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0) 1129 continue; 1130 mm->flags &= ~VM_MEMMAP_F_IOMMU; 1131 KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0, 1132 ("iommu unmap found invalid memmap %lx/%lx/%x", 1133 mm->gpa, mm->len, mm->flags)); 1134 } 1135 1136 gpa = mm->gpa; 1137 while (gpa < mm->gpa + mm->len) { 1138 vm_page_t *vmp; 1139 1140 vmp = vmc_hold(vmc, gpa, PROT_WRITE); 1141 ASSERT(vmp != NULL); 1142 hpa = ((uintptr_t)vmp_get_pfn(vmp) << PAGESHIFT); 1143 (void) vmp_release(vmp); 1144 1145 /* 1146 * When originally ported from FreeBSD, the logic for 1147 * adding memory to the guest domain would 1148 * simultaneously remove it from the host domain. The 1149 * justification for that is not clear, and FreeBSD has 1150 * subsequently changed the behavior to not remove the 1151 * memory from the host domain. 1152 * 1153 * Leaving the guest memory in the host domain for the 1154 * life of the VM is necessary to make it available for 1155 * DMA, such as through viona in the TX path. 1156 */ 1157 if (map) { 1158 iommu_create_mapping(vm->iommu, gpa, hpa, sz); 1159 } else { 1160 iommu_remove_mapping(vm->iommu, gpa, sz); 1161 } 1162 1163 gpa += PAGE_SIZE; 1164 } 1165 } 1166 vmc_destroy(vmc); 1167 1168 /* 1169 * Invalidate the cached translations associated with the domain 1170 * from which pages were removed. 1171 */ 1172 iommu_invalidate_tlb(vm->iommu); 1173 } 1174 1175 int 1176 vm_unassign_pptdev(struct vm *vm, int pptfd) 1177 { 1178 int error; 1179 1180 error = ppt_unassign_device(vm, pptfd); 1181 if (error) 1182 return (error); 1183 1184 if (ppt_assigned_devices(vm) == 0) 1185 vm_iommu_modify(vm, false); 1186 1187 return (0); 1188 } 1189 1190 int 1191 vm_assign_pptdev(struct vm *vm, int pptfd) 1192 { 1193 int error; 1194 vm_paddr_t maxaddr; 1195 1196 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */ 1197 if (ppt_assigned_devices(vm) == 0) { 1198 KASSERT(vm->iommu == NULL, 1199 ("vm_assign_pptdev: iommu must be NULL")); 1200 maxaddr = vmm_sysmem_maxaddr(vm); 1201 vm->iommu = iommu_create_domain(maxaddr); 1202 if (vm->iommu == NULL) 1203 return (ENXIO); 1204 vm_iommu_modify(vm, true); 1205 } 1206 1207 error = ppt_assign_device(vm, pptfd); 1208 return (error); 1209 } 1210 1211 int 1212 vm_get_register(struct vm *vm, int vcpuid, int reg, uint64_t *retval) 1213 { 1214 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 1215 return (EINVAL); 1216 1217 if (reg >= VM_REG_LAST) 1218 return (EINVAL); 1219 1220 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 1221 switch (reg) { 1222 case VM_REG_GUEST_XCR0: 1223 *retval = vcpu->guest_xcr0; 1224 return (0); 1225 default: 1226 return (VMGETREG(vm->cookie, vcpuid, reg, retval)); 1227 } 1228 } 1229 1230 int 1231 vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val) 1232 { 1233 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 1234 return (EINVAL); 1235 1236 if (reg >= VM_REG_LAST) 1237 return (EINVAL); 1238 1239 int error; 1240 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 1241 switch (reg) { 1242 case VM_REG_GUEST_RIP: 1243 error = VMSETREG(vm->cookie, vcpuid, reg, val); 1244 if (error == 0) { 1245 vcpu->nextrip = val; 1246 } 1247 return (error); 1248 case VM_REG_GUEST_XCR0: 1249 if (!validate_guest_xcr0(val, vmm_get_host_xcr0())) { 1250 return (EINVAL); 1251 } 1252 vcpu->guest_xcr0 = val; 1253 return (0); 1254 default: 1255 
return (VMSETREG(vm->cookie, vcpuid, reg, val)); 1256 } 1257 } 1258 1259 static bool 1260 is_descriptor_table(int reg) 1261 { 1262 switch (reg) { 1263 case VM_REG_GUEST_IDTR: 1264 case VM_REG_GUEST_GDTR: 1265 return (true); 1266 default: 1267 return (false); 1268 } 1269 } 1270 1271 static bool 1272 is_segment_register(int reg) 1273 { 1274 switch (reg) { 1275 case VM_REG_GUEST_ES: 1276 case VM_REG_GUEST_CS: 1277 case VM_REG_GUEST_SS: 1278 case VM_REG_GUEST_DS: 1279 case VM_REG_GUEST_FS: 1280 case VM_REG_GUEST_GS: 1281 case VM_REG_GUEST_TR: 1282 case VM_REG_GUEST_LDTR: 1283 return (true); 1284 default: 1285 return (false); 1286 } 1287 } 1288 1289 int 1290 vm_get_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc) 1291 { 1292 1293 if (vcpu < 0 || vcpu >= vm->maxcpus) 1294 return (EINVAL); 1295 1296 if (!is_segment_register(reg) && !is_descriptor_table(reg)) 1297 return (EINVAL); 1298 1299 return (VMGETDESC(vm->cookie, vcpu, reg, desc)); 1300 } 1301 1302 int 1303 vm_set_seg_desc(struct vm *vm, int vcpu, int reg, const struct seg_desc *desc) 1304 { 1305 if (vcpu < 0 || vcpu >= vm->maxcpus) 1306 return (EINVAL); 1307 1308 if (!is_segment_register(reg) && !is_descriptor_table(reg)) 1309 return (EINVAL); 1310 1311 return (VMSETDESC(vm->cookie, vcpu, reg, desc)); 1312 } 1313 1314 static int 1315 translate_hma_xsave_result(hma_fpu_xsave_result_t res) 1316 { 1317 switch (res) { 1318 case HFXR_OK: 1319 return (0); 1320 case HFXR_NO_SPACE: 1321 return (ENOSPC); 1322 case HFXR_BAD_ALIGN: 1323 case HFXR_UNSUP_FMT: 1324 case HFXR_UNSUP_FEAT: 1325 case HFXR_INVALID_DATA: 1326 return (EINVAL); 1327 default: 1328 panic("unexpected xsave result"); 1329 } 1330 } 1331 1332 int 1333 vm_get_fpu(struct vm *vm, int vcpuid, void *buf, size_t len) 1334 { 1335 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 1336 return (EINVAL); 1337 1338 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 1339 hma_fpu_xsave_result_t res; 1340 1341 res = hma_fpu_get_xsave_state(vcpu->guestfpu, buf, len); 1342 return (translate_hma_xsave_result(res)); 1343 } 1344 1345 int 1346 vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len) 1347 { 1348 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 1349 return (EINVAL); 1350 1351 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 1352 hma_fpu_xsave_result_t res; 1353 1354 res = hma_fpu_set_xsave_state(vcpu->guestfpu, buf, len); 1355 return (translate_hma_xsave_result(res)); 1356 } 1357 1358 int 1359 vm_get_run_state(struct vm *vm, int vcpuid, uint32_t *state, uint8_t *sipi_vec) 1360 { 1361 struct vcpu *vcpu; 1362 1363 if (vcpuid < 0 || vcpuid >= vm->maxcpus) { 1364 return (EINVAL); 1365 } 1366 1367 vcpu = &vm->vcpu[vcpuid]; 1368 1369 vcpu_lock(vcpu); 1370 *state = vcpu->run_state; 1371 *sipi_vec = vcpu->sipi_vector; 1372 vcpu_unlock(vcpu); 1373 1374 return (0); 1375 } 1376 1377 int 1378 vm_set_run_state(struct vm *vm, int vcpuid, uint32_t state, uint8_t sipi_vec) 1379 { 1380 struct vcpu *vcpu; 1381 1382 if (vcpuid < 0 || vcpuid >= vm->maxcpus) { 1383 return (EINVAL); 1384 } 1385 if (!VRS_IS_VALID(state)) { 1386 return (EINVAL); 1387 } 1388 1389 vcpu = &vm->vcpu[vcpuid]; 1390 1391 vcpu_lock(vcpu); 1392 vcpu->run_state = state; 1393 vcpu->sipi_vector = sipi_vec; 1394 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 1395 vcpu_unlock(vcpu); 1396 1397 return (0); 1398 } 1399 1400 int 1401 vm_track_dirty_pages(struct vm *vm, uint64_t gpa, size_t len, uint8_t *bitmap) 1402 { 1403 vmspace_t *vms = vm_get_vmspace(vm); 1404 return (vmspace_track_dirty(vms, gpa, len, bitmap)); 1405 } 1406 1407 static void 1408 
restore_guest_fpustate(struct vcpu *vcpu)
{
	/* Save host FPU and restore guest FPU */
	fpu_stop_emulating();
	hma_fpu_start_guest(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU and restore host FPU */
	fpu_stop_emulating();
	hma_fpu_stop_guest(vcpu->guestfpu);
	/*
	 * When the host state has been restored, we should not re-enable
	 * CR0.TS on illumos for eager FPU.
	 */
}

static int
vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	struct vcpu *vcpu;
	int error;

	vcpu = &vm->vcpu[vcpuid];
	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE) {
			vcpu->reqidle = 1;
			vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
			cv_wait(&vcpu->state_cv, &vcpu->lock);
		}
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE) {
		cv_broadcast(&vcpu->state_cv);
	}

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled)
{
	struct vcpu *vcpu;
	int vcpu_halted, vm_halted;
	bool userspace_exit = false;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending interrupts (including NMI and
		 * INIT) before putting this thread to sleep.
		 */
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (vcpu_run_state_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/*
		 * Also check for software events which would cause a wake-up.
		 * This will set the appropriate exitcode directly, rather than
		 * requiring a trip through VM_RUN().
		 */
		if (vcpu_sleep_bailout_checks(vm, vcpuid)) {
			userspace_exit = true;
			break;
		}

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		}

		vcpu_ustate_change(vm, vcpuid, VU_IDLE);
		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		(void) cv_wait_sig(&vcpu->vcpu_cv, &vcpu->lock);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
		vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted) {
		(void) vm_suspend(vm, VM_SUSPEND_HALT);
	}

	return (userspace_exit ? -1 : 0);
}
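
/*
 * Added commentary (not from the original source): the vm_handle_*() helpers
 * below appear to share the return convention used by vm_handle_hlt() above.
 * Returning 0 is taken to mean the event was fully serviced in-kernel and the
 * vcpu may re-enter the guest, while returning -1 (or another non-zero value)
 * requests a trip out to userspace, with the exit details already recorded in
 * the vcpu's 'exitinfo'.  This is an inference from the surrounding code, not
 * a statement from the original authors.
 */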

static int
vm_handle_paging(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	vm_client_t *vmc = vcpu->vmclient;
	struct vm_exit *vme = &vcpu->exitinfo;
	const int ftype = vme->u.paging.fault_type;

	ASSERT0(vme->inst_length);
	ASSERT(ftype == PROT_READ || ftype == PROT_WRITE || ftype == PROT_EXEC);

	if (vmc_fault(vmc, vme->u.paging.gpa, ftype) != 0) {
		/*
		 * If the fault cannot be serviced, kick it out to userspace
		 * for handling (or more likely, halting the instance).
		 */
		return (-1);
	}

	return (0);
}

int
vm_service_mmio_read(struct vm *vm, int cpuid, uint64_t gpa, uint64_t *rval,
    int rsize)
{
	int err = ESRCH;

	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		struct vlapic *vlapic = vm_lapic(vm, cpuid);

		err = vlapic_mmio_read(vlapic, gpa, rval, rsize);
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		err = vioapic_mmio_read(vm, cpuid, gpa, rval, rsize);
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		err = vhpet_mmio_read(vm, cpuid, gpa, rval, rsize);
	}

	return (err);
}

int
vm_service_mmio_write(struct vm *vm, int cpuid, uint64_t gpa, uint64_t wval,
    int wsize)
{
	int err = ESRCH;

	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		struct vlapic *vlapic = vm_lapic(vm, cpuid);

		err = vlapic_mmio_write(vlapic, gpa, wval, wsize);
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		err = vioapic_mmio_write(vm, cpuid, gpa, wval, wsize);
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		err = vhpet_mmio_write(vm, cpuid, gpa, wval, wsize);
	}

	return (err);
}

static int
vm_handle_mmio_emul(struct vm *vm, int vcpuid)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t inst_addr;
	int error, fault, cs_d;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	vie = vcpu->vie_ctx;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	inst_addr = vme->rip + vme->u.mmio_emul.cs_base;
	cs_d = vme->u.mmio_emul.cs_d;

	/* Fetch the faulting instruction */
	if (vie_needs_fetch(vie)) {
		error = vie_fetch_instruction(vie, vm, vcpuid, inst_addr,
		    &fault);
		if (error != 0) {
			return (error);
		} else if (fault) {
			/*
			 * If a fault during instruction fetch was encountered,
			 * it will have asserted that the appropriate exception
			 * be injected at next entry.
			 * No further work is required.
			 */
			return (0);
		}
	}

	if (vie_decode_instruction(vie, vm, vcpuid, cs_d) != 0) {
		/* Dump (unrecognized) instruction bytes in userspace */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}
	if (vme->u.mmio_emul.gla != VIE_INVALID_GLA &&
	    vie_verify_gla(vie, vm, vcpuid, vme->u.mmio_emul.gla) != 0) {
		/* Decoded GLA does not match GLA from VM exit state */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}

repeat:
	error = vie_emulate_mmio(vie, vm, vcpuid);
	if (error < 0) {
		/*
		 * MMIO not handled by any of the in-kernel-emulated devices,
		 * so make a trip out to userspace for it.
		 */
		vie_exitinfo(vie, vme);
	} else if (error == EAGAIN) {
		/*
		 * Continue emulating the rep-prefixed instruction, which has
		 * not completed its iterations.
		 *
		 * In case this can be emulated in-kernel and has a high
		 * repetition count (causing a tight spin), it should be
		 * deferential to yield conditions.
		 */
		if (!vcpu_should_yield(vm, vcpuid)) {
			goto repeat;
		} else {
			/*
			 * Defer to the contending load by making a trip to
			 * userspace with a no-op (BOGUS) exit reason.
			 */
			vie_reset(vie);
			vme->exitcode = VM_EXITCODE_BOGUS;
			return (-1);
		}
	} else if (error == 0) {
		/* Update %rip now that instruction has been emulated */
		vie_advance_pc(vie, &vcpu->nextrip);
	}
	return (error);
}

static int
vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vme)
{
	struct vcpu *vcpu;
	struct vie *vie;
	int err;

	vcpu = &vm->vcpu[vcpuid];
	vie = vcpu->vie_ctx;

repeat:
	err = vie_emulate_inout(vie, vm, vcpuid);

	if (err < 0) {
		/*
		 * In/out not handled by any of the in-kernel-emulated devices,
		 * so make a trip out to userspace for it.
		 */
		vie_exitinfo(vie, vme);
		return (err);
	} else if (err == EAGAIN) {
		/*
		 * Continue emulating the rep-prefixed ins/outs, which has not
		 * completed its iterations.
		 *
		 * In case this can be emulated in-kernel and has a high
		 * repetition count (causing a tight spin), it should be
		 * deferential to yield conditions.
		 */
		if (!vcpu_should_yield(vm, vcpuid)) {
			goto repeat;
		} else {
			/*
			 * Defer to the contending load by making a trip to
			 * userspace with a no-op (BOGUS) exit reason.
			 */
			vie_reset(vie);
			vme->exitcode = VM_EXITCODE_BOGUS;
			return (-1);
		}
	} else if (err != 0) {
		/* Emulation failure.  Bail all the way out to userspace. */
		vme->exitcode = VM_EXITCODE_INST_EMUL;
		bzero(&vme->u.inst_emul, sizeof (vme->u.inst_emul));
		return (-1);
	}

	vie_advance_pc(vie, &vcpu->nextrip);
	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t cs_base;
	int error, fault, cs_d;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	vie = vcpu->vie_ctx;

	vie_cs_info(vie, vm, vcpuid, &cs_base, &cs_d);

	/* Fetch the faulting instruction */
	ASSERT(vie_needs_fetch(vie));
	error = vie_fetch_instruction(vie, vm, vcpuid, vme->rip + cs_base,
	    &fault);
	if (error != 0) {
		return (error);
	} else if (fault) {
		/*
		 * If a fault during instruction fetch was encountered, it will
		 * have asserted that the appropriate exception be injected at
		 * next entry. No further work is required.
		 */
		return (0);
	}

	if (vie_decode_instruction(vie, vm, vcpuid, cs_d) != 0) {
		/* Dump (unrecognized) instruction bytes in userspace */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}

	error = vie_emulate_other(vie, vm, vcpuid);
	if (error != 0) {
		/*
		 * Instruction emulation was unable to complete successfully,
		 * so kick it out to userspace for handling.
		 */
		vie_fallback_exitinfo(vie, vme);
	} else {
		/* Update %rip now that instruction has been emulated */
		vie_advance_pc(vie, &vcpu->nextrip);
	}
	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid)
{
	int i;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 */
	vcpu_lock(vcpu);
	vcpu_ustate_change(vm, vcpuid, VU_INIT);
	while (1) {
		int rc;

		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			break;
		}

		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		rc = cv_reltimedwait_sig(&vcpu->vcpu_cv, &vcpu->lock, hz,
		    TR_CLOCK_TICK);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);

		/*
		 * If the userspace process driving the instance is killed, any
		 * vCPUs yet to be marked suspended (because they are not
		 * VM_RUN-ing in the kernel presently) will never reach that
		 * state.
		 *
		 * To avoid vm_handle_suspend() getting stuck in the kernel
		 * waiting for those vCPUs, offer a bail-out even though it
		 * means returning without all vCPUs in a suspended state.
		 */
		if (rc <= 0) {
			if ((curproc->p_flag & SEXITING) != 0) {
				break;
			}
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < vm->maxcpus; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i);
		}
	}

	return (-1);
}

static int
vm_handle_reqidle(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
	vcpu->reqidle = 0;
	vcpu_unlock(vcpu);
	return (-1);
}

static int
vm_handle_run_state(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	bool handled = false;

	vcpu_lock(vcpu);
	while (1) {
		if ((vcpu->run_state & VRS_PEND_INIT) != 0) {
			vcpu_unlock(vcpu);
			VERIFY0(vcpu_arch_reset(vm, vcpuid, true));
			vcpu_lock(vcpu);

			vcpu->run_state &= ~(VRS_RUN | VRS_PEND_INIT);
			vcpu->run_state |= VRS_INIT;
		}

		if ((vcpu->run_state & (VRS_INIT | VRS_RUN | VRS_PEND_SIPI)) ==
		    (VRS_INIT | VRS_PEND_SIPI)) {
			const uint8_t vector = vcpu->sipi_vector;

			vcpu_unlock(vcpu);
			VERIFY0(vcpu_vector_sipi(vm, vcpuid, vector));
			vcpu_lock(vcpu);

			vcpu->run_state &= ~VRS_PEND_SIPI;
			vcpu->run_state |= VRS_RUN;
		}

		/*
		 * If the vCPU is now in the running state, there is no need to
		 * wait for anything prior to re-entry.
		 */
		if ((vcpu->run_state & VRS_RUN) != 0) {
			handled = true;
			break;
		}

		/*
		 * Also check for software events which would cause a wake-up.
		 * This will set the appropriate exitcode directly, rather than
		 * requiring a trip through VM_RUN().
		 */
		if (vcpu_sleep_bailout_checks(vm, vcpuid)) {
			break;
		}

		vcpu_ustate_change(vm, vcpuid, VU_IDLE);
		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		(void) cv_wait_sig(&vcpu->vcpu_cv, &vcpu->lock);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
		vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);
	}
	vcpu_unlock(vcpu);

	return (handled ? 0 : -1);
}
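
/*
 * Added commentary (not from the original source): the MTRR helpers below
 * model the fixed-range and variable-range MTRR MSRs.  The variable-range
 * registers are interleaved base/mask pairs starting at MSR_MTRRVarBase, so
 * for an MSR number 'num' in that range the handlers index them roughly as:
 *
 *	offset = num - MSR_MTRRVarBase;
 *	pair = offset / 2;
 *	(offset % 2) == 0 -> var[pair].base, otherwise var[pair].mask
 *
 * This is a sketch of the indexing only; the authoritative logic lives in
 * vm_rdmtrr() and vm_wrmtrr().
 */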

static int
vm_rdmtrr(const struct vm_mtrr *mtrr, uint32_t num, uint64_t *val)
{
	switch (num) {
	case MSR_MTRRcap:
		*val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX;
		break;
	case MSR_MTRRdefType:
		*val = mtrr->def_type;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		*val = mtrr->fixed4k[num - MSR_MTRR4kBase];
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		*val = mtrr->fixed16k[num - MSR_MTRR16kBase];
		break;
	case MSR_MTRR64kBase:
		*val = mtrr->fixed64k;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		uint_t offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			*val = mtrr->var[offset / 2].base;
		} else {
			*val = mtrr->var[offset / 2].mask;
		}
		break;
	}
	default:
		return (EINVAL);
	}

	return (0);
}

static int
vm_wrmtrr(struct vm_mtrr *mtrr, uint32_t num, uint64_t val)
{
	switch (num) {
	case MSR_MTRRcap:
		/* MTRRCAP is read only */
		return (EPERM);
	case MSR_MTRRdefType:
		if (val & ~VMM_MTRR_DEF_MASK) {
			/* generate #GP on writes to reserved fields */
			return (EINVAL);
		}
		mtrr->def_type = val;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		mtrr->fixed4k[num - MSR_MTRR4kBase] = val;
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		mtrr->fixed16k[num - MSR_MTRR16kBase] = val;
		break;
	case MSR_MTRR64kBase:
		mtrr->fixed64k = val;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		uint_t offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			if (val & ~VMM_MTRR_PHYSBASE_MASK) {
				/* generate #GP on writes to reserved fields */
				return (EINVAL);
			}
			mtrr->var[offset / 2].base = val;
		} else {
			if (val & ~VMM_MTRR_PHYSMASK_MASK) {
				/* generate #GP on writes to reserved fields */
				return (EINVAL);
			}
			mtrr->var[offset / 2].mask = val;
		}
		break;
	}
	default:
		return (EINVAL);
	}

	return (0);
}

static bool
is_mtrr_msr(uint32_t msr)
{
	switch (msr) {
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		return (true);
	default:
		return (false);
	}
}

static int
vm_handle_rdmsr(struct vm *vm, int vcpuid, struct vm_exit *vme)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	const uint32_t code = vme->u.msr.code;
	uint64_t val = 0;

	switch (code) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		val = 0;
		break;

	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		if (vm_rdmtrr(&vcpu->mtrr, code, &val) != 0)
			vm_inject_gp(vm, vcpuid);
		break;

	case MSR_TSC:
		/*
		 * Get the guest TSC, applying necessary vCPU offsets.
		 *
		 * In all likelihood, this should always be handled in guest
		 * context by VMX/SVM rather than taking an exit.  (Both VMX
		 * and SVM pass through read-only access to MSR_TSC to the
		 * guest.)
		 *
		 * The VM-wide TSC offset and per-vCPU offset are included in
		 * the calculations of vcpu_tsc_offset(), so this is sufficient
		 * to use as the offset in our calculations.
		 *
		 * No physical offset is requested of vcpu_tsc_offset() since
		 * rdtsc_offset() takes care of that instead.
		 */
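		/*
		 * Added commentary (not from the original source): with
		 * scaling disabled (freq_multiplier == VM_TSCM_NOSCALE), the
		 * value computed below is assumed to reduce to a simple sum
		 * in 64-bit modular arithmetic:
		 *
		 *	guest_tsc = rdtsc_offset() + vm->tsc_offset +
		 *	    vcpu->tsc_offset;
		 *
		 * i.e. the host TSC reading shifted so the guest observed 0
		 * at boot, plus any per-vCPU adjustment from guest writes to
		 * MSR_TSC.
		 */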
2120 */ 2121 val = calc_guest_tsc(rdtsc_offset(), vm->freq_multiplier, 2122 vcpu_tsc_offset(vm, vcpuid, false)); 2123 break; 2124 2125 default: 2126 /* 2127 * Anything not handled at this point will be kicked out to 2128 * userspace for attempted processing there. 2129 */ 2130 return (-1); 2131 } 2132 2133 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX, 2134 val & 0xffffffff)); 2135 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX, 2136 val >> 32)); 2137 return (0); 2138 } 2139 2140 static int 2141 vm_handle_wrmsr(struct vm *vm, int vcpuid, struct vm_exit *vme) 2142 { 2143 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2144 const uint32_t code = vme->u.msr.code; 2145 const uint64_t val = vme->u.msr.wval; 2146 2147 switch (code) { 2148 case MSR_MCG_CAP: 2149 case MSR_MCG_STATUS: 2150 /* Ignore writes */ 2151 break; 2152 2153 case MSR_MTRRcap: 2154 case MSR_MTRRdefType: 2155 case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7: 2156 case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1: 2157 case MSR_MTRR64kBase: 2158 case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: 2159 if (vm_wrmtrr(&vcpu->mtrr, code, val) != 0) 2160 vm_inject_gp(vm, vcpuid); 2161 break; 2162 2163 case MSR_TSC: 2164 /* 2165 * The effect of writing the TSC MSR is that a subsequent read 2166 * of the TSC would report that value written (plus any time 2167 * elapsed between the write and the read). 2168 * 2169 * To calculate that per-vCPU offset, we can work backwards from 2170 * the guest TSC at the time of write: 2171 * 2172 * value = current guest TSC + vCPU offset 2173 * 2174 * so therefore: 2175 * 2176 * value - current guest TSC = vCPU offset 2177 */ 2178 vcpu->tsc_offset = val - calc_guest_tsc(rdtsc_offset(), 2179 vm->freq_multiplier, vm->tsc_offset); 2180 break; 2181 2182 default: 2183 /* 2184 * Anything not handled at this point will be kicked out to 2185 * userspace for attempted processing there. 2186 */ 2187 return (-1); 2188 } 2189 2190 return (0); 2191 } 2192 2193 int 2194 vm_suspend(struct vm *vm, enum vm_suspend_how how) 2195 { 2196 if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST) 2197 return (EINVAL); 2198 2199 if (atomic_cmpset_int((uint_t *)&vm->suspend, 0, how) == 0) { 2200 return (EALREADY); 2201 } 2202 2203 /* 2204 * Notify all active vcpus that they are now suspended. 2205 */ 2206 for (uint_t i = 0; i < vm->maxcpus; i++) { 2207 struct vcpu *vcpu = &vm->vcpu[i]; 2208 2209 vcpu_lock(vcpu); 2210 if (vcpu->state == VCPU_IDLE || vcpu->state == VCPU_FROZEN) { 2211 /* 2212 * Any vCPUs not actively running or in HLT can be 2213 * marked as suspended immediately. 2214 */ 2215 if (CPU_ISSET(i, &vm->active_cpus)) { 2216 CPU_SET_ATOMIC(i, &vm->suspended_cpus); 2217 } 2218 } else { 2219 /* 2220 * Those which are running or in HLT will pick up the 2221 * suspended state after notification. 2222 */ 2223 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 2224 } 2225 vcpu_unlock(vcpu); 2226 } 2227 return (0); 2228 } 2229 2230 void 2231 vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip) 2232 { 2233 struct vm_exit *vmexit; 2234 2235 vmexit = vm_exitinfo(vm, vcpuid); 2236 vmexit->rip = rip; 2237 vmexit->inst_length = 0; 2238 vmexit->exitcode = VM_EXITCODE_RUN_STATE; 2239 vmm_stat_incr(vm, vcpuid, VMEXIT_RUN_STATE, 1); 2240 } 2241 2242 /* 2243 * Some vmm resources, such as the lapic, may have CPU-specific resources 2244 * allocated to them which would benefit from migration onto the host CPU which 2245 * is processing the vcpu state. 
2246 */ 2247 static void 2248 vm_localize_resources(struct vm *vm, struct vcpu *vcpu) 2249 { 2250 /* 2251 * Localizing cyclic resources requires acquisition of cpu_lock, and 2252 * doing so with kpreempt disabled is a recipe for deadlock disaster. 2253 */ 2254 VERIFY(curthread->t_preempt == 0); 2255 2256 /* 2257 * Do not bother with localization if this vCPU is about to return to 2258 * the host CPU it was last localized to. 2259 */ 2260 if (vcpu->lastloccpu == curcpu) 2261 return; 2262 2263 /* 2264 * Localize system-wide resources to the primary boot vCPU. While any 2265 * of the other vCPUs may access them, it keeps the potential interrupt 2266 * footprint constrained to CPUs involved with this instance. 2267 */ 2268 if (vcpu == &vm->vcpu[0]) { 2269 vhpet_localize_resources(vm->vhpet); 2270 vrtc_localize_resources(vm->vrtc); 2271 vatpit_localize_resources(vm->vatpit); 2272 } 2273 2274 vlapic_localize_resources(vcpu->vlapic); 2275 2276 vcpu->lastloccpu = curcpu; 2277 } 2278 2279 static void 2280 vmm_savectx(void *arg) 2281 { 2282 vm_thread_ctx_t *vtc = arg; 2283 struct vm *vm = vtc->vtc_vm; 2284 const int vcpuid = vtc->vtc_vcpuid; 2285 2286 if (ops->vmsavectx != NULL) { 2287 ops->vmsavectx(vm->cookie, vcpuid); 2288 } 2289 2290 /* 2291 * Account for going off-cpu, unless the vCPU is idled, where being 2292 * off-cpu is the explicit point. 2293 */ 2294 if (vm->vcpu[vcpuid].ustate != VU_IDLE) { 2295 vtc->vtc_ustate = vm->vcpu[vcpuid].ustate; 2296 vcpu_ustate_change(vm, vcpuid, VU_SCHED); 2297 } 2298 2299 /* 2300 * If the CPU holds the restored guest FPU state, save it and restore 2301 * the host FPU state before this thread goes off-cpu. 2302 */ 2303 if ((vtc->vtc_status & VTCS_FPU_RESTORED) != 0) { 2304 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2305 2306 save_guest_fpustate(vcpu); 2307 vtc->vtc_status &= ~VTCS_FPU_RESTORED; 2308 } 2309 } 2310 2311 static void 2312 vmm_restorectx(void *arg) 2313 { 2314 vm_thread_ctx_t *vtc = arg; 2315 struct vm *vm = vtc->vtc_vm; 2316 const int vcpuid = vtc->vtc_vcpuid; 2317 2318 /* Complete microstate accounting for vCPU being off-cpu */ 2319 if (vm->vcpu[vcpuid].ustate != VU_IDLE) { 2320 vcpu_ustate_change(vm, vcpuid, vtc->vtc_ustate); 2321 } 2322 2323 /* 2324 * When coming back on-cpu, only restore the guest FPU status if the 2325 * thread is in a context marked as requiring it. This should be rare, 2326 * occurring only when a future logic error results in a voluntary 2327 * sleep during the VMRUN critical section. 2328 * 2329 * The common case will result in elision of the guest FPU state 2330 * restoration, deferring that action until it is clearly necessary 2331 * during vm_run. 
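 *
 * As a sketch of the expected flow: if this thread went off-cpu between
 * critical_exit() and the next VMRUN, vmm_savectx() will have saved the
 * guest FPU and cleared VTCS_FPU_RESTORED, and VTCS_FPU_CTX_CRITICAL is
 * also clear at that point, so nothing is restored here; vm_run() reloads
 * the guest FPU itself just before re-entering the guest.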
2332 */ 2333 VERIFY((vtc->vtc_status & VTCS_FPU_RESTORED) == 0); 2334 if ((vtc->vtc_status & VTCS_FPU_CTX_CRITICAL) != 0) { 2335 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2336 2337 restore_guest_fpustate(vcpu); 2338 vtc->vtc_status |= VTCS_FPU_RESTORED; 2339 } 2340 2341 if (ops->vmrestorectx != NULL) { 2342 ops->vmrestorectx(vm->cookie, vcpuid); 2343 } 2344 2345 } 2346 2347 /* Convenience defines for parsing vm_entry`cmd values */ 2348 #define VEC_MASK_FLAGS (VEC_FLAG_EXIT_CONSISTENT) 2349 #define VEC_MASK_CMD (~VEC_MASK_FLAGS) 2350 2351 static int 2352 vm_entry_actions(struct vm *vm, int vcpuid, const struct vm_entry *entry, 2353 struct vm_exit *vme) 2354 { 2355 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2356 struct vie *vie = vcpu->vie_ctx; 2357 int err = 0; 2358 2359 const uint_t cmd = entry->cmd & VEC_MASK_CMD; 2360 const uint_t flags = entry->cmd & VEC_MASK_FLAGS; 2361 2362 switch (cmd) { 2363 case VEC_DEFAULT: 2364 break; 2365 case VEC_DISCARD_INSTR: 2366 vie_reset(vie); 2367 break; 2368 case VEC_FULFILL_MMIO: 2369 err = vie_fulfill_mmio(vie, &entry->u.mmio); 2370 if (err == 0) { 2371 err = vie_emulate_mmio(vie, vm, vcpuid); 2372 if (err == 0) { 2373 vie_advance_pc(vie, &vcpu->nextrip); 2374 } else if (err < 0) { 2375 vie_exitinfo(vie, vme); 2376 } else if (err == EAGAIN) { 2377 /* 2378 * Clear the instruction emulation state in 2379 * order to re-enter VM context and continue 2380 * this 'rep <instruction>' 2381 */ 2382 vie_reset(vie); 2383 err = 0; 2384 } 2385 } 2386 break; 2387 case VEC_FULFILL_INOUT: 2388 err = vie_fulfill_inout(vie, &entry->u.inout); 2389 if (err == 0) { 2390 err = vie_emulate_inout(vie, vm, vcpuid); 2391 if (err == 0) { 2392 vie_advance_pc(vie, &vcpu->nextrip); 2393 } else if (err < 0) { 2394 vie_exitinfo(vie, vme); 2395 } else if (err == EAGAIN) { 2396 /* 2397 * Clear the instruction emulation state in 2398 * order to re-enter VM context and continue 2399 * this 'rep ins/outs' 2400 */ 2401 vie_reset(vie); 2402 err = 0; 2403 } 2404 } 2405 break; 2406 default: 2407 return (EINVAL); 2408 } 2409 2410 /* 2411 * Pay heed to requests for exit-when-vCPU-is-consistent requests, at 2412 * least when we are not immediately bound for another exit due to 2413 * multi-part instruction emulation or related causes. 2414 */ 2415 if ((flags & VEC_FLAG_EXIT_CONSISTENT) != 0 && err == 0) { 2416 vcpu->reqconsist = true; 2417 } 2418 2419 return (err); 2420 } 2421 2422 static int 2423 vm_loop_checks(struct vm *vm, int vcpuid, struct vm_exit *vme) 2424 { 2425 struct vie *vie; 2426 2427 vie = vm->vcpu[vcpuid].vie_ctx; 2428 2429 if (vie_pending(vie)) { 2430 /* 2431 * Userspace has not fulfilled the pending needs of the 2432 * instruction emulation, so bail back out. 
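 *
 * For example (an assumed scenario): if the previous exit requested
 * completion of an MMIO read and userspace re-entered with VEC_DEFAULT
 * rather than VEC_FULFILL_MMIO, the emulation state remains pending, so
 * the same exit information is emitted again below rather than
 * attempting another VMRUN.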
2433 */ 2434 vie_exitinfo(vie, vme); 2435 return (-1); 2436 } 2437 2438 return (0); 2439 } 2440 2441 int 2442 vm_run(struct vm *vm, int vcpuid, const struct vm_entry *entry) 2443 { 2444 int error; 2445 struct vcpu *vcpu; 2446 struct vm_exit *vme; 2447 bool intr_disabled; 2448 int affinity_type = CPU_CURRENT; 2449 2450 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2451 return (EINVAL); 2452 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) 2453 return (EINVAL); 2454 if (vm->is_paused) { 2455 return (EBUSY); 2456 } 2457 2458 vcpu = &vm->vcpu[vcpuid]; 2459 vme = &vcpu->exitinfo; 2460 2461 vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN); 2462 2463 vcpu->vtc.vtc_status = 0; 2464 ctxop_attach(curthread, vcpu->ctxop); 2465 2466 error = vm_entry_actions(vm, vcpuid, entry, vme); 2467 if (error != 0) { 2468 goto exit; 2469 } 2470 2471 restart: 2472 error = vm_loop_checks(vm, vcpuid, vme); 2473 if (error != 0) { 2474 goto exit; 2475 } 2476 2477 thread_affinity_set(curthread, affinity_type); 2478 /* 2479 * Resource localization should happen after the CPU affinity for the 2480 * thread has been set to ensure that access from restricted contexts, 2481 * such as VMX-accelerated APIC operations, can occur without inducing 2482 * cyclic cross-calls. 2483 * 2484 * This must be done prior to disabling kpreempt via critical_enter(). 2485 */ 2486 vm_localize_resources(vm, vcpu); 2487 affinity_type = CPU_CURRENT; 2488 critical_enter(); 2489 2490 /* Force a trip through update_sregs to reload %fs/%gs and friends */ 2491 PCB_SET_UPDATE_SEGS(&ttolwp(curthread)->lwp_pcb); 2492 2493 if ((vcpu->vtc.vtc_status & VTCS_FPU_RESTORED) == 0) { 2494 restore_guest_fpustate(vcpu); 2495 vcpu->vtc.vtc_status |= VTCS_FPU_RESTORED; 2496 } 2497 vcpu->vtc.vtc_status |= VTCS_FPU_CTX_CRITICAL; 2498 2499 vcpu_require_state(vm, vcpuid, VCPU_RUNNING); 2500 error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip); 2501 vcpu_require_state(vm, vcpuid, VCPU_FROZEN); 2502 2503 /* 2504 * Once clear of the delicate contexts comprising the VM_RUN handler, 2505 * thread CPU affinity can be loosened while other processing occurs. 
2506 */ 2507 vcpu->vtc.vtc_status &= ~VTCS_FPU_CTX_CRITICAL; 2508 thread_affinity_clear(curthread); 2509 critical_exit(); 2510 2511 if (error != 0) { 2512 /* Communicate out any error from VMRUN() above */ 2513 goto exit; 2514 } 2515 2516 vcpu->nextrip = vme->rip + vme->inst_length; 2517 switch (vme->exitcode) { 2518 case VM_EXITCODE_REQIDLE: 2519 error = vm_handle_reqidle(vm, vcpuid); 2520 break; 2521 case VM_EXITCODE_RUN_STATE: 2522 error = vm_handle_run_state(vm, vcpuid); 2523 break; 2524 case VM_EXITCODE_SUSPENDED: 2525 error = vm_handle_suspend(vm, vcpuid); 2526 break; 2527 case VM_EXITCODE_IOAPIC_EOI: 2528 vioapic_process_eoi(vm, vcpuid, 2529 vme->u.ioapic_eoi.vector); 2530 break; 2531 case VM_EXITCODE_HLT: 2532 intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0); 2533 error = vm_handle_hlt(vm, vcpuid, intr_disabled); 2534 break; 2535 case VM_EXITCODE_PAGING: 2536 error = vm_handle_paging(vm, vcpuid); 2537 break; 2538 case VM_EXITCODE_MMIO_EMUL: 2539 error = vm_handle_mmio_emul(vm, vcpuid); 2540 break; 2541 case VM_EXITCODE_INOUT: 2542 error = vm_handle_inout(vm, vcpuid, vme); 2543 break; 2544 case VM_EXITCODE_INST_EMUL: 2545 error = vm_handle_inst_emul(vm, vcpuid); 2546 break; 2547 case VM_EXITCODE_MONITOR: 2548 case VM_EXITCODE_MWAIT: 2549 case VM_EXITCODE_VMINSN: 2550 vm_inject_ud(vm, vcpuid); 2551 break; 2552 case VM_EXITCODE_RDMSR: 2553 error = vm_handle_rdmsr(vm, vcpuid, vme); 2554 break; 2555 case VM_EXITCODE_WRMSR: 2556 error = vm_handle_wrmsr(vm, vcpuid, vme); 2557 break; 2558 case VM_EXITCODE_HT: 2559 affinity_type = CPU_BEST; 2560 break; 2561 case VM_EXITCODE_MTRAP: 2562 VERIFY0(vm_suspend_cpu(vm, vcpuid)); 2563 error = -1; 2564 break; 2565 default: 2566 /* handled in userland */ 2567 error = -1; 2568 break; 2569 } 2570 2571 if (error == 0) { 2572 /* VM exit conditions handled in-kernel, continue running */ 2573 goto restart; 2574 } 2575 2576 exit: 2577 kpreempt_disable(); 2578 ctxop_detach(curthread, vcpu->ctxop); 2579 /* Make sure all of the needed vCPU context state is saved */ 2580 vmm_savectx(&vcpu->vtc); 2581 kpreempt_enable(); 2582 2583 vcpu_ustate_change(vm, vcpuid, VU_EMU_USER); 2584 return (error); 2585 } 2586 2587 int 2588 vm_restart_instruction(void *arg, int vcpuid) 2589 { 2590 struct vm *vm; 2591 struct vcpu *vcpu; 2592 enum vcpu_state state; 2593 uint64_t rip; 2594 int error; 2595 2596 vm = arg; 2597 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2598 return (EINVAL); 2599 2600 vcpu = &vm->vcpu[vcpuid]; 2601 state = vcpu_get_state(vm, vcpuid, NULL); 2602 if (state == VCPU_RUNNING) { 2603 /* 2604 * When a vcpu is "running" the next instruction is determined 2605 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. 2606 * Thus setting 'inst_length' to zero will cause the current 2607 * instruction to be restarted. 2608 */ 2609 vcpu->exitinfo.inst_length = 0; 2610 } else if (state == VCPU_FROZEN) { 2611 /* 2612 * When a vcpu is "frozen" it is outside the critical section 2613 * around VMRUN() and 'nextrip' points to the next instruction. 2614 * Thus instruction restart is achieved by setting 'nextrip' 2615 * to the vcpu's %rip. 
2616 */ 2617 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip); 2618 KASSERT(!error, ("%s: error %d getting rip", __func__, error)); 2619 vcpu->nextrip = rip; 2620 } else { 2621 panic("%s: invalid state %d", __func__, state); 2622 } 2623 return (0); 2624 } 2625 2626 int 2627 vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info) 2628 { 2629 struct vcpu *vcpu; 2630 2631 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2632 return (EINVAL); 2633 2634 vcpu = &vm->vcpu[vcpuid]; 2635 2636 if (VM_INTINFO_PENDING(info)) { 2637 const uint32_t type = VM_INTINFO_TYPE(info); 2638 const uint8_t vector = VM_INTINFO_VECTOR(info); 2639 2640 if (type == VM_INTINFO_NMI && vector != IDT_NMI) 2641 return (EINVAL); 2642 if (type == VM_INTINFO_HWEXCP && vector >= 32) 2643 return (EINVAL); 2644 if (info & VM_INTINFO_MASK_RSVD) 2645 return (EINVAL); 2646 } else { 2647 info = 0; 2648 } 2649 vcpu->exit_intinfo = info; 2650 return (0); 2651 } 2652 2653 enum exc_class { 2654 EXC_BENIGN, 2655 EXC_CONTRIBUTORY, 2656 EXC_PAGEFAULT 2657 }; 2658 2659 #define IDT_VE 20 /* Virtualization Exception (Intel specific) */ 2660 2661 static enum exc_class 2662 exception_class(uint64_t info) 2663 { 2664 ASSERT(VM_INTINFO_PENDING(info)); 2665 2666 /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */ 2667 switch (VM_INTINFO_TYPE(info)) { 2668 case VM_INTINFO_HWINTR: 2669 case VM_INTINFO_SWINTR: 2670 case VM_INTINFO_NMI: 2671 return (EXC_BENIGN); 2672 default: 2673 /* 2674 * Hardware exception. 2675 * 2676 * SVM and VT-x use identical type values to represent NMI, 2677 * hardware interrupt and software interrupt. 2678 * 2679 * SVM uses type '3' for all exceptions. VT-x uses type '3' 2680 * for exceptions except #BP and #OF. #BP and #OF use a type 2681 * value of '5' or '6'. Therefore we don't check for explicit 2682 * values of 'type' to classify 'intinfo' into a hardware 2683 * exception. 2684 */ 2685 break; 2686 } 2687 2688 switch (VM_INTINFO_VECTOR(info)) { 2689 case IDT_PF: 2690 case IDT_VE: 2691 return (EXC_PAGEFAULT); 2692 case IDT_DE: 2693 case IDT_TS: 2694 case IDT_NP: 2695 case IDT_SS: 2696 case IDT_GP: 2697 return (EXC_CONTRIBUTORY); 2698 default: 2699 return (EXC_BENIGN); 2700 } 2701 } 2702 2703 /* 2704 * Fetch event pending injection into the guest, if one exists. 2705 * 2706 * Returns true if an event is to be injected (which is placed in `retinfo`). 2707 */ 2708 bool 2709 vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) 2710 { 2711 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2712 const uint64_t info1 = vcpu->exit_intinfo; 2713 vcpu->exit_intinfo = 0; 2714 const uint64_t info2 = vcpu->exc_pending; 2715 vcpu->exc_pending = 0; 2716 2717 if (VM_INTINFO_PENDING(info1) && VM_INTINFO_PENDING(info2)) { 2718 /* 2719 * If an exception occurs while attempting to call the 2720 * double-fault handler the processor enters shutdown mode 2721 * (aka triple fault). 2722 */ 2723 if (VM_INTINFO_TYPE(info1) == VM_INTINFO_HWEXCP && 2724 VM_INTINFO_VECTOR(info1) == IDT_DF) { 2725 (void) vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT); 2726 *retinfo = 0; 2727 return (false); 2728 } 2729 /* 2730 * "Conditions for Generating a Double Fault" 2731 * Intel SDM, Vol3, Table 6-5 2732 */ 2733 const enum exc_class exc1 = exception_class(info1); 2734 const enum exc_class exc2 = exception_class(info2); 2735 if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) || 2736 (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) { 2737 /* Convert nested fault into a double fault. 
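 * For example, a #GP raised while delivering a #NP (both contributory),
 * or a second #PF raised while delivering a #PF, takes this branch and
 * is promoted to #DF; a benign first event such as an NMI or external
 * interrupt never escalates and is handled serially below.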
*/ 2738 *retinfo = 2739 VM_INTINFO_VALID | 2740 VM_INTINFO_DEL_ERRCODE | 2741 VM_INTINFO_HWEXCP | 2742 IDT_DF; 2743 } else { 2744 /* Handle exceptions serially */ 2745 vcpu->exit_intinfo = info1; 2746 *retinfo = info2; 2747 } 2748 return (true); 2749 } else if (VM_INTINFO_PENDING(info1)) { 2750 *retinfo = info1; 2751 return (true); 2752 } else if (VM_INTINFO_PENDING(info2)) { 2753 *retinfo = info2; 2754 return (true); 2755 } 2756 2757 return (false); 2758 } 2759 2760 int 2761 vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) 2762 { 2763 struct vcpu *vcpu; 2764 2765 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2766 return (EINVAL); 2767 2768 vcpu = &vm->vcpu[vcpuid]; 2769 *info1 = vcpu->exit_intinfo; 2770 *info2 = vcpu->exc_pending; 2771 return (0); 2772 } 2773 2774 int 2775 vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector, 2776 bool errcode_valid, uint32_t errcode, bool restart_instruction) 2777 { 2778 struct vcpu *vcpu; 2779 uint64_t regval; 2780 int error; 2781 2782 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2783 return (EINVAL); 2784 2785 if (vector >= 32) 2786 return (EINVAL); 2787 2788 /* 2789 * NMIs are to be injected via their own specialized path using 2790 * vm_inject_nmi(). 2791 */ 2792 if (vector == IDT_NMI) { 2793 return (EINVAL); 2794 } 2795 2796 /* 2797 * A double fault exception should never be injected directly into 2798 * the guest. It is a derived exception that results from specific 2799 * combinations of nested faults. 2800 */ 2801 if (vector == IDT_DF) { 2802 return (EINVAL); 2803 } 2804 2805 vcpu = &vm->vcpu[vcpuid]; 2806 2807 if (VM_INTINFO_PENDING(vcpu->exc_pending)) { 2808 /* Unable to inject exception due to one already pending */ 2809 return (EBUSY); 2810 } 2811 2812 if (errcode_valid) { 2813 /* 2814 * Exceptions don't deliver an error code in real mode. 2815 */ 2816 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, ®val); 2817 VERIFY0(error); 2818 if ((regval & CR0_PE) == 0) { 2819 errcode_valid = false; 2820 } 2821 } 2822 2823 /* 2824 * From section 26.6.1 "Interruptibility State" in Intel SDM: 2825 * 2826 * Event blocking by "STI" or "MOV SS" is cleared after guest executes 2827 * one instruction or incurs an exception. 
2828 */ 2829 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0); 2830 VERIFY0(error); 2831 2832 if (restart_instruction) { 2833 VERIFY0(vm_restart_instruction(vm, vcpuid)); 2834 } 2835 2836 uint64_t val = VM_INTINFO_VALID | VM_INTINFO_HWEXCP | vector; 2837 if (errcode_valid) { 2838 val |= VM_INTINFO_DEL_ERRCODE; 2839 val |= (uint64_t)errcode << VM_INTINFO_SHIFT_ERRCODE; 2840 } 2841 vcpu->exc_pending = val; 2842 return (0); 2843 } 2844 2845 void 2846 vm_inject_ud(struct vm *vm, int vcpuid) 2847 { 2848 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_UD, false, 0, true)); 2849 } 2850 2851 void 2852 vm_inject_gp(struct vm *vm, int vcpuid) 2853 { 2854 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_GP, true, 0, true)); 2855 } 2856 2857 void 2858 vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode) 2859 { 2860 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_AC, true, errcode, true)); 2861 } 2862 2863 void 2864 vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode) 2865 { 2866 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_SS, true, errcode, true)); 2867 } 2868 2869 void 2870 vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2) 2871 { 2872 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2)); 2873 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_PF, true, errcode, true)); 2874 } 2875 2876 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); 2877 2878 int 2879 vm_inject_nmi(struct vm *vm, int vcpuid) 2880 { 2881 struct vcpu *vcpu; 2882 2883 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2884 return (EINVAL); 2885 2886 vcpu = &vm->vcpu[vcpuid]; 2887 2888 vcpu->nmi_pending = true; 2889 vcpu_notify_event(vm, vcpuid); 2890 return (0); 2891 } 2892 2893 bool 2894 vm_nmi_pending(struct vm *vm, int vcpuid) 2895 { 2896 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2897 2898 return (vcpu->nmi_pending); 2899 } 2900 2901 void 2902 vm_nmi_clear(struct vm *vm, int vcpuid) 2903 { 2904 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2905 2906 ASSERT(vcpu->nmi_pending); 2907 2908 vcpu->nmi_pending = false; 2909 vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1); 2910 } 2911 2912 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); 2913 2914 int 2915 vm_inject_extint(struct vm *vm, int vcpuid) 2916 { 2917 struct vcpu *vcpu; 2918 2919 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2920 return (EINVAL); 2921 2922 vcpu = &vm->vcpu[vcpuid]; 2923 2924 vcpu->extint_pending = true; 2925 vcpu_notify_event(vm, vcpuid); 2926 return (0); 2927 } 2928 2929 bool 2930 vm_extint_pending(struct vm *vm, int vcpuid) 2931 { 2932 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2933 2934 return (vcpu->extint_pending); 2935 } 2936 2937 void 2938 vm_extint_clear(struct vm *vm, int vcpuid) 2939 { 2940 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2941 2942 ASSERT(vcpu->extint_pending); 2943 2944 vcpu->extint_pending = false; 2945 vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1); 2946 } 2947 2948 int 2949 vm_inject_init(struct vm *vm, int vcpuid) 2950 { 2951 struct vcpu *vcpu; 2952 2953 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2954 return (EINVAL); 2955 2956 vcpu = &vm->vcpu[vcpuid]; 2957 vcpu_lock(vcpu); 2958 vcpu->run_state |= VRS_PEND_INIT; 2959 /* 2960 * As part of queuing the INIT request, clear any pending SIPI. It 2961 * would not otherwise survive across the reset of the vCPU when it 2962 * undergoes the requested INIT. We would not want it to linger when it 2963 * could be mistaken as a subsequent (after the INIT) SIPI request. 
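 *
 * For instance, in the conventional INIT-SIPI startup sequence aimed at
 * an AP: the INIT queued here leads vm_handle_run_state() to reset the
 * vCPU into VRS_INIT, and only a SIPI injected after that point (via
 * vm_inject_sipi()) should move it to VRS_RUN at the vector-derived
 * start address.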
2964 */ 2965 vcpu->run_state &= ~VRS_PEND_SIPI; 2966 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 2967 2968 vcpu_unlock(vcpu); 2969 return (0); 2970 } 2971 2972 int 2973 vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vector) 2974 { 2975 struct vcpu *vcpu; 2976 2977 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2978 return (EINVAL); 2979 2980 vcpu = &vm->vcpu[vcpuid]; 2981 vcpu_lock(vcpu); 2982 vcpu->run_state |= VRS_PEND_SIPI; 2983 vcpu->sipi_vector = vector; 2984 /* SIPI is only actionable if the CPU is waiting in INIT state */ 2985 if ((vcpu->run_state & (VRS_INIT | VRS_RUN)) == VRS_INIT) { 2986 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 2987 } 2988 vcpu_unlock(vcpu); 2989 return (0); 2990 } 2991 2992 bool 2993 vcpu_run_state_pending(struct vm *vm, int vcpuid) 2994 { 2995 struct vcpu *vcpu; 2996 2997 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); 2998 vcpu = &vm->vcpu[vcpuid]; 2999 3000 /* Of interest: vCPU not in running state or with pending INIT */ 3001 return ((vcpu->run_state & (VRS_RUN | VRS_PEND_INIT)) != VRS_RUN); 3002 } 3003 3004 int 3005 vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only) 3006 { 3007 struct seg_desc desc; 3008 const enum vm_reg_name clear_regs[] = { 3009 VM_REG_GUEST_CR2, 3010 VM_REG_GUEST_CR3, 3011 VM_REG_GUEST_CR4, 3012 VM_REG_GUEST_RAX, 3013 VM_REG_GUEST_RBX, 3014 VM_REG_GUEST_RCX, 3015 VM_REG_GUEST_RSI, 3016 VM_REG_GUEST_RDI, 3017 VM_REG_GUEST_RBP, 3018 VM_REG_GUEST_RSP, 3019 VM_REG_GUEST_R8, 3020 VM_REG_GUEST_R9, 3021 VM_REG_GUEST_R10, 3022 VM_REG_GUEST_R11, 3023 VM_REG_GUEST_R12, 3024 VM_REG_GUEST_R13, 3025 VM_REG_GUEST_R14, 3026 VM_REG_GUEST_R15, 3027 VM_REG_GUEST_DR0, 3028 VM_REG_GUEST_DR1, 3029 VM_REG_GUEST_DR2, 3030 VM_REG_GUEST_DR3, 3031 VM_REG_GUEST_EFER, 3032 }; 3033 const enum vm_reg_name data_segs[] = { 3034 VM_REG_GUEST_SS, 3035 VM_REG_GUEST_DS, 3036 VM_REG_GUEST_ES, 3037 VM_REG_GUEST_FS, 3038 VM_REG_GUEST_GS, 3039 }; 3040 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3041 3042 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3043 return (EINVAL); 3044 3045 for (uint_t i = 0; i < nitems(clear_regs); i++) { 3046 VERIFY0(vm_set_register(vm, vcpuid, clear_regs[i], 0)); 3047 } 3048 3049 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, 2)); 3050 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP, 0xfff0)); 3051 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR0, 0x60000010)); 3052 3053 /* 3054 * The prescribed contents of %rdx differ slightly between the Intel and 3055 * AMD architectural definitions. The former expects the Extended Model 3056 * in bits 16-19 where the latter expects all the Family, Model, and 3057 * Stepping be there. Common boot ROMs appear to disregard this 3058 * anyways, so we stick with a compromise value similar to what is 3059 * spelled out in the Intel SDM. 
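 * (For reference, the 0x600 written below decodes as family 6, model 0,
 * stepping 0 under the usual processor-signature layout.)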
3060 */ 3061 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX, 0x600)); 3062 3063 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_DR6, 0xffff0ff0)); 3064 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_DR7, 0x400)); 3065 3066 /* CS: Present, R/W, Accessed */ 3067 desc.access = 0x0093; 3068 desc.base = 0xffff0000; 3069 desc.limit = 0xffff; 3070 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc)); 3071 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CS, 0xf000)); 3072 3073 /* SS, DS, ES, FS, GS: Present, R/W, Accessed */ 3074 desc.access = 0x0093; 3075 desc.base = 0; 3076 desc.limit = 0xffff; 3077 for (uint_t i = 0; i < nitems(data_segs); i++) { 3078 VERIFY0(vm_set_seg_desc(vm, vcpuid, data_segs[i], &desc)); 3079 VERIFY0(vm_set_register(vm, vcpuid, data_segs[i], 0)); 3080 } 3081 3082 /* GDTR, IDTR */ 3083 desc.base = 0; 3084 desc.limit = 0xffff; 3085 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_GDTR, &desc)); 3086 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_IDTR, &desc)); 3087 3088 /* LDTR: Present, LDT */ 3089 desc.access = 0x0082; 3090 desc.base = 0; 3091 desc.limit = 0xffff; 3092 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_LDTR, &desc)); 3093 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_LDTR, 0)); 3094 3095 /* TR: Present, 32-bit TSS */ 3096 desc.access = 0x008b; 3097 desc.base = 0; 3098 desc.limit = 0xffff; 3099 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_TR, &desc)); 3100 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_TR, 0)); 3101 3102 vlapic_reset(vm_lapic(vm, vcpuid)); 3103 3104 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0)); 3105 3106 vcpu->exit_intinfo = 0; 3107 vcpu->exc_pending = 0; 3108 vcpu->nmi_pending = false; 3109 vcpu->extint_pending = false; 3110 3111 /* 3112 * A CPU reset caused by power-on or system reset clears more state than 3113 * one which is triggered by an INIT IPI.
3114 */ 3115 if (!init_only) { 3116 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87; 3117 (void) hma_fpu_init(vcpu->guestfpu); 3118 3119 /* XXX: clear MSRs and other pieces */ 3120 bzero(&vcpu->mtrr, sizeof (vcpu->mtrr)); 3121 } 3122 3123 return (0); 3124 } 3125 3126 static int 3127 vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector) 3128 { 3129 struct seg_desc desc; 3130 3131 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3132 return (EINVAL); 3133 3134 /* CS: Present, R/W, Accessed */ 3135 desc.access = 0x0093; 3136 desc.base = (uint64_t)vector << 12; 3137 desc.limit = 0xffff; 3138 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc)); 3139 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CS, 3140 (uint64_t)vector << 8)); 3141 3142 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP, 0)); 3143 3144 return (0); 3145 } 3146 3147 int 3148 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval) 3149 { 3150 if (vcpu < 0 || vcpu >= vm->maxcpus) 3151 return (EINVAL); 3152 3153 if (type < 0 || type >= VM_CAP_MAX) 3154 return (EINVAL); 3155 3156 return (VMGETCAP(vm->cookie, vcpu, type, retval)); 3157 } 3158 3159 int 3160 vm_set_capability(struct vm *vm, int vcpu, int type, int val) 3161 { 3162 if (vcpu < 0 || vcpu >= vm->maxcpus) 3163 return (EINVAL); 3164 3165 if (type < 0 || type >= VM_CAP_MAX) 3166 return (EINVAL); 3167 3168 return (VMSETCAP(vm->cookie, vcpu, type, val)); 3169 } 3170 3171 vcpu_cpuid_config_t * 3172 vm_cpuid_config(struct vm *vm, int vcpuid) 3173 { 3174 ASSERT3S(vcpuid, >=, 0); 3175 ASSERT3S(vcpuid, <, VM_MAXCPU); 3176 3177 return (&vm->vcpu[vcpuid].cpuid_cfg); 3178 } 3179 3180 struct vlapic * 3181 vm_lapic(struct vm *vm, int cpu) 3182 { 3183 ASSERT3S(cpu, >=, 0); 3184 ASSERT3S(cpu, <, VM_MAXCPU); 3185 3186 return (vm->vcpu[cpu].vlapic); 3187 } 3188 3189 struct vioapic * 3190 vm_ioapic(struct vm *vm) 3191 { 3192 3193 return (vm->vioapic); 3194 } 3195 3196 struct vhpet * 3197 vm_hpet(struct vm *vm) 3198 { 3199 3200 return (vm->vhpet); 3201 } 3202 3203 void * 3204 vm_iommu_domain(struct vm *vm) 3205 { 3206 3207 return (vm->iommu); 3208 } 3209 3210 int 3211 vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate, 3212 bool from_idle) 3213 { 3214 int error; 3215 struct vcpu *vcpu; 3216 3217 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3218 panic("vcpu_set_state: invalid vcpuid %d", vcpuid); 3219 3220 vcpu = &vm->vcpu[vcpuid]; 3221 3222 vcpu_lock(vcpu); 3223 error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle); 3224 vcpu_unlock(vcpu); 3225 3226 return (error); 3227 } 3228 3229 enum vcpu_state 3230 vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu) 3231 { 3232 struct vcpu *vcpu; 3233 enum vcpu_state state; 3234 3235 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3236 panic("vcpu_get_state: invalid vcpuid %d", vcpuid); 3237 3238 vcpu = &vm->vcpu[vcpuid]; 3239 3240 vcpu_lock(vcpu); 3241 state = vcpu->state; 3242 if (hostcpu != NULL) 3243 *hostcpu = vcpu->hostcpu; 3244 vcpu_unlock(vcpu); 3245 3246 return (state); 3247 } 3248 3249 /* 3250 * Calculate the TSC offset for a vCPU, applying physical CPU adjustments if 3251 * requested. The offset calculations include the VM-wide TSC offset. 
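 *
 * In other words, the value returned amounts to:
 *
 *	vm->tsc_offset + vcpu->tsc_offset [+ vmm_host_tsc_delta()]
 *
 * with the bracketed physical-CPU delta applied only when the caller
 * requests phys_adj.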
3252 */ 3253 uint64_t 3254 vcpu_tsc_offset(struct vm *vm, int vcpuid, bool phys_adj) 3255 { 3256 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); 3257 3258 uint64_t vcpu_off = vm->tsc_offset + vm->vcpu[vcpuid].tsc_offset; 3259 3260 if (phys_adj) { 3261 /* Include any offset for the current physical CPU too */ 3262 vcpu_off += vmm_host_tsc_delta(); 3263 } 3264 3265 return (vcpu_off); 3266 } 3267 3268 uint64_t 3269 vm_get_freq_multiplier(struct vm *vm) 3270 { 3271 return (vm->freq_multiplier); 3272 } 3273 3274 /* Normalize hrtime against the boot time for a VM */ 3275 hrtime_t 3276 vm_normalize_hrtime(struct vm *vm, hrtime_t hrt) 3277 { 3278 /* To avoid underflow/overflow UB, perform math as unsigned */ 3279 return ((hrtime_t)((uint64_t)hrt - (uint64_t)vm->boot_hrtime)); 3280 } 3281 3282 /* Denormalize hrtime against the boot time for a VM */ 3283 hrtime_t 3284 vm_denormalize_hrtime(struct vm *vm, hrtime_t hrt) 3285 { 3286 /* To avoid underflow/overflow UB, perform math as unsigned */ 3287 return ((hrtime_t)((uint64_t)hrt + (uint64_t)vm->boot_hrtime)); 3288 } 3289 3290 int 3291 vm_activate_cpu(struct vm *vm, int vcpuid) 3292 { 3293 3294 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3295 return (EINVAL); 3296 3297 if (CPU_ISSET(vcpuid, &vm->active_cpus)) 3298 return (EBUSY); 3299 3300 if (vm->suspend != 0) { 3301 return (EBUSY); 3302 } 3303 3304 CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); 3305 3306 /* 3307 * It is possible that this vCPU was undergoing activation at the same 3308 * time that the VM was being suspended. If that happens to be the 3309 * case, it should reflect the suspended state immediately. 3310 */ 3311 if (atomic_load_acq_int((uint_t *)&vm->suspend) != 0) { 3312 CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus); 3313 } 3314 3315 return (0); 3316 } 3317 3318 int 3319 vm_suspend_cpu(struct vm *vm, int vcpuid) 3320 { 3321 int i; 3322 3323 if (vcpuid < -1 || vcpuid >= vm->maxcpus) 3324 return (EINVAL); 3325 3326 if (vcpuid == -1) { 3327 vm->debug_cpus = vm->active_cpus; 3328 for (i = 0; i < vm->maxcpus; i++) { 3329 if (CPU_ISSET(i, &vm->active_cpus)) 3330 vcpu_notify_event(vm, i); 3331 } 3332 } else { 3333 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) 3334 return (EINVAL); 3335 3336 CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus); 3337 vcpu_notify_event(vm, vcpuid); 3338 } 3339 return (0); 3340 } 3341 3342 int 3343 vm_resume_cpu(struct vm *vm, int vcpuid) 3344 { 3345 3346 if (vcpuid < -1 || vcpuid >= vm->maxcpus) 3347 return (EINVAL); 3348 3349 if (vcpuid == -1) { 3350 CPU_ZERO(&vm->debug_cpus); 3351 } else { 3352 if (!CPU_ISSET(vcpuid, &vm->debug_cpus)) 3353 return (EINVAL); 3354 3355 CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus); 3356 } 3357 return (0); 3358 } 3359 3360 static bool 3361 vcpu_bailout_checks(struct vm *vm, int vcpuid, bool on_entry, 3362 uint64_t entry_rip) 3363 { 3364 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3365 struct vm_exit *vme = &vcpu->exitinfo; 3366 bool bail = false; 3367 3368 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); 3369 3370 if (vm->suspend) { 3371 if (on_entry) { 3372 VERIFY(vm->suspend > VM_SUSPEND_NONE && 3373 vm->suspend < VM_SUSPEND_LAST); 3374 3375 vme->exitcode = VM_EXITCODE_SUSPENDED; 3376 vme->u.suspended.how = vm->suspend; 3377 } else { 3378 /* 3379 * Handling VM suspend is complicated, so if that 3380 * condition is detected outside of VM-entry itself, 3381 * just emit a BOGUS exitcode so we take a lap to pick 3382 * up the event during an entry and are directed into 3383 * the vm_handle_suspend() logic. 
3384 */ 3385 vme->exitcode = VM_EXITCODE_BOGUS; 3386 } 3387 bail = true; 3388 } 3389 if (vcpu->reqidle) { 3390 vme->exitcode = VM_EXITCODE_REQIDLE; 3391 vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1); 3392 3393 if (!on_entry) { 3394 /* 3395 * A reqidle request detected outside of VM-entry can be 3396 * handled directly by clearing the request (and taking 3397 * a lap to userspace). 3398 */ 3399 vcpu_assert_locked(vcpu); 3400 vcpu->reqidle = 0; 3401 } 3402 bail = true; 3403 } 3404 if (vcpu->reqconsist) { 3405 /* 3406 * We only expect exit-when-consistent requests to be asserted 3407 * during entry, not as an otherwise spontaneous condition. As 3408 * such, we do not count it among the exit statistics, and emit 3409 * the expected BOGUS exitcode, while clearing the request. 3410 */ 3411 vme->exitcode = VM_EXITCODE_BOGUS; 3412 vcpu->reqconsist = false; 3413 bail = true; 3414 } 3415 if (vcpu_should_yield(vm, vcpuid)) { 3416 vme->exitcode = VM_EXITCODE_BOGUS; 3417 vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1); 3418 bail = true; 3419 } 3420 if (CPU_ISSET(vcpuid, &vm->debug_cpus)) { 3421 vme->exitcode = VM_EXITCODE_DEBUG; 3422 bail = true; 3423 } 3424 3425 if (bail) { 3426 if (on_entry) { 3427 /* 3428 * If bailing out during VM-entry, the current %rip must 3429 * be recorded in the exitinfo. 3430 */ 3431 vme->rip = entry_rip; 3432 } 3433 vme->inst_length = 0; 3434 } 3435 return (bail); 3436 } 3437 3438 static bool 3439 vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid) 3440 { 3441 /* 3442 * Bail-out check done prior to sleeping (in vCPU contexts like HLT or 3443 * wait-for-SIPI) expect that %rip is already populated in the vm_exit 3444 * structure, and we would only modify the exitcode. 3445 */ 3446 return (vcpu_bailout_checks(vm, vcpuid, false, 0)); 3447 } 3448 3449 bool 3450 vcpu_entry_bailout_checks(struct vm *vm, int vcpuid, uint64_t rip) 3451 { 3452 /* 3453 * Bail-out checks done as part of VM entry require an updated %rip to 3454 * populate the vm_exit struct if any of the conditions of interest are 3455 * matched in the check. 3456 */ 3457 return (vcpu_bailout_checks(vm, vcpuid, true, rip)); 3458 } 3459 3460 cpuset_t 3461 vm_active_cpus(struct vm *vm) 3462 { 3463 3464 return (vm->active_cpus); 3465 } 3466 3467 cpuset_t 3468 vm_debug_cpus(struct vm *vm) 3469 { 3470 3471 return (vm->debug_cpus); 3472 } 3473 3474 cpuset_t 3475 vm_suspended_cpus(struct vm *vm) 3476 { 3477 3478 return (vm->suspended_cpus); 3479 } 3480 3481 void * 3482 vcpu_stats(struct vm *vm, int vcpuid) 3483 { 3484 3485 return (vm->vcpu[vcpuid].stats); 3486 } 3487 3488 int 3489 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state) 3490 { 3491 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3492 return (EINVAL); 3493 3494 *state = vm->vcpu[vcpuid].x2apic_state; 3495 3496 return (0); 3497 } 3498 3499 int 3500 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state) 3501 { 3502 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3503 return (EINVAL); 3504 3505 if (state >= X2APIC_STATE_LAST) 3506 return (EINVAL); 3507 3508 vm->vcpu[vcpuid].x2apic_state = state; 3509 3510 vlapic_set_x2apic_state(vm, vcpuid, state); 3511 3512 return (0); 3513 } 3514 3515 /* 3516 * This function is called to ensure that a vcpu "sees" a pending event 3517 * as soon as possible: 3518 * - If the vcpu thread is sleeping then it is woken up. 3519 * - If the vcpu is running on a different host_cpu then an IPI will be directed 3520 * to the host_cpu to cause the vcpu to trap into the hypervisor. 
3521 */ 3522 static void 3523 vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t ntype) 3524 { 3525 int hostcpu; 3526 3527 ASSERT(ntype == VCPU_NOTIFY_APIC || VCPU_NOTIFY_EXIT); 3528 3529 hostcpu = vcpu->hostcpu; 3530 if (vcpu->state == VCPU_RUNNING) { 3531 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); 3532 if (hostcpu != curcpu) { 3533 if (ntype == VCPU_NOTIFY_APIC) { 3534 vlapic_post_intr(vcpu->vlapic, hostcpu); 3535 } else { 3536 poke_cpu(hostcpu); 3537 } 3538 } else { 3539 /* 3540 * If the 'vcpu' is running on 'curcpu' then it must 3541 * be sending a notification to itself (e.g. SELF_IPI). 3542 * The pending event will be picked up when the vcpu 3543 * transitions back to guest context. 3544 */ 3545 } 3546 } else { 3547 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " 3548 "with hostcpu %d", vcpu->state, hostcpu)); 3549 if (vcpu->state == VCPU_SLEEPING) { 3550 cv_signal(&vcpu->vcpu_cv); 3551 } 3552 } 3553 } 3554 3555 void 3556 vcpu_notify_event(struct vm *vm, int vcpuid) 3557 { 3558 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3559 3560 vcpu_lock(vcpu); 3561 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 3562 vcpu_unlock(vcpu); 3563 } 3564 3565 void 3566 vcpu_notify_event_type(struct vm *vm, int vcpuid, vcpu_notify_t ntype) 3567 { 3568 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3569 3570 if (ntype == VCPU_NOTIFY_NONE) { 3571 return; 3572 } 3573 3574 vcpu_lock(vcpu); 3575 vcpu_notify_event_locked(vcpu, ntype); 3576 vcpu_unlock(vcpu); 3577 } 3578 3579 void 3580 vcpu_ustate_change(struct vm *vm, int vcpuid, enum vcpu_ustate ustate) 3581 { 3582 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3583 hrtime_t now = gethrtime(); 3584 3585 ASSERT3U(ustate, !=, vcpu->ustate); 3586 ASSERT3S(ustate, <, VU_MAX); 3587 ASSERT3S(ustate, >=, VU_INIT); 3588 3589 hrtime_t delta = now - vcpu->ustate_when; 3590 vcpu->ustate_total[vcpu->ustate] += delta; 3591 3592 membar_producer(); 3593 3594 vcpu->ustate_when = now; 3595 vcpu->ustate = ustate; 3596 } 3597 3598 struct vmspace * 3599 vm_get_vmspace(struct vm *vm) 3600 { 3601 3602 return (vm->vmspace); 3603 } 3604 3605 struct vm_client * 3606 vm_get_vmclient(struct vm *vm, int vcpuid) 3607 { 3608 return (vm->vcpu[vcpuid].vmclient); 3609 } 3610 3611 int 3612 vm_apicid2vcpuid(struct vm *vm, int apicid) 3613 { 3614 /* 3615 * XXX apic id is assumed to be numerically identical to vcpu id 3616 */ 3617 return (apicid); 3618 } 3619 3620 struct vatpic * 3621 vm_atpic(struct vm *vm) 3622 { 3623 return (vm->vatpic); 3624 } 3625 3626 struct vatpit * 3627 vm_atpit(struct vm *vm) 3628 { 3629 return (vm->vatpit); 3630 } 3631 3632 struct vpmtmr * 3633 vm_pmtmr(struct vm *vm) 3634 { 3635 3636 return (vm->vpmtmr); 3637 } 3638 3639 struct vrtc * 3640 vm_rtc(struct vm *vm) 3641 { 3642 3643 return (vm->vrtc); 3644 } 3645 3646 enum vm_reg_name 3647 vm_segment_name(int seg) 3648 { 3649 static enum vm_reg_name seg_names[] = { 3650 VM_REG_GUEST_ES, 3651 VM_REG_GUEST_CS, 3652 VM_REG_GUEST_SS, 3653 VM_REG_GUEST_DS, 3654 VM_REG_GUEST_FS, 3655 VM_REG_GUEST_GS 3656 }; 3657 3658 KASSERT(seg >= 0 && seg < nitems(seg_names), 3659 ("%s: invalid segment encoding %d", __func__, seg)); 3660 return (seg_names[seg]); 3661 } 3662 3663 void 3664 vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, 3665 uint_t num_copyinfo) 3666 { 3667 for (uint_t idx = 0; idx < num_copyinfo; idx++) { 3668 if (copyinfo[idx].cookie != NULL) { 3669 (void) vmp_release((vm_page_t *)copyinfo[idx].cookie); 3670 } 3671 } 3672 bzero(copyinfo, num_copyinfo * sizeof (struct 
vm_copyinfo)); 3673 } 3674 3675 int 3676 vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging, 3677 uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo, 3678 uint_t num_copyinfo, int *fault) 3679 { 3680 uint_t idx, nused; 3681 size_t n, off, remaining; 3682 vm_client_t *vmc = vm_get_vmclient(vm, vcpuid); 3683 3684 bzero(copyinfo, sizeof (struct vm_copyinfo) * num_copyinfo); 3685 3686 nused = 0; 3687 remaining = len; 3688 while (remaining > 0) { 3689 uint64_t gpa; 3690 int error; 3691 3692 KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo")); 3693 error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault); 3694 if (error || *fault) 3695 return (error); 3696 off = gpa & PAGEOFFSET; 3697 n = min(remaining, PAGESIZE - off); 3698 copyinfo[nused].gpa = gpa; 3699 copyinfo[nused].len = n; 3700 remaining -= n; 3701 gla += n; 3702 nused++; 3703 } 3704 3705 for (idx = 0; idx < nused; idx++) { 3706 vm_page_t *vmp; 3707 caddr_t hva; 3708 3709 vmp = vmc_hold(vmc, copyinfo[idx].gpa & PAGEMASK, prot); 3710 if (vmp == NULL) { 3711 break; 3712 } 3713 if ((prot & PROT_WRITE) != 0) { 3714 hva = (caddr_t)vmp_get_writable(vmp); 3715 } else { 3716 hva = (caddr_t)vmp_get_readable(vmp); 3717 } 3718 copyinfo[idx].hva = hva + (copyinfo[idx].gpa & PAGEOFFSET); 3719 copyinfo[idx].cookie = vmp; 3720 copyinfo[idx].prot = prot; 3721 } 3722 3723 if (idx != nused) { 3724 vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo); 3725 return (EFAULT); 3726 } else { 3727 *fault = 0; 3728 return (0); 3729 } 3730 } 3731 3732 void 3733 vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr, 3734 size_t len) 3735 { 3736 char *dst; 3737 int idx; 3738 3739 dst = kaddr; 3740 idx = 0; 3741 while (len > 0) { 3742 ASSERT(copyinfo[idx].prot & PROT_READ); 3743 3744 bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len); 3745 len -= copyinfo[idx].len; 3746 dst += copyinfo[idx].len; 3747 idx++; 3748 } 3749 } 3750 3751 void 3752 vm_copyout(struct vm *vm, int vcpuid, const void *kaddr, 3753 struct vm_copyinfo *copyinfo, size_t len) 3754 { 3755 const char *src; 3756 int idx; 3757 3758 src = kaddr; 3759 idx = 0; 3760 while (len > 0) { 3761 ASSERT(copyinfo[idx].prot & PROT_WRITE); 3762 3763 bcopy(src, copyinfo[idx].hva, copyinfo[idx].len); 3764 len -= copyinfo[idx].len; 3765 src += copyinfo[idx].len; 3766 idx++; 3767 } 3768 } 3769 3770 /* 3771 * Return the amount of in-use and wired memory for the VM. Since 3772 * these are global stats, only return the values with for vCPU 0 3773 */ 3774 VMM_STAT_DECLARE(VMM_MEM_RESIDENT); 3775 3776 static void 3777 vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat) 3778 { 3779 if (vcpu == 0) { 3780 vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT, 3781 PAGE_SIZE * vmspace_resident_count(vm->vmspace)); 3782 } 3783 } 3784 3785 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt); 3786 3787 int 3788 vm_ioport_access(struct vm *vm, int vcpuid, bool in, uint16_t port, 3789 uint8_t bytes, uint32_t *val) 3790 { 3791 return (vm_inout_access(&vm->ioports, in, port, bytes, val)); 3792 } 3793 3794 /* 3795 * bhyve-internal interfaces to attach or detach IO port handlers. 3796 * Must be called with VM write lock held for safety. 
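 *
 * Sketch of the expected usage (illustrative, not prescriptive): an
 * in-kernel device emulation calls vm_ioport_attach(vm, port, handler,
 * arg, &cookie) while holding the VM write lock, stashes the opaque
 * cookie, and later hands &cookie back to vm_ioport_detach() during
 * teardown; the cookie encodes the handler, argument, and port via
 * IOP_GEN_COOKIE(), from which the detach path recovers the port.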
3797 */ 3798 int 3799 vm_ioport_attach(struct vm *vm, uint16_t port, ioport_handler_t func, void *arg, 3800 void **cookie) 3801 { 3802 int err; 3803 err = vm_inout_attach(&vm->ioports, port, IOPF_DEFAULT, func, arg); 3804 if (err == 0) { 3805 *cookie = (void *)IOP_GEN_COOKIE(func, arg, port); 3806 } 3807 return (err); 3808 } 3809 int 3810 vm_ioport_detach(struct vm *vm, void **cookie, ioport_handler_t *old_func, 3811 void **old_arg) 3812 { 3813 uint16_t port = IOP_PORT_FROM_COOKIE((uintptr_t)*cookie); 3814 int err; 3815 3816 err = vm_inout_detach(&vm->ioports, port, false, old_func, old_arg); 3817 if (err == 0) { 3818 *cookie = NULL; 3819 } 3820 return (err); 3821 } 3822 3823 /* 3824 * External driver interfaces to attach or detach IO port handlers. 3825 * Must be called with VM write lock held for safety. 3826 */ 3827 int 3828 vm_ioport_hook(struct vm *vm, uint16_t port, ioport_handler_t func, 3829 void *arg, void **cookie) 3830 { 3831 int err; 3832 3833 if (port == 0) { 3834 return (EINVAL); 3835 } 3836 3837 err = vm_inout_attach(&vm->ioports, port, IOPF_DRV_HOOK, func, arg); 3838 if (err == 0) { 3839 *cookie = (void *)IOP_GEN_COOKIE(func, arg, port); 3840 } 3841 return (err); 3842 } 3843 void 3844 vm_ioport_unhook(struct vm *vm, void **cookie) 3845 { 3846 uint16_t port = IOP_PORT_FROM_COOKIE((uintptr_t)*cookie); 3847 ioport_handler_t old_func; 3848 void *old_arg; 3849 int err; 3850 3851 err = vm_inout_detach(&vm->ioports, port, true, &old_func, &old_arg); 3852 3853 /* ioport-hook-using drivers are expected to be well-behaved */ 3854 VERIFY0(err); 3855 VERIFY(IOP_GEN_COOKIE(old_func, old_arg, port) == (uintptr_t)*cookie); 3856 3857 *cookie = NULL; 3858 } 3859 3860 int 3861 vmm_kstat_update_vcpu(struct kstat *ksp, int rw) 3862 { 3863 struct vm *vm = ksp->ks_private; 3864 vmm_vcpu_kstats_t *vvk = ksp->ks_data; 3865 const int vcpuid = vvk->vvk_vcpu.value.ui32; 3866 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3867 3868 ASSERT3U(vcpuid, <, VM_MAXCPU); 3869 3870 vvk->vvk_time_init.value.ui64 = vcpu->ustate_total[VU_INIT]; 3871 vvk->vvk_time_run.value.ui64 = vcpu->ustate_total[VU_RUN]; 3872 vvk->vvk_time_idle.value.ui64 = vcpu->ustate_total[VU_IDLE]; 3873 vvk->vvk_time_emu_kern.value.ui64 = vcpu->ustate_total[VU_EMU_KERN]; 3874 vvk->vvk_time_emu_user.value.ui64 = vcpu->ustate_total[VU_EMU_USER]; 3875 vvk->vvk_time_sched.value.ui64 = vcpu->ustate_total[VU_SCHED]; 3876 3877 return (0); 3878 } 3879 3880 SET_DECLARE(vmm_data_version_entries, const vmm_data_version_entry_t); 3881 3882 static int 3883 vmm_data_find(const vmm_data_req_t *req, int vcpuid, 3884 const vmm_data_version_entry_t **resp) 3885 { 3886 const vmm_data_version_entry_t **vdpp, *vdp; 3887 3888 ASSERT(resp != NULL); 3889 ASSERT(req->vdr_result_len != NULL); 3890 3891 SET_FOREACH(vdpp, vmm_data_version_entries) { 3892 vdp = *vdpp; 3893 if (vdp->vdve_class != req->vdr_class || 3894 vdp->vdve_version != req->vdr_version) { 3895 continue; 3896 } 3897 3898 /* 3899 * Enforce any data length expectation expressed by the provider 3900 * for this data. 3901 */ 3902 if (vdp->vdve_len_expect != 0 && 3903 vdp->vdve_len_expect > req->vdr_len) { 3904 *req->vdr_result_len = vdp->vdve_len_expect; 3905 return (ENOSPC); 3906 } 3907 3908 /* 3909 * Make sure that the provided vcpuid is acceptable for the 3910 * backend handler. 
3911 */ 3912 if (vdp->vdve_readf != NULL || vdp->vdve_writef != NULL) { 3913 /* 3914 * While it is tempting to demand the -1 sentinel value 3915 * in vcpuid here, that expectation was not established 3916 * for early consumers, so it is ignored. 3917 */ 3918 } else if (vdp->vdve_vcpu_readf != NULL || 3919 vdp->vdve_vcpu_writef != NULL) { 3920 /* 3921 * Per-vCPU handlers which permit "wildcard" access will 3922 * accept a vcpuid of -1 (for VM-wide data), while all 3923 * others expect vcpuid [0, VM_MAXCPU). 3924 */ 3925 const int llimit = vdp->vdve_vcpu_wildcard ? -1 : 0; 3926 if (vcpuid < llimit || vcpuid >= VM_MAXCPU) { 3927 return (EINVAL); 3928 } 3929 } else { 3930 /* 3931 * A provider with neither VM-wide nor per-vCPU handlers 3932 * is completely unexpected. Such a situation should be 3933 * made into a compile-time error. Bail out for now, 3934 * rather than punishing the user with a panic. 3935 */ 3936 return (EINVAL); 3937 } 3938 3939 3940 *resp = vdp; 3941 return (0); 3942 } 3943 return (EINVAL); 3944 } 3945 3946 static void * 3947 vmm_data_from_class(const vmm_data_req_t *req, struct vm *vm) 3948 { 3949 switch (req->vdr_class) { 3950 case VDC_REGISTER: 3951 case VDC_MSR: 3952 case VDC_FPU: 3953 case VDC_LAPIC: 3954 case VDC_VMM_ARCH: 3955 /* 3956 * These have per-CPU handling which is dispatched outside 3957 * vmm_data_version_entries listing. 3958 */ 3959 panic("Unexpected per-vcpu class %u", req->vdr_class); 3960 break; 3961 3962 case VDC_IOAPIC: 3963 return (vm->vioapic); 3964 case VDC_ATPIT: 3965 return (vm->vatpit); 3966 case VDC_ATPIC: 3967 return (vm->vatpic); 3968 case VDC_HPET: 3969 return (vm->vhpet); 3970 case VDC_PM_TIMER: 3971 return (vm->vpmtmr); 3972 case VDC_RTC: 3973 return (vm->vrtc); 3974 case VDC_VMM_TIME: 3975 return (vm); 3976 case VDC_VERSION: 3977 /* 3978 * Play along with all of the other classes which need backup 3979 * data, even though version info does not require it. 3980 */ 3981 return (vm); 3982 3983 default: 3984 /* The data class will have been validated by now */ 3985 panic("Unexpected class %u", req->vdr_class); 3986 } 3987 } 3988 3989 const uint32_t default_msr_iter[] = { 3990 /* 3991 * Although EFER is also available via the get/set-register interface, 3992 * we include it in the default list of emitted MSRs. 3993 */ 3994 MSR_EFER, 3995 3996 /* 3997 * While gsbase and fsbase are accessible via the MSR accessors, they 3998 * are not included in MSR iteration since they are covered by the 3999 * segment descriptor interface too. 4000 */ 4001 MSR_KGSBASE, 4002 4003 MSR_STAR, 4004 MSR_LSTAR, 4005 MSR_CSTAR, 4006 MSR_SF_MASK, 4007 4008 MSR_SYSENTER_CS_MSR, 4009 MSR_SYSENTER_ESP_MSR, 4010 MSR_SYSENTER_EIP_MSR, 4011 4012 MSR_PAT, 4013 4014 MSR_TSC, 4015 4016 MSR_MTRRcap, 4017 MSR_MTRRdefType, 4018 MSR_MTRR4kBase, MSR_MTRR4kBase + 1, MSR_MTRR4kBase + 2, 4019 MSR_MTRR4kBase + 3, MSR_MTRR4kBase + 4, MSR_MTRR4kBase + 5, 4020 MSR_MTRR4kBase + 6, MSR_MTRR4kBase + 7, 4021 MSR_MTRR16kBase, MSR_MTRR16kBase + 1, 4022 MSR_MTRR64kBase, 4023 }; 4024 4025 static int 4026 vmm_data_read_msr(struct vm *vm, int vcpuid, uint32_t msr, uint64_t *value) 4027 { 4028 int err = 0; 4029 4030 switch (msr) { 4031 case MSR_TSC: 4032 /* 4033 * The vmm-data interface for MSRs provides access to the 4034 * per-vCPU offset of the TSC, when reading/writing MSR_TSC. 4035 * 4036 * The VM-wide offset (and scaling) of the guest TSC is accessed 4037 * via the VMM_TIME data class. 
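 *
 * As an example of the distinction: a vCPU whose TSC has not been
 * written will typically read back 0 here -- just the per-vCPU delta --
 * rather than the guest-visible TSC, which must be reconstructed with
 * the VMM_TIME data instead.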
4038 */ 4039 *value = vm->vcpu[vcpuid].tsc_offset; 4040 return (0); 4041 4042 default: 4043 if (is_mtrr_msr(msr)) { 4044 err = vm_rdmtrr(&vm->vcpu[vcpuid].mtrr, msr, value); 4045 } else { 4046 err = ops->vmgetmsr(vm->cookie, vcpuid, msr, value); 4047 } 4048 break; 4049 } 4050 4051 return (err); 4052 } 4053 4054 static int 4055 vmm_data_write_msr(struct vm *vm, int vcpuid, uint32_t msr, uint64_t value) 4056 { 4057 int err = 0; 4058 4059 switch (msr) { 4060 case MSR_TSC: 4061 /* See vmm_data_read_msr() for more detail */ 4062 vm->vcpu[vcpuid].tsc_offset = value; 4063 return (0); 4064 case MSR_MTRRcap: { 4065 /* 4066 * MTRRcap is read-only. If the desired value matches the 4067 * existing one, consider it a success. 4068 */ 4069 uint64_t comp; 4070 err = vm_rdmtrr(&vm->vcpu[vcpuid].mtrr, msr, &comp); 4071 if (err == 0 && comp != value) { 4072 return (EINVAL); 4073 } 4074 break; 4075 } 4076 default: 4077 if (is_mtrr_msr(msr)) { 4078 /* MTRRcap is already handled above */ 4079 ASSERT3U(msr, !=, MSR_MTRRcap); 4080 4081 err = vm_wrmtrr(&vm->vcpu[vcpuid].mtrr, msr, value); 4082 } else { 4083 err = ops->vmsetmsr(vm->cookie, vcpuid, msr, value); 4084 } 4085 break; 4086 } 4087 4088 return (err); 4089 } 4090 4091 static int 4092 vmm_data_read_msrs(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4093 { 4094 VERIFY3U(req->vdr_class, ==, VDC_MSR); 4095 VERIFY3U(req->vdr_version, ==, 1); 4096 4097 struct vdi_field_entry_v1 *entryp = req->vdr_data; 4098 4099 /* Specific MSRs requested */ 4100 if ((req->vdr_flags & VDX_FLAG_READ_COPYIN) != 0) { 4101 const uint_t count = 4102 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4103 4104 for (uint_t i = 0; i < count; i++, entryp++) { 4105 int err = vmm_data_read_msr(vm, vcpuid, 4106 entryp->vfe_ident, &entryp->vfe_value); 4107 4108 if (err != 0) { 4109 return (err); 4110 } 4111 } 4112 4113 *req->vdr_result_len = 4114 count * sizeof (struct vdi_field_entry_v1); 4115 return (0); 4116 } 4117 4118 /* 4119 * If specific MSRs are not requested, try to provide all those which we 4120 * know about instead. 4121 */ 4122 const uint_t num_msrs = nitems(default_msr_iter) + 4123 (VMM_MTRR_VAR_MAX * 2); 4124 const uint32_t output_len = 4125 num_msrs * sizeof (struct vdi_field_entry_v1); 4126 4127 *req->vdr_result_len = output_len; 4128 if (req->vdr_len < output_len) { 4129 return (ENOSPC); 4130 } 4131 4132 /* Output the MSRs in the default list */ 4133 for (uint_t i = 0; i < nitems(default_msr_iter); i++, entryp++) { 4134 entryp->vfe_ident = default_msr_iter[i]; 4135 4136 /* All of these MSRs are expected to work */ 4137 VERIFY0(vmm_data_read_msr(vm, vcpuid, entryp->vfe_ident, 4138 &entryp->vfe_value)); 4139 } 4140 4141 /* Output the variable MTRRs */ 4142 for (uint_t i = 0; i < (VMM_MTRR_VAR_MAX * 2); i++, entryp++) { 4143 entryp->vfe_ident = MSR_MTRRVarBase + i; 4144 4145 /* All of these MSRs are expected to work */ 4146 VERIFY0(vmm_data_read_msr(vm, vcpuid, entryp->vfe_ident, 4147 &entryp->vfe_value)); 4148 } 4149 return (0); 4150 } 4151 4152 static int 4153 vmm_data_write_msrs(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4154 { 4155 VERIFY3U(req->vdr_class, ==, VDC_MSR); 4156 VERIFY3U(req->vdr_version, ==, 1); 4157 4158 const struct vdi_field_entry_v1 *entryp = req->vdr_data; 4159 const uint_t entry_count = 4160 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4161 4162 /* 4163 * First make sure that all of the MSRs can be manipulated. 
4164 * For now, this check is done by going though the getmsr handler 4165 */ 4166 for (uint_t i = 0; i < entry_count; i++, entryp++) { 4167 const uint64_t msr = entryp->vfe_ident; 4168 uint64_t val; 4169 4170 if (vmm_data_read_msr(vm, vcpuid, msr, &val) != 0) { 4171 return (EINVAL); 4172 } 4173 } 4174 4175 /* 4176 * Fairly confident that all of the 'set' operations are at least 4177 * targeting valid MSRs, continue on. 4178 */ 4179 entryp = req->vdr_data; 4180 for (uint_t i = 0; i < entry_count; i++, entryp++) { 4181 int err = vmm_data_write_msr(vm, vcpuid, entryp->vfe_ident, 4182 entryp->vfe_value); 4183 4184 if (err != 0) { 4185 return (err); 4186 } 4187 } 4188 *req->vdr_result_len = entry_count * sizeof (struct vdi_field_entry_v1); 4189 4190 return (0); 4191 } 4192 4193 static const vmm_data_version_entry_t msr_v1 = { 4194 .vdve_class = VDC_MSR, 4195 .vdve_version = 1, 4196 .vdve_len_per_item = sizeof (struct vdi_field_entry_v1), 4197 .vdve_vcpu_readf = vmm_data_read_msrs, 4198 .vdve_vcpu_writef = vmm_data_write_msrs, 4199 }; 4200 VMM_DATA_VERSION(msr_v1); 4201 4202 static const uint32_t vmm_arch_v1_fields[] = { 4203 VAI_VM_IS_PAUSED, 4204 }; 4205 4206 static const uint32_t vmm_arch_v1_vcpu_fields[] = { 4207 VAI_PEND_NMI, 4208 VAI_PEND_EXTINT, 4209 VAI_PEND_EXCP, 4210 VAI_PEND_INTINFO, 4211 }; 4212 4213 static bool 4214 vmm_read_arch_field(struct vm *vm, int vcpuid, uint32_t ident, uint64_t *valp) 4215 { 4216 ASSERT(valp != NULL); 4217 4218 if (vcpuid == -1) { 4219 switch (ident) { 4220 case VAI_VM_IS_PAUSED: 4221 *valp = vm->is_paused ? 1 : 0; 4222 return (true); 4223 default: 4224 break; 4225 } 4226 } else { 4227 VERIFY(vcpuid >= 0 && vcpuid <= VM_MAXCPU); 4228 4229 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 4230 switch (ident) { 4231 case VAI_PEND_NMI: 4232 *valp = vcpu->nmi_pending != 0 ? 1 : 0; 4233 return (true); 4234 case VAI_PEND_EXTINT: 4235 *valp = vcpu->extint_pending != 0 ? 
1 : 0; 4236 return (true); 4237 case VAI_PEND_EXCP: 4238 *valp = vcpu->exc_pending; 4239 return (true); 4240 case VAI_PEND_INTINFO: 4241 *valp = vcpu->exit_intinfo; 4242 return (true); 4243 default: 4244 break; 4245 } 4246 } 4247 return (false); 4248 } 4249 4250 static int 4251 vmm_data_read_varch(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4252 { 4253 VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH); 4254 VERIFY3U(req->vdr_version, ==, 1); 4255 4256 /* per-vCPU fields are handled separately from VM-wide ones */ 4257 if (vcpuid != -1 && (vcpuid < 0 || vcpuid >= VM_MAXCPU)) { 4258 return (EINVAL); 4259 } 4260 4261 struct vdi_field_entry_v1 *entryp = req->vdr_data; 4262 4263 /* Specific fields requested */ 4264 if ((req->vdr_flags & VDX_FLAG_READ_COPYIN) != 0) { 4265 const uint_t count = 4266 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4267 4268 for (uint_t i = 0; i < count; i++, entryp++) { 4269 if (!vmm_read_arch_field(vm, vcpuid, entryp->vfe_ident, 4270 &entryp->vfe_value)) { 4271 return (EINVAL); 4272 } 4273 } 4274 *req->vdr_result_len = 4275 count * sizeof (struct vdi_field_entry_v1); 4276 return (0); 4277 } 4278 4279 /* Emit all of the possible values */ 4280 const uint32_t *idents; 4281 uint_t ident_count; 4282 4283 if (vcpuid == -1) { 4284 idents = vmm_arch_v1_fields; 4285 ident_count = nitems(vmm_arch_v1_fields); 4286 } else { 4287 idents = vmm_arch_v1_vcpu_fields; 4288 ident_count = nitems(vmm_arch_v1_vcpu_fields); 4289 4290 } 4291 4292 const uint32_t total_size = 4293 ident_count * sizeof (struct vdi_field_entry_v1); 4294 4295 *req->vdr_result_len = total_size; 4296 if (req->vdr_len < total_size) { 4297 return (ENOSPC); 4298 } 4299 for (uint_t i = 0; i < ident_count; i++, entryp++) { 4300 entryp->vfe_ident = idents[i]; 4301 VERIFY(vmm_read_arch_field(vm, vcpuid, entryp->vfe_ident, 4302 &entryp->vfe_value)); 4303 } 4304 return (0); 4305 } 4306 4307 static int 4308 vmm_data_write_varch_vcpu(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4309 { 4310 VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH); 4311 VERIFY3U(req->vdr_version, ==, 1); 4312 4313 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) { 4314 return (EINVAL); 4315 } 4316 4317 const struct vdi_field_entry_v1 *entryp = req->vdr_data; 4318 const uint_t entry_count = 4319 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4320 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 4321 4322 for (uint_t i = 0; i < entry_count; i++, entryp++) { 4323 const uint64_t val = entryp->vfe_value; 4324 4325 switch (entryp->vfe_ident) { 4326 case VAI_PEND_NMI: 4327 vcpu->nmi_pending = (val != 0); 4328 break; 4329 case VAI_PEND_EXTINT: 4330 vcpu->extint_pending = (val != 0); 4331 break; 4332 case VAI_PEND_EXCP: 4333 if (!VM_INTINFO_PENDING(val)) { 4334 vcpu->exc_pending = 0; 4335 } else if (VM_INTINFO_TYPE(val) != VM_INTINFO_HWEXCP || 4336 (val & VM_INTINFO_MASK_RSVD) != 0) { 4337 /* reject improperly-formed hw exception */ 4338 return (EINVAL); 4339 } else { 4340 vcpu->exc_pending = val; 4341 } 4342 break; 4343 case VAI_PEND_INTINFO: 4344 if (vm_exit_intinfo(vm, vcpuid, val) != 0) { 4345 return (EINVAL); 4346 } 4347 break; 4348 default: 4349 return (EINVAL); 4350 } 4351 } 4352 4353 *req->vdr_result_len = entry_count * sizeof (struct vdi_field_entry_v1); 4354 return (0); 4355 } 4356 4357 static int 4358 vmm_data_write_varch(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4359 { 4360 VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH); 4361 VERIFY3U(req->vdr_version, ==, 1); 4362 4363 /* per-vCPU fields are handled separately from VM-wide ones */ 4364 if 
(vcpuid != -1) { 4365 return (vmm_data_write_varch_vcpu(vm, vcpuid, req)); 4366 } 4367 4368 const struct vdi_field_entry_v1 *entryp = req->vdr_data; 4369 const uint_t entry_count = 4370 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4371 4372 if (entry_count > 0) { 4373 if (entryp->vfe_ident == VAI_VM_IS_PAUSED) { 4374 /* 4375 * The VM_PAUSE and VM_RESUME ioctls are the officially 4376 * sanctioned mechanisms for setting the is-paused state 4377 * of the VM. 4378 */ 4379 return (EPERM); 4380 } else { 4381 /* no other valid arch entries at this time */ 4382 return (EINVAL); 4383 } 4384 } 4385 4386 *req->vdr_result_len = entry_count * sizeof (struct vdi_field_entry_v1); 4387 return (0); 4388 } 4389 4390 static const vmm_data_version_entry_t vmm_arch_v1 = { 4391 .vdve_class = VDC_VMM_ARCH, 4392 .vdve_version = 1, 4393 .vdve_len_per_item = sizeof (struct vdi_field_entry_v1), 4394 .vdve_vcpu_readf = vmm_data_read_varch, 4395 .vdve_vcpu_writef = vmm_data_write_varch, 4396 4397 /* 4398 * Handlers for VMM_ARCH can process VM-wide (vcpuid == -1) entries in 4399 * addition to vCPU specific ones. 4400 */ 4401 .vdve_vcpu_wildcard = true, 4402 }; 4403 VMM_DATA_VERSION(vmm_arch_v1); 4404 4405 4406 /* 4407 * GUEST TIME SUPPORT 4408 * 4409 * Broadly, there are two categories of functionality related to time passing in 4410 * the guest: the guest's TSC and timers used by emulated devices. 4411 * 4412 * --------------------------- 4413 * GUEST TSC "VIRTUALIZATION" 4414 * --------------------------- 4415 * 4416 * The TSC can be read either via an instruction (rdtsc/rdtscp) or by reading 4417 * the TSC MSR. 4418 * 4419 * When a guest reads the TSC via its MSR, the guest will exit and we emulate 4420 * the rdmsr. More typically, the guest reads the TSC via a rdtsc(p) 4421 * instruction. Both SVM and VMX support virtualizing the guest TSC in hardware 4422 * -- that is, a guest will not generally exit on a rdtsc instruction. 4423 * 4424 * To support hardware-virtualized guest TSC, both SVM and VMX provide two knobs 4425 * for the hypervisor to adjust the guest's view of the TSC: 4426 * - TSC offset 4427 * - TSC frequency multiplier (also called "frequency ratio") 4428 * 4429 * When a guest calls rdtsc(p), the TSC value it sees is the sum of: 4430 * guest_tsc = (host TSC, scaled according to frequency multiplier) 4431 * + (TSC offset, programmed by hypervisor) 4432 * 4433 * See the discussions of the TSC offset and frequency multiplier below for more 4434 * details on each of these. 4435 * 4436 * -------------------- 4437 * TSC OFFSET OVERVIEW 4438 * -------------------- 4439 * 4440 * The TSC offset is a value added to the host TSC (which may be scaled first) 4441 * to provide the guest TSC. This offset addition is generally done by hardware, 4442 * but may be used in emulating the TSC if necessary. 4443 * 4444 * Recall that general formula for calculating the guest TSC is: 4445 * 4446 * guest_tsc = (host TSC, scaled if needed) + TSC offset 4447 * 4448 * Intuitively, the TSC offset is simply an offset of the host's TSC to make the 4449 * guest's view of the TSC appear correct: The guest TSC should be 0 at boot and 4450 * monotonically increase at a roughly constant frequency. Thus in the simplest 4451 * case, the TSC offset is just the negated value of the host TSC when the guest 4452 * was booted, assuming they have the same frequencies. 
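 *
 * (To make the simplest case concrete -- an illustration only, with made-up
 * numbers: if the host TSC reads 5000000000 at the moment the guest boots,
 * then
 *
 *	offset    = (uint64_t)(-(int64_t)5000000000)
 *	guest_tsc = host_tsc + offset
 *
 * so a guest rdtsc at boot yields 0, and a read one second later, with both
 * sides running at 1GHz, yields roughly 1000000000.)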
4453 * 4454 * In practice, there are several factors that can make calculating the TSC 4455 * offset more complicated, including: 4456 * 4457 * (1) the physical CPU the guest is running on 4458 * (2) whether the guest has written to the TSC of that vCPU 4459 * (3) differing host and guest frequencies, like after a live migration 4460 * (4) a guest running on a different system than where it was booted, like 4461 * after a live migration 4462 * 4463 * We will explore each of these factors individually. See below for a 4464 * summary. 4465 * 4466 * 4467 * (1) Physical CPU offsets 4468 * 4469 * The system maintains a set of per-CPU offsets to the TSC to provide a 4470 * consistent view of the TSC regardless of the CPU a thread is running on. 4471 * These offsets are included automatically as a part of rdtsc_offset(). 4472 * 4473 * The per-CPU offset must be included as part of reading the host TSC when 4474 * calculating the offset before running the guest on a given CPU. 4475 * 4476 * 4477 * (2) Guest TSC writes (vCPU offsets) 4478 * 4479 * The TSC is a writable MSR. When a guest writes to the TSC, subsequent reads 4480 * of the TSC from that vCPU should show the value written, 4481 * plus whatever time has elapsed since the write. 4482 * 4483 * To support this, when the guest writes to the TSC, we store an additional 4484 * vCPU offset calculated to make future reads of the TSC map to what the guest 4485 * expects. 4486 * 4487 * 4488 * (3) Differing host and guest frequencies (host TSC scaling) 4489 * 4490 * A guest has the same TSC frequency as its host when it boots, but it may be 4491 * migrated to a machine with a different TSC frequency. Systems expect that 4492 * their TSC frequency does not change. To maintain this fiction when a guest 4493 * is running on hardware with a different TSC frequency, the hypervisor can 4494 * program a "frequency multiplier" that represents the ratio of guest/host 4495 * frequency. 4496 * 4497 * Any time a host TSC is used in calculations for the offset, it should be 4498 * "scaled" according to this multiplier, and the hypervisor should program the 4499 * multiplier before running a guest so that the hardware virtualization of the 4500 * TSC functions properly. Similarly, the multiplier should be used in any TSC 4501 * emulation. 4502 * 4503 * See below for more details about the frequency multiplier. 4504 * 4505 * 4506 * (4) Guest running on a system it did not boot on ("base guest TSC") 4507 * 4508 * When a guest boots, its TSC offset is simply the negated host TSC at the time 4509 * it booted. If a guest is migrated from a source host to a target host, the 4510 * TSC offset from the source host is no longer useful for several reasons: 4511 * - the target host TSC has no relationship to the source host TSC 4512 * - the guest did not boot on the target system, so the TSC of the target host 4513 * is not sufficient to describe how long the guest has been running prior to 4514 * migration 4515 * - the target system may have a different TSC frequency than the source system 4516 * 4517 * Ignoring the issue of frequency differences for a moment, let's consider how 4518 * to re-align the guest TSC with the host TSC of the target host. Intuitively, 4519 * for the guest to see the correct TSC, we still want to add an offset to the 4520 * host TSC that accounts for how long this guest has been running on 4521 * the system. 4522 * 4523 * An example here might be helpful. Consider a source host and target host, 4524 * both with TSC frequencies of 1GHz.
On the source host, the guest and host TSC 4525 * values might look like: 4526 * 4527 * +----------------------------------------------------------------------+ 4528 * | Event | source host TSC | guest TSC | 4529 * ------------------------------------------------------------------------ 4530 * | guest boot (t=0s) | 5000000000 | 5000000000 + -5000000000 | 4531 * | | | 0 | 4532 * ------------------------------------------------------------------------ 4533 * | guest rdtsc (t=10s) | 15000000000 | 15000000000 + -5000000000 | 4534 * | | | 10000000000 | 4535 * ------------------------------------------------------------------------ 4536 * | migration (t=15s) | 20000000000 | 20000000000 + -5000000000 | 4537 * | | | 15000000000 | 4538 * +----------------------------------------------------------------------+ 4539 * 4540 * Ignoring the time it takes for a guest to physically migrate machines, on the 4541 * target host, we would expect the TSC to continue functioning as follows: 4542 * 4543 * +----------------------------------------------------------------------+ 4544 * | Event | target host TSC | guest TSC | 4545 * ------------------------------------------------------------------------ 4546 * | guest migrate (t=15s) | 300000000000 | 15000000000 | 4547 * ------------------------------------------------------------------------ 4548 * | guest rdtsc (t=20s) | 305000000000 | 20000000000 | 4549 * ------------------------------------------------------------------------ 4550 * 4551 * In order to produce a correct TSC value here, we can calculate a new 4552 * "effective" boot TSC: the value the target host's TSC would have had when 4553 * the guest was effectively booted on it, i.e. the target host TSC at import 4554 * minus the guest TSC at import. Negating that yields the new VM-wide offset. 4555 * In this example, the offset is: -(300000000000 - 15000000000) = -285000000000. 4556 * 4557 * +-------------------------------------------------------------------------+ 4558 * | Event | target host TSC | guest TSC | 4559 * --------------------------------------------------------------------------- 4560 * | guest "boot" (t=0s) | 285000000000 | 285000000000 + -285000000000 | 4561 * | | | 0 | 4562 * --------------------------------------------------------------------------- 4563 * | guest migrate (t=15s) | 300000000000 | 300000000000 + -285000000000 | 4564 * | | | 15000000000 | 4565 * --------------------------------------------------------------------------- 4566 * | guest rdtsc (t=20s) | 305000000000 | 305000000000 + -285000000000 | 4567 * | | | 20000000000 | 4568 * +-------------------------------------------------------------------------+ 4569 * 4570 * To support the offset calculation following a migration, the VMM data time 4571 * interface allows callers to set a "base guest TSC", which is the TSC value of 4572 * the guest when it began running on the host. The current guest TSC can be 4573 * requested via a read of the time data. See below for details on that 4574 * interface. 4575 * 4576 * Frequency differences between the host and the guest are accounted for when 4577 * scaling the host TSC. See below for details on the frequency multiplier.
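 *
 * (Restating that arithmetic in the unsigned form the code uses -- a sketch
 * only, not the exact code path: with a target host TSC of 300000000000 and a
 * caller-supplied base guest TSC of 15000000000 at import time,
 *
 *	offset = 15000000000 - 300000000000 = -285000000000	(mod 2^64)
 *
 * which matches the "effective boot TSC" negation above; see
 * calc_tsc_offset() further below for the implementation.)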
4578 * 4579 * 4580 * -------------------- 4581 * TSC OFFSET SUMMARY 4582 * -------------------- 4583 * 4584 * Factoring in all of the components to the TSC above, the TSC offset that is 4585 * programmed by the hypervisor before running a given vCPU is: 4586 * 4587 * offset = -((base host TSC, scaled if needed) - base_guest_tsc) + vCPU offset 4588 * 4589 * This offset is stored in two pieces. Per-vCPU offsets are stored with the 4590 * given vCPU and added in when programming the offset. The rest of the offset 4591 * is stored as a VM-wide offset, and computed either at boot or when the time 4592 * data is written to. 4593 * 4594 * It is safe to add the vCPU offset and the VM-wide offsets together because 4595 * the vCPU offset is in terms of the guest TSC. The host TSC is scaled before 4596 * using it in calculations, so all TSC values are applicable to the same 4597 * frequency. 4598 * 4599 * Note: Though both the VM-wide offset and per-vCPU offsets may be negative, we 4600 * store them as unsigned values and perform all offsetting math unsigned. This 4601 * is to avoid UB from signed overflow. 4602 * 4603 * ------------------------- 4604 * TSC FREQUENCY MULTIPLIER 4605 * ------------------------- 4606 * 4607 * In order to account for frequency differences between the host and guest, SVM 4608 * and VMX provide an interface to set a "frequency multiplier" (or "frequency 4609 * ratio") representing guest to host frequency. In a hardware-virtualized read 4610 * of the TSC, the host TSC is scaled using this multiplier prior to adding the 4611 * programmed TSC offset. 4612 * 4613 * Both platforms represent the ratio as a fixed point number, where the lower 4614 * bits are used as a fractional component, and some number of the upper bits 4615 * are used as the integer component. 4616 * 4617 * Some example multipliers, for a platform with FRAC fractional bits in the 4618 * multiplier: 4619 * - guest frequency == host: 1 << FRAC 4620 * - guest frequency is 2x host: 1 << (FRAC + 1) 4621 * - guest frequency is 0.5x host: 1 << (FRAC - 1), as the highest-order 4622 * fractional bit represents 1/2 4623 * - guest frequency is 2.5x host: (1 << FRAC) | (1 << (FRAC - 1)) 4624 * and so on. 4625 * 4626 * In general, the frequency multiplier is calculated as follows: 4627 * (guest_hz * (1 << FRAC_SIZE)) / host_hz 4628 * 4629 * The multiplier should be used any time the host TSC value is used in 4630 * calculations with the guest TSC (and their frequencies differ). The function 4631 * `vmm_scale_tsc` is intended to be used for these purposes, as it will scale 4632 * the host TSC only if needed. 4633 * 4634 * The multiplier should also be programmed by the hypervisor before the guest 4635 * is run. 4636 * 4637 * 4638 * ---------------------------- 4639 * DEVICE TIMERS (BOOT_HRTIME) 4640 * ---------------------------- 4641 * 4642 * Emulated devices use timers to do things such as scheduling periodic events. 4643 * These timers are scheduled relative to the hrtime of the host. When device 4644 * state is exported or imported, we use boot_hrtime to normalize these timers 4645 * against the host hrtime. The boot_hrtime represents the hrtime of the host 4646 * when the guest was booted. 4647 * 4648 * If a guest is migrated to a different machine, boot_hrtime must be adjusted 4649 * to match the hrtime of when the guest was effectively booted on the target 4650 * host. This allows timers to continue functioning when device state is 4651 * imported on the target. 
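 *
 * (As an illustration with made-up numbers: if the guest has accumulated 15
 * seconds of run time by the point it is imported, and the target host's
 * hrtime at import is 400000000000ns, then the adjusted value is
 *
 *	boot_hrtime = 400000000000 - 15000000000 = 385000000000
 *
 * so that device timers normalized against boot_hrtime continue to line up
 * with the target host's hrtime.)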
4652 * 4653 * 4654 * ------------------------ 4655 * VMM DATA TIME INTERFACE 4656 * ------------------------ 4657 * 4658 * In order to facilitate live migrations of guests, we provide an interface, 4659 * via the VMM data read/write ioctls, for userspace to make changes to the 4660 * guest's view of the TSC and device timers, allowing these features to 4661 * continue functioning after a migration. 4662 * 4663 * The interface was designed to expose the minimal amount of data needed for a 4664 * userspace component to make adjustments to the guest's view of time (e.g., to 4665 * account for time passing in a live migration). At a minimum, such a program 4666 * needs: 4667 * - the current guest TSC 4668 * - guest TSC frequency 4669 * - guest's boot_hrtime 4670 * - timestamps of when this data was taken (hrtime for hrtime calculations, and 4671 * wall clock time for computing time deltas between machines) 4672 * 4673 * The wall clock time is provided for consumers to make adjustments to the 4674 * guest TSC and boot_hrtime based on deltas observed during migrations. It may 4675 * be prudent for consumers to use this data only in circumstances where the 4676 * source and target have well-synchronized wall clocks, but nothing in the 4677 * interface depends on this assumption. 4678 * 4679 * On writes, consumers write back: 4680 * - the base guest TSC (used for TSC offset calculations) 4681 * - desired boot_hrtime 4682 * - guest_frequency (cannot change) 4683 * - hrtime of when this data was adjusted 4684 * - (wall clock time on writes is ignored) 4685 * 4686 * The interface will adjust the input guest TSC slightly, based on the input 4687 * hrtime, to account for latency between userspace calculations and application 4688 * of the data on the kernel side. This amounts to adding a small amount of 4689 * additional "uptime" for the guest. 4690 * 4691 * After the adjustments, the interface updates the VM-wide TSC offset and 4692 * boot_hrtime. Per-vCPU offsets are not adjusted, as those are already in terms 4693 * of the guest TSC and can be exported/imported via the MSR VMM data interface. 4694 * 4695 * 4696 * -------------------------------- 4697 * SUPPORTED PLATFORMS AND CAVEATS 4698 * -------------------------------- 4699 * 4700 * While both VMX and SVM offer TSC scaling as a feature, at this time only SVM 4701 * is supported by bhyve. 4702 * 4703 * The time data interface is designed such that Intel support can be added 4704 * easily, and all other aspects of the time interface should work on Intel. 4705 * (Without frequency control though, in practice, doing live migrations of 4706 * guests on Intel will not work for time-related things, as two machines 4707 * rarely have exactly the same frequency). 4708 * 4709 * Additionally, while on both SVM and VMX the frequency multiplier is a fixed 4710 * point number, each uses a different number of fractional and integer bits for 4711 * the multiplier. As such, calculating the multiplier and fractional bit size 4712 * is requested via the vmm_ops. 4713 * 4714 * Care should be taken to set reasonable limits for ratios based on the 4715 * platform, as the difference in fractional bits can lead to slightly different 4716 * tradeoffs in terms of representable ratios and potentially overflowing 4717 * calculations. 4718 */ 4719 4720 /* 4721 * Scales the TSC if needed, based on the input frequency multiplier. 
4722 */ 4723 static uint64_t 4724 vmm_scale_tsc(uint64_t tsc, uint64_t mult) 4725 { 4726 const uint32_t frac_size = ops->fr_fracsize; 4727 4728 if (mult != VM_TSCM_NOSCALE) { 4729 VERIFY3U(frac_size, >, 0); 4730 return (scale_tsc(tsc, mult, frac_size)); 4731 } else { 4732 return (tsc); 4733 } 4734 } 4735 4736 /* 4737 * Calculate the frequency multiplier, which represents the ratio of 4738 * guest_hz / host_hz. The frequency multiplier is a fixed point number with 4739 * `frac_sz` fractional bits (fractional bits begin at bit 0). 4740 * 4741 * See comment for "calc_freq_multiplier" in "vmm_time_support.S" for more 4742 * information about valid input to this function. 4743 */ 4744 uint64_t 4745 vmm_calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz, 4746 uint32_t frac_size) 4747 { 4748 VERIFY3U(guest_hz, !=, 0); 4749 VERIFY3U(frac_size, >, 0); 4750 VERIFY3U(frac_size, <, 64); 4751 4752 return (calc_freq_multiplier(guest_hz, host_hz, frac_size)); 4753 } 4754 4755 /* 4756 * Calculate the guest VM-wide TSC offset. 4757 * 4758 * offset = - ((base host TSC, scaled if needed) - base_guest_tsc) 4759 * 4760 * The base_host_tsc and the base_guest_tsc are the TSC values of the host 4761 * (read on the system) and the guest (calculated) at the same point in time. 4762 * This allows us to fix the guest TSC at this point in time as a base, either 4763 * following boot (guest TSC = 0), or a change to the guest's time data from 4764 * userspace (such as in the case of a migration). 4765 */ 4766 static uint64_t 4767 calc_tsc_offset(uint64_t base_host_tsc, uint64_t base_guest_tsc, uint64_t mult) 4768 { 4769 const uint64_t htsc_scaled = vmm_scale_tsc(base_host_tsc, mult); 4770 if (htsc_scaled > base_guest_tsc) { 4771 return ((uint64_t)(- (int64_t)(htsc_scaled - base_guest_tsc))); 4772 } else { 4773 return (base_guest_tsc - htsc_scaled); 4774 } 4775 } 4776 4777 /* 4778 * Calculate an estimate of the guest TSC. 4779 * 4780 * guest_tsc = (host TSC, scaled if needed) + offset 4781 */ 4782 static uint64_t 4783 calc_guest_tsc(uint64_t host_tsc, uint64_t mult, uint64_t offset) 4784 { 4785 return (vmm_scale_tsc(host_tsc, mult) + offset); 4786 } 4787 4788 /* 4789 * Take a non-atomic "snapshot" of the current: 4790 * - TSC 4791 * - hrtime 4792 * - wall clock time 4793 */ 4794 static void 4795 vmm_time_snapshot(uint64_t *tsc, hrtime_t *hrtime, timespec_t *hrestime) 4796 { 4797 /* 4798 * Disable interrupts while we take the readings: In the absence of a 4799 * mechanism to convert hrtime to hrestime, we want the time between 4800 * each of these measurements to be as small as possible. 
4801 */ 4802 ulong_t iflag = intr_clear(); 4803 4804 hrtime_t hrt = gethrtimeunscaledf(); 4805 *tsc = (uint64_t)hrt; 4806 *hrtime = hrt; 4807 scalehrtime(hrtime); 4808 gethrestime(hrestime); 4809 4810 intr_restore(iflag); 4811 } 4812 4813 /* 4814 * Read VMM Time data 4815 * 4816 * Provides: 4817 * - the current guest TSC and TSC frequency 4818 * - guest boot_hrtime 4819 * - timestamps of the read (hrtime and wall clock time) 4820 */ 4821 static int 4822 vmm_data_read_vmm_time(void *arg, const vmm_data_req_t *req) 4823 { 4824 VERIFY3U(req->vdr_class, ==, VDC_VMM_TIME); 4825 VERIFY3U(req->vdr_version, ==, 1); 4826 VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_time_info_v1)); 4827 4828 struct vm *vm = arg; 4829 struct vdi_time_info_v1 *out = req->vdr_data; 4830 4831 /* Take a snapshot of this point in time */ 4832 uint64_t tsc; 4833 hrtime_t hrtime; 4834 timespec_t hrestime; 4835 vmm_time_snapshot(&tsc, &hrtime, &hrestime); 4836 4837 /* Write the output values */ 4838 out->vt_guest_freq = vm->guest_freq; 4839 4840 /* 4841 * Use only the VM-wide TSC offset for calculating the guest TSC, 4842 * ignoring per-vCPU offsets. This value is provided as a "base" guest 4843 * TSC at the time of the read; per-vCPU offsets are factored in as 4844 * needed elsewhere, either when running the vCPU or if the guest reads 4845 * the TSC via rdmsr. 4846 */ 4847 out->vt_guest_tsc = calc_guest_tsc(tsc, vm->freq_multiplier, 4848 vm->tsc_offset); 4849 out->vt_boot_hrtime = vm->boot_hrtime; 4850 out->vt_hrtime = hrtime; 4851 out->vt_hres_sec = hrestime.tv_sec; 4852 out->vt_hres_ns = hrestime.tv_nsec; 4853 4854 return (0); 4855 } 4856 4857 /* 4858 * Modify VMM Time data related values 4859 * 4860 * This interface serves to allow guests' TSC and device timers to continue 4861 * functioning across live migrations. On a successful write, the VM-wide TSC 4862 * offset and boot_hrtime of the guest are updated. 4863 * 4864 * The interface requires an hrtime of the system at which the caller wrote 4865 * this data; this allows us to adjust the TSC and boot_hrtime slightly to 4866 * account for time passing between the userspace call and application 4867 * of the data here. 4868 * 4869 * There are several possibilities for invalid input, including: 4870 * - a requested guest frequency of 0, or a frequency otherwise unsupported by 4871 * the underlying platform 4872 * - hrtime or boot_hrtime values that appear to be from the future 4873 * - the requested frequency does not match the host, and this system does not 4874 * have hardware TSC scaling support 4875 */ 4876 static int 4877 vmm_data_write_vmm_time(void *arg, const vmm_data_req_t *req) 4878 { 4879 VERIFY3U(req->vdr_class, ==, VDC_VMM_TIME); 4880 VERIFY3U(req->vdr_version, ==, 1); 4881 VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_time_info_v1)); 4882 4883 struct vm *vm = arg; 4884 const struct vdi_time_info_v1 *src = req->vdr_data; 4885 4886 /* 4887 * Platform-specific checks will verify the requested frequency against 4888 * the supported range further, but a frequency of 0 is never valid. 4889 */ 4890 if (src->vt_guest_freq == 0) { 4891 return (EINVAL); 4892 } 4893 4894 /* 4895 * Check whether the request frequency is supported and get the 4896 * frequency multiplier. 
4897 */ 4898 uint64_t mult = VM_TSCM_NOSCALE; 4899 freqratio_res_t res = ops->vmfreqratio(src->vt_guest_freq, 4900 vmm_host_freq, &mult); 4901 switch (res) { 4902 case FR_SCALING_NOT_SUPPORTED: 4903 /* 4904 * This system doesn't support TSC scaling, and the guest/host 4905 * frequencies differ 4906 */ 4907 return (EPERM); 4908 case FR_OUT_OF_RANGE: 4909 /* Requested frequency ratio is too small/large */ 4910 return (EINVAL); 4911 case FR_SCALING_NOT_NEEDED: 4912 /* Host and guest frequencies are the same */ 4913 VERIFY3U(mult, ==, VM_TSCM_NOSCALE); 4914 break; 4915 case FR_VALID: 4916 VERIFY3U(mult, !=, VM_TSCM_NOSCALE); 4917 break; 4918 } 4919 4920 /* 4921 * Find (and validate) the hrtime delta between the input request and 4922 * when we received it so that we can bump the TSC to account for time 4923 * passing. 4924 * 4925 * We ignore the hrestime as input, as this is a field that 4926 * exists for reads. 4927 */ 4928 uint64_t tsc; 4929 hrtime_t hrtime; 4930 timespec_t hrestime; 4931 vmm_time_snapshot(&tsc, &hrtime, &hrestime); 4932 if ((src->vt_hrtime > hrtime) || (src->vt_boot_hrtime > hrtime)) { 4933 /* 4934 * The caller has passed in an hrtime / boot_hrtime from the 4935 * future. 4936 */ 4937 return (EINVAL); 4938 } 4939 hrtime_t hrt_delta = hrtime - src->vt_hrtime; 4940 4941 /* Calculate guest TSC adjustment */ 4942 const uint64_t host_ticks = unscalehrtime(hrt_delta); 4943 const uint64_t guest_ticks = vmm_scale_tsc(host_ticks, 4944 vm->freq_multiplier); 4945 const uint64_t base_guest_tsc = src->vt_guest_tsc + guest_ticks; 4946 4947 /* Update guest time data */ 4948 vm->freq_multiplier = mult; 4949 vm->guest_freq = src->vt_guest_freq; 4950 vm->boot_hrtime = src->vt_boot_hrtime; 4951 vm->tsc_offset = calc_tsc_offset(tsc, base_guest_tsc, 4952 vm->freq_multiplier); 4953 4954 return (0); 4955 } 4956 4957 static const vmm_data_version_entry_t vmm_time_v1 = { 4958 .vdve_class = VDC_VMM_TIME, 4959 .vdve_version = 1, 4960 .vdve_len_expect = sizeof (struct vdi_time_info_v1), 4961 .vdve_readf = vmm_data_read_vmm_time, 4962 .vdve_writef = vmm_data_write_vmm_time, 4963 }; 4964 VMM_DATA_VERSION(vmm_time_v1); 4965 4966 4967 static int 4968 vmm_data_read_versions(void *arg, const vmm_data_req_t *req) 4969 { 4970 VERIFY3U(req->vdr_class, ==, VDC_VERSION); 4971 VERIFY3U(req->vdr_version, ==, 1); 4972 4973 const uint32_t total_size = SET_COUNT(vmm_data_version_entries) * 4974 sizeof (struct vdi_version_entry_v1); 4975 4976 /* Make sure there is room for all of the entries */ 4977 *req->vdr_result_len = total_size; 4978 if (req->vdr_len < *req->vdr_result_len) { 4979 return (ENOSPC); 4980 } 4981 4982 struct vdi_version_entry_v1 *entryp = req->vdr_data; 4983 const vmm_data_version_entry_t **vdpp; 4984 SET_FOREACH(vdpp, vmm_data_version_entries) { 4985 const vmm_data_version_entry_t *vdp = *vdpp; 4986 4987 entryp->vve_class = vdp->vdve_class; 4988 entryp->vve_version = vdp->vdve_version; 4989 entryp->vve_len_expect = vdp->vdve_len_expect; 4990 entryp->vve_len_per_item = vdp->vdve_len_per_item; 4991 entryp++; 4992 } 4993 return (0); 4994 } 4995 4996 static int 4997 vmm_data_write_versions(void *arg, const vmm_data_req_t *req) 4998 { 4999 /* Writing to the version information makes no sense */ 5000 return (EPERM); 5001 } 5002 5003 static const vmm_data_version_entry_t versions_v1 = { 5004 .vdve_class = VDC_VERSION, 5005 .vdve_version = 1, 5006 .vdve_len_per_item = sizeof (struct vdi_version_entry_v1), 5007 .vdve_readf = vmm_data_read_versions, 5008 .vdve_writef = vmm_data_write_versions, 5009 }; 5010 
VMM_DATA_VERSION(versions_v1); 5011 5012 int 5013 vmm_data_read(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 5014 { 5015 int err = 0; 5016 5017 const vmm_data_version_entry_t *entry = NULL; 5018 err = vmm_data_find(req, vcpuid, &entry); 5019 if (err != 0) { 5020 return (err); 5021 } 5022 ASSERT(entry != NULL); 5023 5024 if (entry->vdve_readf != NULL) { 5025 void *datap = vmm_data_from_class(req, vm); 5026 5027 err = entry->vdve_readf(datap, req); 5028 } else if (entry->vdve_vcpu_readf != NULL) { 5029 err = entry->vdve_vcpu_readf(vm, vcpuid, req); 5030 } else { 5031 err = EINVAL; 5032 } 5033 5034 /* 5035 * Successful reads of fixed-length data should populate the length of 5036 * that result. 5037 */ 5038 if (err == 0 && entry->vdve_len_expect != 0) { 5039 *req->vdr_result_len = entry->vdve_len_expect; 5040 } 5041 5042 return (err); 5043 } 5044 5045 int 5046 vmm_data_write(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 5047 { 5048 int err = 0; 5049 5050 const vmm_data_version_entry_t *entry = NULL; 5051 err = vmm_data_find(req, vcpuid, &entry); 5052 if (err != 0) { 5053 return (err); 5054 } 5055 ASSERT(entry != NULL); 5056 5057 if (entry->vdve_writef != NULL) { 5058 void *datap = vmm_data_from_class(req, vm); 5059 5060 err = entry->vdve_writef(datap, req); 5061 } else if (entry->vdve_vcpu_writef != NULL) { 5062 err = entry->vdve_vcpu_writef(vm, vcpuid, req); 5063 } else { 5064 err = EINVAL; 5065 } 5066 5067 /* 5068 * Successful writes of fixed-length data should populate the length of 5069 * that result. 5070 */ 5071 if (err == 0 && entry->vdve_len_expect != 0) { 5072 *req->vdr_result_len = entry->vdve_len_expect; 5073 } 5074 5075 return (err); 5076 } 5077
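
/*
 * The block below is an illustrative, freestanding restatement of the
 * guest-TSC arithmetic documented in the GUEST TIME SUPPORT comment above
 * (fixed-point frequency multiplier, VM-wide offset, and guest TSC
 * reconstruction). It is excluded from the build and is not part of this
 * driver: the example_* names, the choice of 32 fractional bits, and the
 * reliance on the compiler's unsigned __int128 extension are assumptions
 * made only for the sake of the example.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define	EXAMPLE_FRAC_BITS	32

/* (guest_hz * 2^FRAC) / host_hz, per the formula in the comment above */
static uint64_t
example_freq_multiplier(uint64_t guest_hz, uint64_t host_hz)
{
	return ((uint64_t)(((unsigned __int128)guest_hz <<
	    EXAMPLE_FRAC_BITS) / host_hz));
}

/* Fixed-point scaling: (tsc * mult) >> FRAC */
static uint64_t
example_scale_tsc(uint64_t tsc, uint64_t mult)
{
	return ((uint64_t)(((unsigned __int128)tsc * mult) >>
	    EXAMPLE_FRAC_BITS));
}

/* offset = base_guest_tsc - (scaled host TSC), using unsigned wrap-around */
static uint64_t
example_tsc_offset(uint64_t host_tsc, uint64_t base_guest_tsc, uint64_t mult)
{
	return (base_guest_tsc - example_scale_tsc(host_tsc, mult));
}

int
main(void)
{
	/* A 1.5GHz guest imported onto a 2.0GHz host (illustrative numbers) */
	const uint64_t mult = example_freq_multiplier(1500000000, 2000000000);
	const uint64_t host_tsc_at_import = 300000000000ULL;
	const uint64_t base_guest_tsc = 15000000000ULL;

	const uint64_t offset = example_tsc_offset(host_tsc_at_import,
	    base_guest_tsc, mult);

	/* Five host seconds later (10e9 host ticks at 2GHz)... */
	const uint64_t host_tsc_later = host_tsc_at_import + 10000000000ULL;

	/* ...the guest should see 15e9 + 7.5e9 = 22500000000 */
	printf("guest TSC: %llu\n", (unsigned long long)
	    (example_scale_tsc(host_tsc_later, mult) + offset));

	return (0);
}
#endif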