/*	$OpenBSD: vmmvar.h,v 1.23 2016/10/26 05:15:13 mlarkin Exp $	*/
/*
 * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * CPU capabilities for VMM operation
 */
#ifndef _MACHINE_VMMVAR_H_
#define _MACHINE_VMMVAR_H_

#define VMM_HV_SIGNATURE	"OpenBSDVMM58"

#define VMM_MAX_MEM_RANGES	16
#define VMM_MAX_DISKS_PER_VM	2
#define VMM_MAX_PATH_DISK	128
#define VMM_MAX_NAME_LEN	32
#define VMM_MAX_KERNEL_PATH	128
#define VMM_MAX_VCPUS_PER_VM	64
#define VMM_MAX_VM_MEM_SIZE	(512 * 1024)
#define VMM_MAX_NICS_PER_VM	4

#define VMM_PCI_MMIO_BAR_BASE	0xF0000000
#define VMM_PCI_MMIO_BAR_END	0xF0FFFFFF
#define VMM_PCI_MMIO_BAR_SIZE	0x00010000
#define VMM_PCI_IO_BAR_BASE	0x1000
#define VMM_PCI_IO_BAR_END	0xFFFF
#define VMM_PCI_IO_BAR_SIZE	0x1000

/* VMX: Basic Exit Reasons */
#define VMX_EXIT_NMI				0
#define VMX_EXIT_EXTINT				1
#define VMX_EXIT_TRIPLE_FAULT			2
#define VMX_EXIT_INIT				3
#define VMX_EXIT_SIPI				4
#define VMX_EXIT_IO_SMI				5
#define VMX_EXIT_OTHER_SMI			6
#define VMX_EXIT_INT_WINDOW			7
#define VMX_EXIT_NMI_WINDOW			8
#define VMX_EXIT_TASK_SWITCH			9
#define VMX_EXIT_CPUID				10
#define VMX_EXIT_GETSEC				11
#define VMX_EXIT_HLT				12
#define VMX_EXIT_INVD				13
#define VMX_EXIT_INVLPG				14
#define VMX_EXIT_RDPMC				15
#define VMX_EXIT_RDTSC				16
#define VMX_EXIT_RSM				17
#define VMX_EXIT_VMCALL				18
#define VMX_EXIT_VMCLEAR			19
#define VMX_EXIT_VMLAUNCH			20
#define VMX_EXIT_VMPTRLD			21
#define VMX_EXIT_VMPTRST			22
#define VMX_EXIT_VMREAD				23
#define VMX_EXIT_VMRESUME			24
#define VMX_EXIT_VMWRITE			25
#define VMX_EXIT_VMXOFF				26
#define VMX_EXIT_VMXON				27
#define VMX_EXIT_CR_ACCESS			28
#define VMX_EXIT_MOV_DR				29
#define VMX_EXIT_IO				30
#define VMX_EXIT_RDMSR				31
#define VMX_EXIT_WRMSR				32
#define VMX_EXIT_ENTRY_FAILED_GUEST_STATE	33
#define VMX_EXIT_ENTRY_FAILED_MSR_LOAD		34
#define VMX_EXIT_MWAIT				36
#define VMX_EXIT_MTF				37
#define VMX_EXIT_MONITOR			39
#define VMX_EXIT_PAUSE				40
#define VMX_EXIT_ENTRY_FAILED_MCE		41
#define VMX_EXIT_TPR_BELOW_THRESHOLD		43
#define VMX_EXIT_APIC_ACCESS			44
#define VMX_EXIT_VIRTUALIZED_EOI		45
#define VMX_EXIT_GDTR_IDTR			46
#define VMX_EXIT_LDTR_TR			47
#define VMX_EXIT_EPT_VIOLATION			48
#define VMX_EXIT_EPT_MISCONFIGURATION		49
#define VMX_EXIT_INVEPT				50
#define VMX_EXIT_RDTSCP				51
#define VMX_EXIT_VMX_PREEMPTION_TIMER_EXPIRED	52
#define VMX_EXIT_INVVPID			53
#define VMX_EXIT_WBINVD				54
#define VMX_EXIT_XSETBV				55
#define VMX_EXIT_APIC_WRITE			56
#define VMX_EXIT_RDRAND				57
#define VMX_EXIT_INVPCID			58
#define VMX_EXIT_VMFUNC				59
#define VMX_EXIT_RDSEED				61
#define VMX_EXIT_XSAVES				63
#define VMX_EXIT_XRSTORS			64
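
/*
 * Illustrative sketch, not part of the original header: the VMX exit
 * handling code in vmm.c dispatches on the exit reason codes above.
 * The minimal decoder below only shows the kind of mapping involved;
 * its name and the handful of cases covered are hypothetical, not the
 * kernel's own function.
 */
static inline const char *
vmx_exit_reason_name_sketch(uint16_t reason)
{
	switch (reason) {
	case VMX_EXIT_CPUID:
		return "CPUID";
	case VMX_EXIT_HLT:
		return "HLT";
	case VMX_EXIT_IO:
		return "I/O";
	case VMX_EXIT_EPT_VIOLATION:
		return "EPT violation";
	default:
		return "unknown";
	}
}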

/*
 * VMX: Misc defines
 */
#define VMX_MAX_CR3_TARGETS	256

#define VM_EXIT_TERMINATED	0xFFFE
#define VM_EXIT_NONE		0xFFFF

/*
 * VCPU state values. Note that there is a conversion function in vmm.c
 * (vcpu_state_decode) that converts these to human readable strings,
 * so this enum and vcpu_state_decode should be kept in sync.
 */
enum {
	VCPU_STATE_STOPPED,
	VCPU_STATE_RUNNING,
	VCPU_STATE_REQTERM,
	VCPU_STATE_TERMINATED,
	VCPU_STATE_UNKNOWN,
};

enum {
	VEI_DIR_OUT,
	VEI_DIR_IN
};

/*
 * vm exit data
 *  vm_exit_inout		: describes an IN/OUT exit
 */
struct vm_exit_inout {
	uint8_t			vei_size;	/* Size of access */
	uint8_t			vei_dir;	/* Direction */
	uint8_t			vei_rep;	/* REP prefix? */
	uint8_t			vei_string;	/* string variety? */
	uint8_t			vei_encoding;	/* operand encoding */
	uint16_t		vei_port;	/* port */
	uint32_t		vei_data;	/* data (for IN insns) */
};

union vm_exit {
	struct vm_exit_inout	vei;		/* IN/OUT exit */
};

/*
 * struct vcpu_segment_info describes a segment + selector set, used
 * in constructing the initial vcpu register content
 */
struct vcpu_segment_info {
	uint16_t	vsi_sel;
	uint32_t	vsi_limit;
	uint32_t	vsi_ar;
	uint64_t	vsi_base;
};

#define VCPU_REGS_RAX		0
#define VCPU_REGS_RBX		1
#define VCPU_REGS_RCX		2
#define VCPU_REGS_RDX		3
#define VCPU_REGS_RSI		4
#define VCPU_REGS_RDI		5
#define VCPU_REGS_R8		6
#define VCPU_REGS_R9		7
#define VCPU_REGS_R10		8
#define VCPU_REGS_R11		9
#define VCPU_REGS_R12		10
#define VCPU_REGS_R13		11
#define VCPU_REGS_R14		12
#define VCPU_REGS_R15		13
#define VCPU_REGS_RSP		14
#define VCPU_REGS_RBP		15
#define VCPU_REGS_RIP		16
#define VCPU_REGS_RFLAGS	17
#define VCPU_REGS_NGPRS		(VCPU_REGS_RFLAGS + 1)

#define VCPU_REGS_CR0		0
#define VCPU_REGS_CR2		1
#define VCPU_REGS_CR3		2
#define VCPU_REGS_CR4		3
#define VCPU_REGS_CR8		4
#define VCPU_REGS_NCRS		(VCPU_REGS_CR8 + 1)

#define VCPU_REGS_CS		0
#define VCPU_REGS_DS		1
#define VCPU_REGS_ES		2
#define VCPU_REGS_FS		3
#define VCPU_REGS_GS		4
#define VCPU_REGS_SS		5
#define VCPU_REGS_LDTR		6
#define VCPU_REGS_TR		7
#define VCPU_REGS_NSREGS	(VCPU_REGS_TR + 1)

struct vcpu_reg_state {
	uint64_t			vrs_gprs[VCPU_REGS_NGPRS];
	uint64_t			vrs_crs[VCPU_REGS_NCRS];
	struct vcpu_segment_info	vrs_sregs[VCPU_REGS_NSREGS];
	struct vcpu_segment_info	vrs_gdtr;
	struct vcpu_segment_info	vrs_idtr;
};

struct vm_mem_range {
	paddr_t	vmr_gpa;
	vaddr_t	vmr_va;
	size_t	vmr_size;
};

struct vm_create_params {
	/* Input parameters to VMM_IOC_CREATE */
	size_t			vcp_nmemranges;
	size_t			vcp_ncpus;
	size_t			vcp_ndisks;
	size_t			vcp_nnics;
	struct vm_mem_range	vcp_memranges[VMM_MAX_MEM_RANGES];
	char			vcp_disks[VMM_MAX_DISKS_PER_VM][VMM_MAX_PATH_DISK];
	char			vcp_name[VMM_MAX_NAME_LEN];
	char			vcp_kernel[VMM_MAX_KERNEL_PATH];
	uint8_t			vcp_macs[VMM_MAX_NICS_PER_VM][6];

	/* Output parameter from VMM_IOC_CREATE */
	uint32_t		vcp_id;
};

struct vm_run_params {
	/* Input parameters to VMM_IOC_RUN */
	uint32_t	vrp_vm_id;
	uint32_t	vrp_vcpu_id;
	uint8_t		vrp_continue;		/* Continuing from an exit */
	uint16_t	vrp_irq;		/* IRQ to inject */

	/* Input/output parameter to VMM_IOC_RUN */
	union vm_exit	*vrp_exit;		/* updated exit data */

	/* Output parameter from VMM_IOC_RUN */
	uint16_t	vrp_exit_reason;	/* exit reason */
	uint8_t		vrp_irqready;		/* ready for IRQ on entry */
};
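
/*
 * Illustrative sketch, not part of the original header: the VCPU_REGS_*
 * indices above select entries in the flat arrays of struct
 * vcpu_reg_state, e.g. when preparing the initial register set passed
 * in via struct vm_resetcpu_params.  The helper name below is
 * hypothetical and only demonstrates the indexing.
 */
static inline void
vcpu_reg_state_set_entry_sketch(struct vcpu_reg_state *vrs, uint64_t rip,
    uint64_t rsp)
{
	vrs->vrs_gprs[VCPU_REGS_RIP] = rip;
	vrs->vrs_gprs[VCPU_REGS_RSP] = rsp;
	vrs->vrs_gprs[VCPU_REGS_RFLAGS] = 0x2;	/* bit 1 of RFLAGS is always set */
}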

struct vm_info_result {
	/* Output parameters from VMM_IOC_INFO */
	size_t		vir_memory_size;
	size_t		vir_used_size;
	size_t		vir_ncpus;
	uint8_t		vir_vcpu_state[VMM_MAX_VCPUS_PER_VM];
	pid_t		vir_creator_pid;
	uint32_t	vir_id;
	char		vir_name[VMM_MAX_NAME_LEN];
};

struct vm_info_params {
	/* Input parameters to VMM_IOC_INFO */
	size_t			 vip_size;	/* Output buffer size */

	/* Output Parameters from VMM_IOC_INFO */
	size_t			 vip_info_ct;	/* # of entries returned */
	struct vm_info_result	*vip_info;	/* Output buffer */
};

struct vm_terminate_params {
	/* Input parameters to VMM_IOC_TERM */
	uint32_t		vtp_vm_id;
};

struct vm_resetcpu_params {
	/* Input parameters to VMM_IOC_RESETCPU */
	uint32_t		vrp_vm_id;
	uint32_t		vrp_vcpu_id;
	struct vcpu_reg_state	vrp_init_state;
};

struct vm_intr_params {
	/* Input parameters to VMM_IOC_INTR */
	uint32_t		vip_vm_id;
	uint32_t		vip_vcpu_id;
	uint16_t		vip_intr;
};

#define VM_RWREGS_GPRS	0x1	/* read/write GPRs */
#define VM_RWREGS_SREGS	0x2	/* read/write segment registers */
#define VM_RWREGS_CRS	0x4	/* read/write CRs */
#define VM_RWREGS_ALL	(VM_RWREGS_GPRS | VM_RWREGS_SREGS | VM_RWREGS_CRS)

struct vm_rwregs_params {
	uint32_t		vrwp_vm_id;
	uint32_t		vrwp_vcpu_id;
	uint64_t		vrwp_mask;
	struct vcpu_reg_state	vrwp_regs;
};

/* IOCTL definitions */
#define VMM_IOC_CREATE _IOWR('V', 1, struct vm_create_params) /* Create VM */
#define VMM_IOC_RUN _IOWR('V', 2, struct vm_run_params) /* Run VCPU */
#define VMM_IOC_INFO _IOWR('V', 3, struct vm_info_params) /* Get VM Info */
#define VMM_IOC_TERM _IOW('V', 4, struct vm_terminate_params) /* Terminate VM */
#define VMM_IOC_RESETCPU _IOW('V', 5, struct vm_resetcpu_params) /* Reset */
#define VMM_IOC_INTR _IOW('V', 6, struct vm_intr_params) /* Intr pending */
#define VMM_IOC_READREGS _IOWR('V', 7, struct vm_rwregs_params) /* Get registers */
#define VMM_IOC_WRITEREGS _IOW('V', 8, struct vm_rwregs_params) /* Set registers */
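
/*
 * Illustrative usage sketch, not part of the original header: userland
 * issues the ioctls above against the vmm(4) device node (the real
 * consumer is vmd(8)).  The device path, open flags and error handling
 * below are assumptions for illustration only:
 *
 *	struct vm_info_params vip;
 *	struct vm_info_result vir[4];
 *	int fd;
 *
 *	memset(&vip, 0, sizeof(vip));
 *	vip.vip_size = sizeof(vir);
 *	vip.vip_info = vir;
 *	if ((fd = open("/dev/vmm", O_RDWR)) == -1)
 *		err(1, "open");
 *	if (ioctl(fd, VMM_IOC_INFO, &vip) == -1)
 *		err(1, "VMM_IOC_INFO");
 *	printf("%zu VM(s) found\n", vip.vip_info_ct);
 */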

#ifdef _KERNEL

#define VMX_FAIL_LAUNCH_UNKNOWN		1
#define VMX_FAIL_LAUNCH_INVALID_VMCS	2
#define VMX_FAIL_LAUNCH_VALID_VMCS	3

#define VMX_NUM_MSR_STORE		6

/* MSR bitmap manipulation macros */
#define MSRIDX(m)	((m) / 8)
#define MSRBIT(m)	(1 << (m) % 8)

enum {
	VMM_MODE_UNKNOWN,
	VMM_MODE_VMX,
	VMM_MODE_EPT,
	VMM_MODE_SVM,
	VMM_MODE_RVI
};

enum {
	VMM_MEM_TYPE_REGULAR,
	VMM_MEM_TYPE_UNKNOWN
};

/* Forward declarations */
struct vm;

/*
 * Implementation-specific cpu state
 */
struct vmcb {
};

struct vmcs {
	uint32_t	vmcs_revision;
};

struct vmx_invvpid_descriptor
{
	uint64_t	vid_vpid;
	uint64_t	vid_addr;
};

struct vmx_invept_descriptor
{
	uint64_t	vid_eptp;
	uint64_t	vid_reserved;
};

struct vmx_msr_store
{
	uint64_t	vms_index;
	uint64_t	vms_data;
};

/*
 * Storage for guest registers not preserved in VMCS and various exit
 * information.
 *
 * Note that vmx_enter_guest depends on the layout of this struct for
 * field access.
 */
struct vmx_gueststate
{
	/* %rsi should be first */
	uint64_t	vg_rsi;			/* 0x00 */
	uint64_t	vg_rax;			/* 0x08 */
	uint64_t	vg_rbx;			/* 0x10 */
	uint64_t	vg_rcx;			/* 0x18 */
	uint64_t	vg_rdx;			/* 0x20 */
	uint64_t	vg_rdi;			/* 0x28 */
	uint64_t	vg_rbp;			/* 0x30 */
	uint64_t	vg_r8;			/* 0x38 */
	uint64_t	vg_r9;			/* 0x40 */
	uint64_t	vg_r10;			/* 0x48 */
	uint64_t	vg_r11;			/* 0x50 */
	uint64_t	vg_r12;			/* 0x58 */
	uint64_t	vg_r13;			/* 0x60 */
	uint64_t	vg_r14;			/* 0x68 */
	uint64_t	vg_r15;			/* 0x70 */
	uint64_t	vg_cr2;			/* 0x78 */
	uint64_t	vg_rip;			/* 0x80 */
	uint32_t	vg_exit_reason;		/* 0x88 */
	uint64_t	vg_rflags;		/* 0x90 */
};

/*
 * Virtual Machine
 */
struct vm;

/*
 * Virtual CPU
 */
struct vcpu {
	/* VMCS / VMCB pointer */
	vaddr_t		vc_control_va;
	uint64_t	vc_control_pa;

	/* VLAPIC pointer */
	vaddr_t		vc_vlapic_va;
	uint64_t	vc_vlapic_pa;

	/* MSR bitmap address */
	vaddr_t		vc_msr_bitmap_va;
	uint64_t	vc_msr_bitmap_pa;

	struct vm	*vc_parent;
	uint32_t	vc_id;
	u_int		vc_state;
	SLIST_ENTRY(vcpu) vc_vcpu_link;
	vaddr_t		vc_hsa_stack_va;

	uint8_t		vc_virt_mode;

	struct cpu_info	*vc_last_pcpu;
	union vm_exit	vc_exit;

	uint16_t	vc_intr;
	uint8_t		vc_irqready;

	/* VMX only */
	uint64_t	vc_vmx_basic;
	uint64_t	vc_vmx_entry_ctls;
	uint64_t	vc_vmx_true_entry_ctls;
	uint64_t	vc_vmx_exit_ctls;
	uint64_t	vc_vmx_true_exit_ctls;
	uint64_t	vc_vmx_pinbased_ctls;
	uint64_t	vc_vmx_true_pinbased_ctls;
	uint64_t	vc_vmx_procbased_ctls;
	uint64_t	vc_vmx_true_procbased_ctls;
	uint64_t	vc_vmx_procbased2_ctls;
	struct vmx_gueststate vc_gueststate;
	vaddr_t		vc_vmx_msr_exit_save_va;
	paddr_t		vc_vmx_msr_exit_save_pa;
	vaddr_t		vc_vmx_msr_exit_load_va;
	paddr_t		vc_vmx_msr_exit_load_pa;
	vaddr_t		vc_vmx_msr_entry_load_va;
	paddr_t		vc_vmx_msr_entry_load_pa;
};

SLIST_HEAD(vcpu_head, vcpu);

void	vmm_dispatch_intr(vaddr_t);
int	vmxon(uint64_t *);
int	vmxoff(void);
int	vmclear(uint64_t *);
int	vmptrld(uint64_t *);
int	vmptrst(uint64_t *);
int	vmwrite(uint64_t, uint64_t);
int	vmread(uint64_t, uint64_t *);
void	invvpid(uint64_t, struct vmx_invvpid_descriptor *);
void	invept(uint64_t, struct vmx_invept_descriptor *);
int	vmx_enter_guest(uint64_t *, struct vmx_gueststate *, int);
void	start_vmm_on_cpu(struct cpu_info *);
void	stop_vmm_on_cpu(struct cpu_info *);

#endif /* _KERNEL */

#endif /* ! _MACHINE_VMMVAR_H_ */
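
/*
 * Illustrative note, not part of the original header: vmx_enter_guest()
 * accesses struct vmx_gueststate by the fixed offsets noted beside its
 * fields, which is why vg_rsi must remain the first member and the
 * layout must not change.  Assuming a CTASSERT()-style compile-time
 * assertion macro were available, the dependency could be pinned down
 * with hypothetical checks such as:
 *
 *	CTASSERT(offsetof(struct vmx_gueststate, vg_rsi) == 0x00);
 *	CTASSERT(offsetof(struct vmx_gueststate, vg_rip) == 0x80);
 *	CTASSERT(offsetof(struct vmx_gueststate, vg_rflags) == 0x90);
 */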