/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2017 Joyent, Inc.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/specialreg.h>
#include <machine/vmm.h>
#include "vmx.h"

/* Bits 0-30 of VMX_BASIC MSR contain VMCS revision identifier */
#define	VMX_BASIC_REVISION(v)	((v) & 0x7fffffff)

/*
 * Map a VM_REG_GUEST_* register identifier to the VMCS field encoding used
 * by vmread/vmwrite.  Returns VMCS_INVALID_ENCODING for identifiers which
 * have no direct VMCS field (segment base/limit/access fields are handled
 * separately by vmcs_seg_desc_encoding()).
 */
uint32_t
vmcs_field_encoding(int ident)
{
	switch (ident) {
	case VM_REG_GUEST_CR0:
		return (VMCS_GUEST_CR0);
	case VM_REG_GUEST_CR3:
		return (VMCS_GUEST_CR3);
	case VM_REG_GUEST_CR4:
		return (VMCS_GUEST_CR4);
	case VM_REG_GUEST_DR7:
		return (VMCS_GUEST_DR7);
	case VM_REG_GUEST_RSP:
		return (VMCS_GUEST_RSP);
	case VM_REG_GUEST_RIP:
		return (VMCS_GUEST_RIP);
	case VM_REG_GUEST_RFLAGS:
		return (VMCS_GUEST_RFLAGS);
	/* For segment registers, only the selector field is mapped here. */
	case VM_REG_GUEST_ES:
		return (VMCS_GUEST_ES_SELECTOR);
	case VM_REG_GUEST_CS:
		return (VMCS_GUEST_CS_SELECTOR);
	case VM_REG_GUEST_SS:
		return (VMCS_GUEST_SS_SELECTOR);
	case VM_REG_GUEST_DS:
		return (VMCS_GUEST_DS_SELECTOR);
	case VM_REG_GUEST_FS:
		return (VMCS_GUEST_FS_SELECTOR);
	case VM_REG_GUEST_GS:
		return (VMCS_GUEST_GS_SELECTOR);
	case VM_REG_GUEST_TR:
		return (VMCS_GUEST_TR_SELECTOR);
	case VM_REG_GUEST_LDTR:
		return (VMCS_GUEST_LDTR_SELECTOR);
	case VM_REG_GUEST_EFER:
		return (VMCS_GUEST_IA32_EFER);
	case VM_REG_GUEST_PDPTE0:
		return (VMCS_GUEST_PDPTE0);
	case VM_REG_GUEST_PDPTE1:
		return (VMCS_GUEST_PDPTE1);
	case VM_REG_GUEST_PDPTE2:
		return (VMCS_GUEST_PDPTE2);
	case VM_REG_GUEST_PDPTE3:
		return (VMCS_GUEST_PDPTE3);
	case VM_REG_GUEST_ENTRY_INST_LENGTH:
		return (VMCS_ENTRY_INST_LENGTH);
	default:
		return (VMCS_INVALID_ENCODING);
	}
}

/*
 * Look up the VMCS field encodings for the base, limit, and access-rights
 * fields of segment (descriptor) register 'seg', storing them through
 * 'base', 'lim', and 'acc' respectively.  IDTR and GDTR have no
 * access-rights field in the VMCS, so *acc is set to VMCS_INVALID_ENCODING
 * for those.  Panics on an unrecognized segment identifier.
 */
void
vmcs_seg_desc_encoding(int seg, uint32_t *base, uint32_t *lim, uint32_t *acc)
{
	switch (seg) {
	case VM_REG_GUEST_ES:
		*base = VMCS_GUEST_ES_BASE;
		*lim = VMCS_GUEST_ES_LIMIT;
		*acc = VMCS_GUEST_ES_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_CS:
		*base = VMCS_GUEST_CS_BASE;
		*lim = VMCS_GUEST_CS_LIMIT;
		*acc = VMCS_GUEST_CS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_SS:
		*base = VMCS_GUEST_SS_BASE;
		*lim = VMCS_GUEST_SS_LIMIT;
		*acc = VMCS_GUEST_SS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_DS:
		*base = VMCS_GUEST_DS_BASE;
		*lim = VMCS_GUEST_DS_LIMIT;
		*acc = VMCS_GUEST_DS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_FS:
		*base = VMCS_GUEST_FS_BASE;
		*lim = VMCS_GUEST_FS_LIMIT;
		*acc = VMCS_GUEST_FS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_GS:
		*base = VMCS_GUEST_GS_BASE;
		*lim = VMCS_GUEST_GS_LIMIT;
		*acc = VMCS_GUEST_GS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_TR:
		*base = VMCS_GUEST_TR_BASE;
		*lim = VMCS_GUEST_TR_LIMIT;
		*acc = VMCS_GUEST_TR_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_LDTR:
		*base = VMCS_GUEST_LDTR_BASE;
		*lim = VMCS_GUEST_LDTR_LIMIT;
		*acc = VMCS_GUEST_LDTR_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_IDTR:
		*base = VMCS_GUEST_IDTR_BASE;
		*lim = VMCS_GUEST_IDTR_LIMIT;
		/* IDTR has no access-rights field in the VMCS */
		*acc = VMCS_INVALID_ENCODING;
		break;
	case VM_REG_GUEST_GDTR:
		*base = VMCS_GUEST_GDTR_BASE;
		*lim = VMCS_GUEST_GDTR_LIMIT;
		/* GDTR has no access-rights field in the VMCS */
		*acc = VMCS_INVALID_ENCODING;
		break;
	default:
		panic("invalid segment register %d", seg);
	}
}

/*
 * Execute vmclear on the VMCS at physical address 'vmcs_pa', rendering it
 * inactive on this CPU, and exit the critical section entered by the
 * matching vmcs_load().  Panics if the vmclear instruction fails.
 */
void
vmcs_clear(uintptr_t vmcs_pa)
{
	int err;

	__asm __volatile("vmclear %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmclear(%p) error %d", (void *)vmcs_pa, err);
	}

	/*
	 * A call to critical_enter() was made in vmcs_load() to prevent
	 * preemption. Now that the VMCS is unloaded, it is safe to relax that
	 * restriction.
	 */
	critical_exit();
}

/*
 * Initialize the VMCS at 'vmcs'/'vmcs_pa': stamp it with the VMCS revision
 * identifier reported by the VMX_BASIC MSR (required before first use) and
 * vmclear it to put it into a launchable state.  Panics if vmclear fails.
 */
void
vmcs_initialize(struct vmcs *vmcs, uintptr_t vmcs_pa)
{
	int err;

	/* set to VMCS revision */
	vmcs->identifier = VMX_BASIC_REVISION(rdmsr(MSR_VMX_BASIC));

	/*
	 * Perform a vmclear on the VMCS, but without the critical section
	 * manipulation as done by vmcs_clear() above.
	 */
	__asm __volatile("vmclear %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmclear(%p) error %d", (void *)vmcs_pa, err);
	}
}

/*
 * Make the VMCS at physical address 'vmcs_pa' current on this CPU via
 * vmptrld so that subsequent vmread/vmwrite operations target it.  Enters a
 * critical section which is exited by the matching vmcs_clear().  Panics if
 * the vmptrld instruction fails.
 */
void
vmcs_load(uintptr_t vmcs_pa)
{
	int err;

	/*
	 * While the VMCS is loaded on the CPU for subsequent operations, it is
	 * important that the thread not be preempted. That is ensured with
	 * critical_enter() here, with a matching critical_exit() call in
	 * vmcs_clear() once the VMCS is unloaded.
	 */
	critical_enter();

	__asm __volatile("vmptrld %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmptrld(%p) error %d", (void *)vmcs_pa, err);
	}
}

/*
 * Read the field with VMCS encoding 'encoding' from the currently-loaded
 * VMCS and return its value.  The caller must have loaded a VMCS via
 * vmcs_load().  Panics if the vmread instruction fails.
 */
uint64_t
vmcs_read(uint32_t encoding)
{
	int error;
	uint64_t val;

	__asm __volatile("vmread %[enc], %[val];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error), [val] "=r" (val)
	    : [enc] "r" ((uint64_t)encoding)
	    : "memory");

	if (error != 0) {
		panic("vmread(%x) error %d", encoding, error);
	}

	return (val);
}

/*
 * Write 'val' to the field with VMCS encoding 'encoding' in the
 * currently-loaded VMCS.  The caller must have loaded a VMCS via
 * vmcs_load().  Panics if the vmwrite instruction fails.
 */
void
vmcs_write(uint32_t encoding, uint64_t val)
{
	int error;

	__asm __volatile("vmwrite %[val], %[enc];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [val] "r" (val), [enc] "r" ((uint64_t)encoding)
	    : "memory");

	if (error != 0) {
		panic("vmwrite(%x, %lx) error %d", encoding, val, error);
	}
}