/*	$OpenBSD: vcpu.c,v 1.6 2023/05/13 23:15:28 dv Exp $	*/

/*
 * Copyright (c) 2022 Dave Voutila <dv@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <machine/specialreg.h>
#include <machine/vmmvar.h>

#include <dev/vmm/vmm.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define KIB		1024
#define MIB		(1 << 20)
#define VMM_NODE	"/dev/vmm"

#define PCKBC_AUX	0x61

const char *VM_NAME = "regress";

/* Originally from vmd(8)'s vm.c */
const struct vcpu_reg_state vcpu_init_flat16 = {
	.vrs_gprs[VCPU_REGS_RFLAGS] = 0x2,
	.vrs_gprs[VCPU_REGS_RIP] = 0xFFF0,
	.vrs_gprs[VCPU_REGS_RSP] = 0x0,
	.vrs_crs[VCPU_REGS_CR0] = 0x60000010,
	.vrs_crs[VCPU_REGS_CR3] = 0,
	.vrs_sregs[VCPU_REGS_CS] = { 0xF000, 0xFFFF, 0x809F, 0xF0000},
	.vrs_sregs[VCPU_REGS_DS] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_sregs[VCPU_REGS_ES] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_sregs[VCPU_REGS_FS] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_sregs[VCPU_REGS_GS] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_sregs[VCPU_REGS_SS] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_gdtr = { 0x0, 0xFFFF, 0x0, 0x0},
	.vrs_idtr = { 0x0, 0xFFFF, 0x0, 0x0},
	.vrs_sregs[VCPU_REGS_LDTR] = { 0x0, 0xFFFF, 0x0082, 0x0},
	.vrs_sregs[VCPU_REGS_TR] = { 0x0, 0xFFFF, 0x008B, 0x0},
	.vrs_msrs[VCPU_REGS_EFER] = 0ULL,
	.vrs_drs[VCPU_REGS_DR0] = 0x0,
	.vrs_drs[VCPU_REGS_DR1] = 0x0,
	.vrs_drs[VCPU_REGS_DR2] = 0x0,
	.vrs_drs[VCPU_REGS_DR3] = 0x0,
	.vrs_drs[VCPU_REGS_DR6] = 0xFFFF0FF0,
	.vrs_drs[VCPU_REGS_DR7] = 0x400,
	.vrs_msrs[VCPU_REGS_STAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_LSTAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_CSTAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_SFMASK] = 0ULL,
	.vrs_msrs[VCPU_REGS_KGSBASE] = 0ULL,
	.vrs_crs[VCPU_REGS_XCR0] = XFEATURE_X87
};

int
main(int argc, char **argv)
{
	struct vm_create_params vcp;
	struct vm_exit *exit = NULL;
	struct vm_info_params vip;
	struct vm_info_result *info = NULL, *ours = NULL;
	struct vm_resetcpu_params vresetp;
	struct vm_run_params vrunp;
	struct vm_terminate_params vtp;
	struct vm_sharemem_params vsp;

	struct vm_mem_range *vmr;
	int fd, ret = 1;
	size_t i, j;
	void *p;

	fd = open(VMM_NODE, O_RDWR);
	if (fd == -1)
		err(1, "open %s", VMM_NODE);

	/*
	 * 1. Create our VM with 1 vcpu and 2 MiB of memory.
	 */
	memset(&vcp, 0, sizeof(vcp));
	strlcpy(vcp.vcp_name, VM_NAME, sizeof(vcp.vcp_name));
	vcp.vcp_ncpus = 1;

	/* Split into two ranges, similar to how vmd(8) might do it. */
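	/*
	 * The 640 KiB boundary mirrors the classic PC "low memory" limit;
	 * two ranges are enough to exercise the multi-range paths in the
	 * ioctls below without a full vmd(8)-style layout.
	 */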
	vcp.vcp_nmemranges = 2;
	vcp.vcp_memranges[0].vmr_gpa = 0x0;
	vcp.vcp_memranges[0].vmr_size = 640 * KIB;
	vcp.vcp_memranges[1].vmr_gpa = 640 * KIB;
	vcp.vcp_memranges[1].vmr_size = (2 * MIB) - (640 * KIB);

	/* Allocate memory. */
	for (i = 0; i < vcp.vcp_nmemranges; i++) {
		vmr = &vcp.vcp_memranges[i];
		p = mmap(NULL, vmr->vmr_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, 0);
		if (p == MAP_FAILED)
			err(1, "mmap");

		/*
		 * Fill with 2-byte IN instructions that read from what would
		 * be an ancient XT PC Keyboard status port. These reads will
		 * trigger vm exits.
		 */
		if (vmr->vmr_size % 2 != 0)
			errx(1, "memory ranges must be multiple of 2");
		for (j = 0; j < vmr->vmr_size; j += 2) {
			((uint8_t *)p)[j + 0] = 0xE4;	/* IN al, imm8 */
			((uint8_t *)p)[j + 1] = PCKBC_AUX;
		}
		vmr->vmr_va = (vaddr_t)p;
		printf("created mapped region %zu: { gpa: 0x%08lx, size: %lu,"
		    " hva: 0x%lx }\n", i, vmr->vmr_gpa, vmr->vmr_size,
		    vmr->vmr_va);
	}

	if (ioctl(fd, VMM_IOC_CREATE, &vcp) == -1)
		err(1, "VMM_IOC_CREATE");
	printf("created vm %d named \"%s\"\n", vcp.vcp_id, vcp.vcp_name);

	/*
	 * 2. Check we can create shared memory mappings.
	 */
	memset(&vsp, 0, sizeof(vsp));
	vsp.vsp_nmemranges = vcp.vcp_nmemranges;
	memcpy(&vsp.vsp_memranges, &vcp.vcp_memranges,
	    sizeof(vsp.vsp_memranges));
	vsp.vsp_vm_id = vcp.vcp_id;

	/* Find some new va ranges... */
	for (i = 0; i < vsp.vsp_nmemranges; i++) {
		vmr = &vsp.vsp_memranges[i];
		p = mmap(NULL, vmr->vmr_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, 0);
		if (p == MAP_FAILED)
			err(1, "mmap");
		vmr->vmr_va = (vaddr_t)p;
	}

	/* Release our mappings so vmm can replace them. */
	for (i = 0; i < vsp.vsp_nmemranges; i++) {
		vmr = &vsp.vsp_memranges[i];
		munmap((void *)vmr->vmr_va, vmr->vmr_size);
	}

	/* Perform the shared mapping. */
	if (ioctl(fd, VMM_IOC_SHAREMEM, &vsp) == -1)
		err(1, "VMM_IOC_SHAREMEM");
	printf("created shared memory mappings\n");

	/* We should see our reset vector instructions in the new mappings. */
	for (i = 0; i < vsp.vsp_nmemranges; i++) {
		vmr = &vsp.vsp_memranges[i];
		p = (void *)vmr->vmr_va;

		for (j = 0; j < vmr->vmr_size; j += 2) {
			if (((uint8_t *)p)[j + 0] != 0xE4)
				errx(1, "bad byte");
			if (((uint8_t *)p)[j + 1] != PCKBC_AUX)
				errx(1, "bad byte");
		}
		printf("checked shared region %zu: { gpa: 0x%08lx, size: %lu,"
		    " hva: 0x%lx }\n", i, vmr->vmr_gpa, vmr->vmr_size,
		    vmr->vmr_va);
	}
	printf("validated shared memory mappings\n");

	/*
	 * 3. Check that our VM exists.
	 */
	memset(&vip, 0, sizeof(vip));
	vip.vip_size = 0;
	info = NULL;

	if (ioctl(fd, VMM_IOC_INFO, &vip) == -1) {
		warn("VMM_IOC_INFO(1)");
		goto out;
	}

	if (vip.vip_size == 0) {
		warnx("no vms found");
		goto out;
	}

	info = malloc(vip.vip_size);
	if (info == NULL) {
		warn("malloc");
		goto out;
	}

	/* Second request that retrieves the VMs. */
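	/*
	 * The first VMM_IOC_INFO call (vip_size == 0) only reported how much
	 * vm_info_result data the kernel has; this second call hands it the
	 * allocated buffer and receives one entry per VM.
	 */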
	vip.vip_info = info;
	if (ioctl(fd, VMM_IOC_INFO, &vip) == -1) {
		warn("VMM_IOC_INFO(2)");
		goto out;
	}

	for (i = 0; i * sizeof(*info) < vip.vip_size; i++) {
		if (info[i].vir_id == vcp.vcp_id) {
			ours = &info[i];
			break;
		}
	}
	if (ours == NULL) {
		warnx("failed to find vm %u", vcp.vcp_id);
		goto out;
	}

	if (ours->vir_id != vcp.vcp_id) {
		warnx("expected vm id %u, got %u", vcp.vcp_id, ours->vir_id);
		goto out;
	}
	if (strncmp(ours->vir_name, VM_NAME, strlen(VM_NAME)) != 0) {
		warnx("expected vm name \"%s\", got \"%s\"", VM_NAME,
		    ours->vir_name);
		goto out;
	}
	printf("found vm %d named \"%s\"\n", vcp.vcp_id, ours->vir_name);
	ours = NULL;

	/*
	 * 4. Reset our VCPU and initialize register state.
	 */
	memset(&vresetp, 0, sizeof(vresetp));
	vresetp.vrp_vm_id = vcp.vcp_id;
	vresetp.vrp_vcpu_id = 0;	/* XXX SP */
	memcpy(&vresetp.vrp_init_state, &vcpu_init_flat16,
	    sizeof(vcpu_init_flat16));

	if (ioctl(fd, VMM_IOC_RESETCPU, &vresetp) == -1) {
		warn("VMM_IOC_RESETCPU");
		goto out;
	}
	printf("reset vcpu %d for vm %d\n", vresetp.vrp_vcpu_id,
	    vresetp.vrp_vm_id);

	/*
	 * 5. Run the vcpu, expecting an immediate exit for IO assist.
	 */
	exit = malloc(sizeof(*exit));
	if (exit == NULL) {
		warn("failed to allocate memory for vm_exit");
		goto out;
	}

	memset(&vrunp, 0, sizeof(vrunp));
	vrunp.vrp_exit = exit;
	vrunp.vrp_vcpu_id = 0;		/* XXX SP */
	vrunp.vrp_vm_id = vcp.vcp_id;
	vrunp.vrp_irq = 0x0;
	vrunp.vrp_irqready = 1;

	if (ioctl(fd, VMM_IOC_RUN, &vrunp) == -1) {
		warn("VMM_IOC_RUN");
		goto out;
	}

	if (vrunp.vrp_vm_id != vcp.vcp_id) {
		warnx("expected vm id %u, got %u", vcp.vcp_id,
		    vrunp.vrp_vm_id);
		goto out;
	}

	switch (vrunp.vrp_exit_reason) {
	case SVM_VMEXIT_IOIO:
	case VMX_EXIT_IO:
		printf("vcpu %d on vm %d exited for io assist\n",
		    vrunp.vrp_vcpu_id, vrunp.vrp_vm_id);
		break;
	default:
		warnx("unexpected vm exit reason: 0x%04x",
		    vrunp.vrp_exit_reason);
		goto out;
	}

	exit = vrunp.vrp_exit;
	if (exit->vei.vei_port != PCKBC_AUX) {
		warnx("expected io port to be PCKBC_AUX, got 0x%02x",
		    exit->vei.vei_port);
		goto out;
	}

	/*
	 * If we made it here, we're close to passing. Any failures during
	 * cleanup will reset ret back to non-zero.
	 */
	ret = 0;

out:
	/*
	 * 6. Terminate our VM and clean up.
	 */
	memset(&vtp, 0, sizeof(vtp));
	vtp.vtp_vm_id = vcp.vcp_id;
	if (ioctl(fd, VMM_IOC_TERM, &vtp) == -1) {
		warn("VMM_IOC_TERM");
		ret = 1;
	} else
		printf("terminated vm %d\n", vtp.vtp_vm_id);

	close(fd);
	free(info);
	free(exit);

	/* Unmap memory. */
	for (i = 0; i < vcp.vcp_nmemranges; i++) {
		vmr = &vcp.vcp_memranges[i];
		if (vmr->vmr_va) {
			if (munmap((void *)vmr->vmr_va, vmr->vmr_size)) {
				warn("failed to unmap original region %zu"
				    " @ hva 0x%lx", i, vmr->vmr_va);
				ret = 1;
			} else
				printf("unmapped original region %zu @ hva "
				    "0x%lx\n", i, vmr->vmr_va);
		}
		vmr = &vsp.vsp_memranges[i];
		if (vmr->vmr_va) {
			if (munmap((void *)vmr->vmr_va, vmr->vmr_size)) {
				warn("failed to unmap shared region %zu @ hva "
				    "0x%lx", i, vmr->vmr_va);
				ret = 1;
			} else
				printf("unmapped shared region %zu @ hva "
				    "0x%lx\n", i, vmr->vmr_va);
		}
	}

	return (ret);
}