1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/param.h> 30 #include <sys/capsicum.h> 31 #include <sys/sysctl.h> 32 #include <sys/ioctl.h> 33 #include <sys/mman.h> 34 #include <sys/linker.h> 35 #include <sys/module.h> 36 #include <sys/_iovec.h> 37 #include <sys/cpuset.h> 38 39 #include <capsicum_helpers.h> 40 #include <errno.h> 41 #include <stdbool.h> 42 #include <stdio.h> 43 #include <stdlib.h> 44 #include <assert.h> 45 #include <string.h> 46 #include <fcntl.h> 47 #include <unistd.h> 48 49 #include <libutil.h> 50 51 #include <vm/vm.h> 52 #include <machine/vmm.h> 53 #include <machine/vmm_dev.h> 54 #include <machine/vmm_snapshot.h> 55 56 #include "vmmapi.h" 57 #include "internal.h" 58 59 #define MB (1024 * 1024UL) 60 #define GB (1024 * 1024 * 1024UL) 61 62 /* 63 * Size of the guard region before and after the virtual address space 64 * mapping the guest physical memory. This must be a multiple of the 65 * superpage size for performance reasons. 66 */ 67 #define VM_MMAP_GUARD_SIZE (4 * MB) 68 69 #define PROT_RW (PROT_READ | PROT_WRITE) 70 #define PROT_ALL (PROT_READ | PROT_WRITE | PROT_EXEC) 71 72 struct vmctx { 73 int fd; 74 uint32_t lowmem_limit; 75 int memflags; 76 size_t lowmem; 77 size_t highmem; 78 char *baseaddr; 79 char *name; 80 }; 81 82 #define CREATE(x) sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x))) 83 #define DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x))) 84 85 static int 86 vm_device_open(const char *name) 87 { 88 int fd, len; 89 char *vmfile; 90 91 len = strlen("/dev/vmm/") + strlen(name) + 1; 92 vmfile = malloc(len); 93 assert(vmfile != NULL); 94 snprintf(vmfile, len, "/dev/vmm/%s", name); 95 96 /* Open the device file */ 97 fd = open(vmfile, O_RDWR, 0); 98 99 free(vmfile); 100 return (fd); 101 } 102 103 int 104 vm_create(const char *name) 105 { 106 /* Try to load vmm(4) module before creating a guest. */ 107 if (modfind("vmm") < 0) 108 kldload("vmm"); 109 return (CREATE(name)); 110 } 111 112 struct vmctx * 113 vm_open(const char *name) 114 { 115 struct vmctx *vm; 116 int saved_errno; 117 118 vm = malloc(sizeof(struct vmctx) + strlen(name) + 1); 119 assert(vm != NULL); 120 121 vm->fd = -1; 122 vm->memflags = 0; 123 vm->lowmem_limit = 3 * GB; 124 vm->name = (char *)(vm + 1); 125 strcpy(vm->name, name); 126 127 if ((vm->fd = vm_device_open(vm->name)) < 0) 128 goto err; 129 130 return (vm); 131 err: 132 saved_errno = errno; 133 free(vm); 134 errno = saved_errno; 135 return (NULL); 136 } 137 138 void 139 vm_close(struct vmctx *vm) 140 { 141 assert(vm != NULL); 142 143 close(vm->fd); 144 free(vm); 145 } 146 147 void 148 vm_destroy(struct vmctx *vm) 149 { 150 assert(vm != NULL); 151 152 if (vm->fd >= 0) 153 close(vm->fd); 154 DESTROY(vm->name); 155 156 free(vm); 157 } 158 159 struct vcpu * 160 vm_vcpu_open(struct vmctx *ctx, int vcpuid) 161 { 162 struct vcpu *vcpu; 163 164 vcpu = malloc(sizeof(*vcpu)); 165 vcpu->ctx = ctx; 166 vcpu->vcpuid = vcpuid; 167 return (vcpu); 168 } 169 170 void 171 vm_vcpu_close(struct vcpu *vcpu) 172 { 173 free(vcpu); 174 } 175 176 int 177 vcpu_id(struct vcpu *vcpu) 178 { 179 return (vcpu->vcpuid); 180 } 181 182 int 183 vm_parse_memsize(const char *opt, size_t *ret_memsize) 184 { 185 char *endptr; 186 size_t optval; 187 int error; 188 189 optval = strtoul(opt, &endptr, 0); 190 if (*opt != '\0' && *endptr == '\0') { 191 /* 192 * For the sake of backward compatibility if the memory size 193 * specified on the command line is less than a megabyte then 194 * it is interpreted as being in units of MB. 195 */ 196 if (optval < MB) 197 optval *= MB; 198 *ret_memsize = optval; 199 error = 0; 200 } else 201 error = expand_number(opt, ret_memsize); 202 203 return (error); 204 } 205 206 uint32_t 207 vm_get_lowmem_limit(struct vmctx *ctx) 208 { 209 210 return (ctx->lowmem_limit); 211 } 212 213 void 214 vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit) 215 { 216 217 ctx->lowmem_limit = limit; 218 } 219 220 void 221 vm_set_memflags(struct vmctx *ctx, int flags) 222 { 223 224 ctx->memflags = flags; 225 } 226 227 int 228 vm_get_memflags(struct vmctx *ctx) 229 { 230 231 return (ctx->memflags); 232 } 233 234 /* 235 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len). 236 */ 237 int 238 vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off, 239 size_t len, int prot) 240 { 241 struct vm_memmap memmap; 242 int error, flags; 243 244 memmap.gpa = gpa; 245 memmap.segid = segid; 246 memmap.segoff = off; 247 memmap.len = len; 248 memmap.prot = prot; 249 memmap.flags = 0; 250 251 if (ctx->memflags & VM_MEM_F_WIRED) 252 memmap.flags |= VM_MEMMAP_F_WIRED; 253 254 /* 255 * If this mapping already exists then don't create it again. This 256 * is the common case for SYSMEM mappings created by bhyveload(8). 257 */ 258 error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags); 259 if (error == 0 && gpa == memmap.gpa) { 260 if (segid != memmap.segid || off != memmap.segoff || 261 prot != memmap.prot || flags != memmap.flags) { 262 errno = EEXIST; 263 return (-1); 264 } else { 265 return (0); 266 } 267 } 268 269 error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap); 270 return (error); 271 } 272 273 int 274 vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr, 275 size_t *lowmem_size, size_t *highmem_size) 276 { 277 278 *guest_baseaddr = ctx->baseaddr; 279 *lowmem_size = ctx->lowmem; 280 *highmem_size = ctx->highmem; 281 return (0); 282 } 283 284 int 285 vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len) 286 { 287 struct vm_munmap munmap; 288 int error; 289 290 munmap.gpa = gpa; 291 munmap.len = len; 292 293 error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap); 294 return (error); 295 } 296 297 int 298 vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid, 299 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) 300 { 301 struct vm_memmap memmap; 302 int error; 303 304 bzero(&memmap, sizeof(struct vm_memmap)); 305 memmap.gpa = *gpa; 306 error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap); 307 if (error == 0) { 308 *gpa = memmap.gpa; 309 *segid = memmap.segid; 310 *segoff = memmap.segoff; 311 *len = memmap.len; 312 *prot = memmap.prot; 313 *flags = memmap.flags; 314 } 315 return (error); 316 } 317 318 /* 319 * Return 0 if the segments are identical and non-zero otherwise. 320 * 321 * This is slightly complicated by the fact that only device memory segments 322 * are named. 323 */ 324 static int 325 cmpseg(size_t len, const char *str, size_t len2, const char *str2) 326 { 327 328 if (len == len2) { 329 if ((!str && !str2) || (str && str2 && !strcmp(str, str2))) 330 return (0); 331 } 332 return (-1); 333 } 334 335 static int 336 vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name) 337 { 338 struct vm_memseg memseg; 339 size_t n; 340 int error; 341 342 /* 343 * If the memory segment has already been created then just return. 344 * This is the usual case for the SYSMEM segment created by userspace 345 * loaders like bhyveload(8). 346 */ 347 error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name, 348 sizeof(memseg.name)); 349 if (error) 350 return (error); 351 352 if (memseg.len != 0) { 353 if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) { 354 errno = EINVAL; 355 return (-1); 356 } else { 357 return (0); 358 } 359 } 360 361 bzero(&memseg, sizeof(struct vm_memseg)); 362 memseg.segid = segid; 363 memseg.len = len; 364 if (name != NULL) { 365 n = strlcpy(memseg.name, name, sizeof(memseg.name)); 366 if (n >= sizeof(memseg.name)) { 367 errno = ENAMETOOLONG; 368 return (-1); 369 } 370 } 371 372 error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg); 373 return (error); 374 } 375 376 int 377 vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf, 378 size_t bufsize) 379 { 380 struct vm_memseg memseg; 381 size_t n; 382 int error; 383 384 memseg.segid = segid; 385 error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg); 386 if (error == 0) { 387 *lenp = memseg.len; 388 n = strlcpy(namebuf, memseg.name, bufsize); 389 if (n >= bufsize) { 390 errno = ENAMETOOLONG; 391 error = -1; 392 } 393 } 394 return (error); 395 } 396 397 static int 398 setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base) 399 { 400 char *ptr; 401 int error, flags; 402 403 /* Map 'len' bytes starting at 'gpa' in the guest address space */ 404 error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL); 405 if (error) 406 return (error); 407 408 flags = MAP_SHARED | MAP_FIXED; 409 if ((ctx->memflags & VM_MEM_F_INCORE) == 0) 410 flags |= MAP_NOCORE; 411 412 /* mmap into the process address space on the host */ 413 ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa); 414 if (ptr == MAP_FAILED) 415 return (-1); 416 417 return (0); 418 } 419 420 int 421 vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms) 422 { 423 size_t objsize, len; 424 vm_paddr_t gpa; 425 char *baseaddr, *ptr; 426 int error; 427 428 assert(vms == VM_MMAP_ALL); 429 430 /* 431 * If 'memsize' cannot fit entirely in the 'lowmem' segment then 432 * create another 'highmem' segment above 4GB for the remainder. 433 */ 434 if (memsize > ctx->lowmem_limit) { 435 ctx->lowmem = ctx->lowmem_limit; 436 ctx->highmem = memsize - ctx->lowmem_limit; 437 objsize = 4*GB + ctx->highmem; 438 } else { 439 ctx->lowmem = memsize; 440 ctx->highmem = 0; 441 objsize = ctx->lowmem; 442 } 443 444 error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL); 445 if (error) 446 return (error); 447 448 /* 449 * Stake out a contiguous region covering the guest physical memory 450 * and the adjoining guard regions. 451 */ 452 len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE; 453 ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0); 454 if (ptr == MAP_FAILED) 455 return (-1); 456 457 baseaddr = ptr + VM_MMAP_GUARD_SIZE; 458 if (ctx->highmem > 0) { 459 gpa = 4*GB; 460 len = ctx->highmem; 461 error = setup_memory_segment(ctx, gpa, len, baseaddr); 462 if (error) 463 return (error); 464 } 465 466 if (ctx->lowmem > 0) { 467 gpa = 0; 468 len = ctx->lowmem; 469 error = setup_memory_segment(ctx, gpa, len, baseaddr); 470 if (error) 471 return (error); 472 } 473 474 ctx->baseaddr = baseaddr; 475 476 return (0); 477 } 478 479 /* 480 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in 481 * the lowmem or highmem regions. 482 * 483 * In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region. 484 * The instruction emulation code depends on this behavior. 485 */ 486 void * 487 vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len) 488 { 489 490 if (ctx->lowmem > 0) { 491 if (gaddr < ctx->lowmem && len <= ctx->lowmem && 492 gaddr + len <= ctx->lowmem) 493 return (ctx->baseaddr + gaddr); 494 } 495 496 if (ctx->highmem > 0) { 497 if (gaddr >= 4*GB) { 498 if (gaddr < 4*GB + ctx->highmem && 499 len <= ctx->highmem && 500 gaddr + len <= 4*GB + ctx->highmem) 501 return (ctx->baseaddr + gaddr); 502 } 503 } 504 505 return (NULL); 506 } 507 508 vm_paddr_t 509 vm_rev_map_gpa(struct vmctx *ctx, void *addr) 510 { 511 vm_paddr_t offaddr; 512 513 offaddr = (char *)addr - ctx->baseaddr; 514 515 if (ctx->lowmem > 0) 516 if (offaddr <= ctx->lowmem) 517 return (offaddr); 518 519 if (ctx->highmem > 0) 520 if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem) 521 return (offaddr); 522 523 return ((vm_paddr_t)-1); 524 } 525 526 const char * 527 vm_get_name(struct vmctx *ctx) 528 { 529 530 return (ctx->name); 531 } 532 533 size_t 534 vm_get_lowmem_size(struct vmctx *ctx) 535 { 536 537 return (ctx->lowmem); 538 } 539 540 size_t 541 vm_get_highmem_size(struct vmctx *ctx) 542 { 543 544 return (ctx->highmem); 545 } 546 547 void * 548 vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len) 549 { 550 char pathname[MAXPATHLEN]; 551 size_t len2; 552 char *base, *ptr; 553 int fd, error, flags; 554 555 fd = -1; 556 ptr = MAP_FAILED; 557 if (name == NULL || strlen(name) == 0) { 558 errno = EINVAL; 559 goto done; 560 } 561 562 error = vm_alloc_memseg(ctx, segid, len, name); 563 if (error) 564 goto done; 565 566 strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname)); 567 strlcat(pathname, ctx->name, sizeof(pathname)); 568 strlcat(pathname, ".", sizeof(pathname)); 569 strlcat(pathname, name, sizeof(pathname)); 570 571 fd = open(pathname, O_RDWR); 572 if (fd < 0) 573 goto done; 574 575 /* 576 * Stake out a contiguous region covering the device memory and the 577 * adjoining guard regions. 578 */ 579 len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE; 580 base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 581 0); 582 if (base == MAP_FAILED) 583 goto done; 584 585 flags = MAP_SHARED | MAP_FIXED; 586 if ((ctx->memflags & VM_MEM_F_INCORE) == 0) 587 flags |= MAP_NOCORE; 588 589 /* mmap the devmem region in the host address space */ 590 ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0); 591 done: 592 if (fd >= 0) 593 close(fd); 594 return (ptr); 595 } 596 597 static int 598 vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg) 599 { 600 /* 601 * XXX: fragile, handle with care 602 * Assumes that the first field of the ioctl data 603 * is the vcpuid. 604 */ 605 *(int *)arg = vcpu->vcpuid; 606 return (ioctl(vcpu->ctx->fd, cmd, arg)); 607 } 608 609 int 610 vm_set_desc(struct vcpu *vcpu, int reg, 611 uint64_t base, uint32_t limit, uint32_t access) 612 { 613 int error; 614 struct vm_seg_desc vmsegdesc; 615 616 bzero(&vmsegdesc, sizeof(vmsegdesc)); 617 vmsegdesc.regnum = reg; 618 vmsegdesc.desc.base = base; 619 vmsegdesc.desc.limit = limit; 620 vmsegdesc.desc.access = access; 621 622 error = vcpu_ioctl(vcpu, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc); 623 return (error); 624 } 625 626 int 627 vm_get_desc(struct vcpu *vcpu, int reg, uint64_t *base, uint32_t *limit, 628 uint32_t *access) 629 { 630 int error; 631 struct vm_seg_desc vmsegdesc; 632 633 bzero(&vmsegdesc, sizeof(vmsegdesc)); 634 vmsegdesc.regnum = reg; 635 636 error = vcpu_ioctl(vcpu, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc); 637 if (error == 0) { 638 *base = vmsegdesc.desc.base; 639 *limit = vmsegdesc.desc.limit; 640 *access = vmsegdesc.desc.access; 641 } 642 return (error); 643 } 644 645 int 646 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc) 647 { 648 int error; 649 650 error = vm_get_desc(vcpu, reg, &seg_desc->base, &seg_desc->limit, 651 &seg_desc->access); 652 return (error); 653 } 654 655 int 656 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) 657 { 658 int error; 659 struct vm_register vmreg; 660 661 bzero(&vmreg, sizeof(vmreg)); 662 vmreg.regnum = reg; 663 vmreg.regval = val; 664 665 error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg); 666 return (error); 667 } 668 669 int 670 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val) 671 { 672 int error; 673 struct vm_register vmreg; 674 675 bzero(&vmreg, sizeof(vmreg)); 676 vmreg.regnum = reg; 677 678 error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg); 679 *ret_val = vmreg.regval; 680 return (error); 681 } 682 683 int 684 vm_set_register_set(struct vcpu *vcpu, unsigned int count, 685 const int *regnums, uint64_t *regvals) 686 { 687 int error; 688 struct vm_register_set vmregset; 689 690 bzero(&vmregset, sizeof(vmregset)); 691 vmregset.count = count; 692 vmregset.regnums = regnums; 693 vmregset.regvals = regvals; 694 695 error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset); 696 return (error); 697 } 698 699 int 700 vm_get_register_set(struct vcpu *vcpu, unsigned int count, 701 const int *regnums, uint64_t *regvals) 702 { 703 int error; 704 struct vm_register_set vmregset; 705 706 bzero(&vmregset, sizeof(vmregset)); 707 vmregset.count = count; 708 vmregset.regnums = regnums; 709 vmregset.regvals = regvals; 710 711 error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset); 712 return (error); 713 } 714 715 int 716 vm_run(struct vcpu *vcpu, struct vm_run *vmrun) 717 { 718 return (vcpu_ioctl(vcpu, VM_RUN, vmrun)); 719 } 720 721 int 722 vm_suspend(struct vmctx *ctx, enum vm_suspend_how how) 723 { 724 struct vm_suspend vmsuspend; 725 726 bzero(&vmsuspend, sizeof(vmsuspend)); 727 vmsuspend.how = how; 728 return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend)); 729 } 730 731 int 732 vm_reinit(struct vmctx *ctx) 733 { 734 735 return (ioctl(ctx->fd, VM_REINIT, 0)); 736 } 737 738 int 739 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid, 740 uint32_t errcode, int restart_instruction) 741 { 742 struct vm_exception exc; 743 744 exc.vector = vector; 745 exc.error_code = errcode; 746 exc.error_code_valid = errcode_valid; 747 exc.restart_instruction = restart_instruction; 748 749 return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc)); 750 } 751 752 int 753 vm_apicid2vcpu(struct vmctx *ctx __unused, int apicid) 754 { 755 /* 756 * The apic id associated with the 'vcpu' has the same numerical value 757 * as the 'vcpu' itself. 758 */ 759 return (apicid); 760 } 761 762 int 763 vm_lapic_irq(struct vcpu *vcpu, int vector) 764 { 765 struct vm_lapic_irq vmirq; 766 767 bzero(&vmirq, sizeof(vmirq)); 768 vmirq.vector = vector; 769 770 return (vcpu_ioctl(vcpu, VM_LAPIC_IRQ, &vmirq)); 771 } 772 773 int 774 vm_lapic_local_irq(struct vcpu *vcpu, int vector) 775 { 776 struct vm_lapic_irq vmirq; 777 778 bzero(&vmirq, sizeof(vmirq)); 779 vmirq.vector = vector; 780 781 return (vcpu_ioctl(vcpu, VM_LAPIC_LOCAL_IRQ, &vmirq)); 782 } 783 784 int 785 vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg) 786 { 787 struct vm_lapic_msi vmmsi; 788 789 bzero(&vmmsi, sizeof(vmmsi)); 790 vmmsi.addr = addr; 791 vmmsi.msg = msg; 792 793 return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi)); 794 } 795 796 int 797 vm_ioapic_assert_irq(struct vmctx *ctx, int irq) 798 { 799 struct vm_ioapic_irq ioapic_irq; 800 801 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq)); 802 ioapic_irq.irq = irq; 803 804 return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq)); 805 } 806 807 int 808 vm_ioapic_deassert_irq(struct vmctx *ctx, int irq) 809 { 810 struct vm_ioapic_irq ioapic_irq; 811 812 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq)); 813 ioapic_irq.irq = irq; 814 815 return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq)); 816 } 817 818 int 819 vm_ioapic_pulse_irq(struct vmctx *ctx, int irq) 820 { 821 struct vm_ioapic_irq ioapic_irq; 822 823 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq)); 824 ioapic_irq.irq = irq; 825 826 return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq)); 827 } 828 829 int 830 vm_ioapic_pincount(struct vmctx *ctx, int *pincount) 831 { 832 833 return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount)); 834 } 835 836 int 837 vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa, 838 bool write, int size, uint64_t *value) 839 { 840 struct vm_readwrite_kernemu_device irp = { 841 .access_width = fls(size) - 1, 842 .gpa = gpa, 843 .value = write ? *value : ~0ul, 844 }; 845 long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV); 846 int rc; 847 848 rc = vcpu_ioctl(vcpu, cmd, &irp); 849 if (rc == 0 && !write) 850 *value = irp.value; 851 return (rc); 852 } 853 854 int 855 vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq) 856 { 857 struct vm_isa_irq isa_irq; 858 859 bzero(&isa_irq, sizeof(struct vm_isa_irq)); 860 isa_irq.atpic_irq = atpic_irq; 861 isa_irq.ioapic_irq = ioapic_irq; 862 863 return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq)); 864 } 865 866 int 867 vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq) 868 { 869 struct vm_isa_irq isa_irq; 870 871 bzero(&isa_irq, sizeof(struct vm_isa_irq)); 872 isa_irq.atpic_irq = atpic_irq; 873 isa_irq.ioapic_irq = ioapic_irq; 874 875 return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq)); 876 } 877 878 int 879 vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq) 880 { 881 struct vm_isa_irq isa_irq; 882 883 bzero(&isa_irq, sizeof(struct vm_isa_irq)); 884 isa_irq.atpic_irq = atpic_irq; 885 isa_irq.ioapic_irq = ioapic_irq; 886 887 return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq)); 888 } 889 890 int 891 vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq, 892 enum vm_intr_trigger trigger) 893 { 894 struct vm_isa_irq_trigger isa_irq_trigger; 895 896 bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger)); 897 isa_irq_trigger.atpic_irq = atpic_irq; 898 isa_irq_trigger.trigger = trigger; 899 900 return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger)); 901 } 902 903 int 904 vm_inject_nmi(struct vcpu *vcpu) 905 { 906 struct vm_nmi vmnmi; 907 908 bzero(&vmnmi, sizeof(vmnmi)); 909 910 return (vcpu_ioctl(vcpu, VM_INJECT_NMI, &vmnmi)); 911 } 912 913 static const char *capstrmap[] = { 914 [VM_CAP_HALT_EXIT] = "hlt_exit", 915 [VM_CAP_MTRAP_EXIT] = "mtrap_exit", 916 [VM_CAP_PAUSE_EXIT] = "pause_exit", 917 [VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest", 918 [VM_CAP_ENABLE_INVPCID] = "enable_invpcid", 919 [VM_CAP_BPT_EXIT] = "bpt_exit", 920 [VM_CAP_RDPID] = "rdpid", 921 [VM_CAP_RDTSCP] = "rdtscp", 922 [VM_CAP_IPI_EXIT] = "ipi_exit", 923 [VM_CAP_MASK_HWINTR] = "mask_hwintr", 924 [VM_CAP_RFLAGS_TF] = "rflags_tf", 925 }; 926 927 int 928 vm_capability_name2type(const char *capname) 929 { 930 int i; 931 932 for (i = 0; i < (int)nitems(capstrmap); i++) { 933 if (strcmp(capstrmap[i], capname) == 0) 934 return (i); 935 } 936 937 return (-1); 938 } 939 940 const char * 941 vm_capability_type2name(int type) 942 { 943 if (type >= 0 && type < (int)nitems(capstrmap)) 944 return (capstrmap[type]); 945 946 return (NULL); 947 } 948 949 int 950 vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap, int *retval) 951 { 952 int error; 953 struct vm_capability vmcap; 954 955 bzero(&vmcap, sizeof(vmcap)); 956 vmcap.captype = cap; 957 958 error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap); 959 *retval = vmcap.capval; 960 return (error); 961 } 962 963 int 964 vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val) 965 { 966 struct vm_capability vmcap; 967 968 bzero(&vmcap, sizeof(vmcap)); 969 vmcap.captype = cap; 970 vmcap.capval = val; 971 972 return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap)); 973 } 974 975 int 976 vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func) 977 { 978 struct vm_pptdev pptdev; 979 980 bzero(&pptdev, sizeof(pptdev)); 981 pptdev.bus = bus; 982 pptdev.slot = slot; 983 pptdev.func = func; 984 985 return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev)); 986 } 987 988 int 989 vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func) 990 { 991 struct vm_pptdev pptdev; 992 993 bzero(&pptdev, sizeof(pptdev)); 994 pptdev.bus = bus; 995 pptdev.slot = slot; 996 pptdev.func = func; 997 998 return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev)); 999 } 1000 1001 int 1002 vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, 1003 vm_paddr_t gpa, size_t len, vm_paddr_t hpa) 1004 { 1005 struct vm_pptdev_mmio pptmmio; 1006 1007 bzero(&pptmmio, sizeof(pptmmio)); 1008 pptmmio.bus = bus; 1009 pptmmio.slot = slot; 1010 pptmmio.func = func; 1011 pptmmio.gpa = gpa; 1012 pptmmio.len = len; 1013 pptmmio.hpa = hpa; 1014 1015 return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio)); 1016 } 1017 1018 int 1019 vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, 1020 vm_paddr_t gpa, size_t len) 1021 { 1022 struct vm_pptdev_mmio pptmmio; 1023 1024 bzero(&pptmmio, sizeof(pptmmio)); 1025 pptmmio.bus = bus; 1026 pptmmio.slot = slot; 1027 pptmmio.func = func; 1028 pptmmio.gpa = gpa; 1029 pptmmio.len = len; 1030 1031 return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio)); 1032 } 1033 1034 int 1035 vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot, int func, 1036 uint64_t addr, uint64_t msg, int numvec) 1037 { 1038 struct vm_pptdev_msi pptmsi; 1039 1040 bzero(&pptmsi, sizeof(pptmsi)); 1041 pptmsi.bus = bus; 1042 pptmsi.slot = slot; 1043 pptmsi.func = func; 1044 pptmsi.msg = msg; 1045 pptmsi.addr = addr; 1046 pptmsi.numvec = numvec; 1047 1048 return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi)); 1049 } 1050 1051 int 1052 vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func, 1053 int idx, uint64_t addr, uint64_t msg, uint32_t vector_control) 1054 { 1055 struct vm_pptdev_msix pptmsix; 1056 1057 bzero(&pptmsix, sizeof(pptmsix)); 1058 pptmsix.bus = bus; 1059 pptmsix.slot = slot; 1060 pptmsix.func = func; 1061 pptmsix.idx = idx; 1062 pptmsix.msg = msg; 1063 pptmsix.addr = addr; 1064 pptmsix.vector_control = vector_control; 1065 1066 return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix); 1067 } 1068 1069 int 1070 vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func) 1071 { 1072 struct vm_pptdev ppt; 1073 1074 bzero(&ppt, sizeof(ppt)); 1075 ppt.bus = bus; 1076 ppt.slot = slot; 1077 ppt.func = func; 1078 1079 return ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt); 1080 } 1081 1082 uint64_t * 1083 vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv, 1084 int *ret_entries) 1085 { 1086 static _Thread_local uint64_t *stats_buf; 1087 static _Thread_local u_int stats_count; 1088 uint64_t *new_stats; 1089 struct vm_stats vmstats; 1090 u_int count, index; 1091 bool have_stats; 1092 1093 have_stats = false; 1094 count = 0; 1095 for (index = 0;; index += nitems(vmstats.statbuf)) { 1096 vmstats.index = index; 1097 if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0) 1098 break; 1099 if (stats_count < index + vmstats.num_entries) { 1100 new_stats = realloc(stats_buf, 1101 (index + vmstats.num_entries) * sizeof(uint64_t)); 1102 if (new_stats == NULL) { 1103 errno = ENOMEM; 1104 return (NULL); 1105 } 1106 stats_count = index + vmstats.num_entries; 1107 stats_buf = new_stats; 1108 } 1109 memcpy(stats_buf + index, vmstats.statbuf, 1110 vmstats.num_entries * sizeof(uint64_t)); 1111 count += vmstats.num_entries; 1112 have_stats = true; 1113 1114 if (vmstats.num_entries != nitems(vmstats.statbuf)) 1115 break; 1116 } 1117 if (have_stats) { 1118 if (ret_entries) 1119 *ret_entries = count; 1120 if (ret_tv) 1121 *ret_tv = vmstats.tv; 1122 return (stats_buf); 1123 } else 1124 return (NULL); 1125 } 1126 1127 const char * 1128 vm_get_stat_desc(struct vmctx *ctx, int index) 1129 { 1130 static struct vm_stat_desc statdesc; 1131 1132 statdesc.index = index; 1133 if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0) 1134 return (statdesc.desc); 1135 else 1136 return (NULL); 1137 } 1138 1139 int 1140 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state) 1141 { 1142 int error; 1143 struct vm_x2apic x2apic; 1144 1145 bzero(&x2apic, sizeof(x2apic)); 1146 1147 error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic); 1148 *state = x2apic.state; 1149 return (error); 1150 } 1151 1152 int 1153 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state) 1154 { 1155 int error; 1156 struct vm_x2apic x2apic; 1157 1158 bzero(&x2apic, sizeof(x2apic)); 1159 x2apic.state = state; 1160 1161 error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic); 1162 1163 return (error); 1164 } 1165 1166 int 1167 vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num) 1168 { 1169 int error, i; 1170 struct vm_gpa_pte gpapte; 1171 1172 bzero(&gpapte, sizeof(gpapte)); 1173 gpapte.gpa = gpa; 1174 1175 error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte); 1176 1177 if (error == 0) { 1178 *num = gpapte.ptenum; 1179 for (i = 0; i < gpapte.ptenum; i++) 1180 pte[i] = gpapte.pte[i]; 1181 } 1182 1183 return (error); 1184 } 1185 1186 int 1187 vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities) 1188 { 1189 int error; 1190 struct vm_hpet_cap cap; 1191 1192 bzero(&cap, sizeof(struct vm_hpet_cap)); 1193 error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap); 1194 if (capabilities != NULL) 1195 *capabilities = cap.capabilities; 1196 return (error); 1197 } 1198 1199 int 1200 vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging, 1201 uint64_t gla, int prot, uint64_t *gpa, int *fault) 1202 { 1203 struct vm_gla2gpa gg; 1204 int error; 1205 1206 bzero(&gg, sizeof(struct vm_gla2gpa)); 1207 gg.prot = prot; 1208 gg.gla = gla; 1209 gg.paging = *paging; 1210 1211 error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg); 1212 if (error == 0) { 1213 *fault = gg.fault; 1214 *gpa = gg.gpa; 1215 } 1216 return (error); 1217 } 1218 1219 int 1220 vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, 1221 uint64_t gla, int prot, uint64_t *gpa, int *fault) 1222 { 1223 struct vm_gla2gpa gg; 1224 int error; 1225 1226 bzero(&gg, sizeof(struct vm_gla2gpa)); 1227 gg.prot = prot; 1228 gg.gla = gla; 1229 gg.paging = *paging; 1230 1231 error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg); 1232 if (error == 0) { 1233 *fault = gg.fault; 1234 *gpa = gg.gpa; 1235 } 1236 return (error); 1237 } 1238 1239 #ifndef min 1240 #define min(a,b) (((a) < (b)) ? (a) : (b)) 1241 #endif 1242 1243 int 1244 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging, 1245 uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt, 1246 int *fault) 1247 { 1248 void *va; 1249 uint64_t gpa, off; 1250 int error, i, n; 1251 1252 for (i = 0; i < iovcnt; i++) { 1253 iov[i].iov_base = 0; 1254 iov[i].iov_len = 0; 1255 } 1256 1257 while (len) { 1258 assert(iovcnt > 0); 1259 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault); 1260 if (error || *fault) 1261 return (error); 1262 1263 off = gpa & PAGE_MASK; 1264 n = MIN(len, PAGE_SIZE - off); 1265 1266 va = vm_map_gpa(vcpu->ctx, gpa, n); 1267 if (va == NULL) 1268 return (EFAULT); 1269 1270 iov->iov_base = va; 1271 iov->iov_len = n; 1272 iov++; 1273 iovcnt--; 1274 1275 gla += n; 1276 len -= n; 1277 } 1278 return (0); 1279 } 1280 1281 void 1282 vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused) 1283 { 1284 /* 1285 * Intentionally empty. This is used by the instruction 1286 * emulation code shared with the kernel. The in-kernel 1287 * version of this is non-empty. 1288 */ 1289 } 1290 1291 void 1292 vm_copyin(struct iovec *iov, void *vp, size_t len) 1293 { 1294 const char *src; 1295 char *dst; 1296 size_t n; 1297 1298 dst = vp; 1299 while (len) { 1300 assert(iov->iov_len); 1301 n = min(len, iov->iov_len); 1302 src = iov->iov_base; 1303 bcopy(src, dst, n); 1304 1305 iov++; 1306 dst += n; 1307 len -= n; 1308 } 1309 } 1310 1311 void 1312 vm_copyout(const void *vp, struct iovec *iov, size_t len) 1313 { 1314 const char *src; 1315 char *dst; 1316 size_t n; 1317 1318 src = vp; 1319 while (len) { 1320 assert(iov->iov_len); 1321 n = min(len, iov->iov_len); 1322 dst = iov->iov_base; 1323 bcopy(src, dst, n); 1324 1325 iov++; 1326 src += n; 1327 len -= n; 1328 } 1329 } 1330 1331 static int 1332 vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus) 1333 { 1334 struct vm_cpuset vm_cpuset; 1335 int error; 1336 1337 bzero(&vm_cpuset, sizeof(struct vm_cpuset)); 1338 vm_cpuset.which = which; 1339 vm_cpuset.cpusetsize = sizeof(cpuset_t); 1340 vm_cpuset.cpus = cpus; 1341 1342 error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset); 1343 return (error); 1344 } 1345 1346 int 1347 vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus) 1348 { 1349 1350 return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus)); 1351 } 1352 1353 int 1354 vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus) 1355 { 1356 1357 return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus)); 1358 } 1359 1360 int 1361 vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus) 1362 { 1363 1364 return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus)); 1365 } 1366 1367 int 1368 vm_activate_cpu(struct vcpu *vcpu) 1369 { 1370 struct vm_activate_cpu ac; 1371 int error; 1372 1373 bzero(&ac, sizeof(struct vm_activate_cpu)); 1374 error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac); 1375 return (error); 1376 } 1377 1378 int 1379 vm_suspend_all_cpus(struct vmctx *ctx) 1380 { 1381 struct vm_activate_cpu ac; 1382 int error; 1383 1384 bzero(&ac, sizeof(struct vm_activate_cpu)); 1385 ac.vcpuid = -1; 1386 error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac); 1387 return (error); 1388 } 1389 1390 int 1391 vm_suspend_cpu(struct vcpu *vcpu) 1392 { 1393 struct vm_activate_cpu ac; 1394 int error; 1395 1396 bzero(&ac, sizeof(struct vm_activate_cpu)); 1397 error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac); 1398 return (error); 1399 } 1400 1401 int 1402 vm_resume_cpu(struct vcpu *vcpu) 1403 { 1404 struct vm_activate_cpu ac; 1405 int error; 1406 1407 bzero(&ac, sizeof(struct vm_activate_cpu)); 1408 error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac); 1409 return (error); 1410 } 1411 1412 int 1413 vm_resume_all_cpus(struct vmctx *ctx) 1414 { 1415 struct vm_activate_cpu ac; 1416 int error; 1417 1418 bzero(&ac, sizeof(struct vm_activate_cpu)); 1419 ac.vcpuid = -1; 1420 error = ioctl(ctx->fd, VM_RESUME_CPU, &ac); 1421 return (error); 1422 } 1423 1424 int 1425 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2) 1426 { 1427 struct vm_intinfo vmii; 1428 int error; 1429 1430 bzero(&vmii, sizeof(struct vm_intinfo)); 1431 error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii); 1432 if (error == 0) { 1433 *info1 = vmii.info1; 1434 *info2 = vmii.info2; 1435 } 1436 return (error); 1437 } 1438 1439 int 1440 vm_set_intinfo(struct vcpu *vcpu, uint64_t info1) 1441 { 1442 struct vm_intinfo vmii; 1443 int error; 1444 1445 bzero(&vmii, sizeof(struct vm_intinfo)); 1446 vmii.info1 = info1; 1447 error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii); 1448 return (error); 1449 } 1450 1451 int 1452 vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value) 1453 { 1454 struct vm_rtc_data rtcdata; 1455 int error; 1456 1457 bzero(&rtcdata, sizeof(struct vm_rtc_data)); 1458 rtcdata.offset = offset; 1459 rtcdata.value = value; 1460 error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata); 1461 return (error); 1462 } 1463 1464 int 1465 vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval) 1466 { 1467 struct vm_rtc_data rtcdata; 1468 int error; 1469 1470 bzero(&rtcdata, sizeof(struct vm_rtc_data)); 1471 rtcdata.offset = offset; 1472 error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata); 1473 if (error == 0) 1474 *retval = rtcdata.value; 1475 return (error); 1476 } 1477 1478 int 1479 vm_rtc_settime(struct vmctx *ctx, time_t secs) 1480 { 1481 struct vm_rtc_time rtctime; 1482 int error; 1483 1484 bzero(&rtctime, sizeof(struct vm_rtc_time)); 1485 rtctime.secs = secs; 1486 error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime); 1487 return (error); 1488 } 1489 1490 int 1491 vm_rtc_gettime(struct vmctx *ctx, time_t *secs) 1492 { 1493 struct vm_rtc_time rtctime; 1494 int error; 1495 1496 bzero(&rtctime, sizeof(struct vm_rtc_time)); 1497 error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime); 1498 if (error == 0) 1499 *secs = rtctime.secs; 1500 return (error); 1501 } 1502 1503 int 1504 vm_restart_instruction(struct vcpu *vcpu) 1505 { 1506 int arg; 1507 1508 return (vcpu_ioctl(vcpu, VM_RESTART_INSTRUCTION, &arg)); 1509 } 1510 1511 int 1512 vm_snapshot_req(struct vmctx *ctx, struct vm_snapshot_meta *meta) 1513 { 1514 1515 if (ioctl(ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) { 1516 #ifdef SNAPSHOT_DEBUG 1517 fprintf(stderr, "%s: snapshot failed for %s: %d\r\n", 1518 __func__, meta->dev_name, errno); 1519 #endif 1520 return (-1); 1521 } 1522 return (0); 1523 } 1524 1525 int 1526 vm_restore_time(struct vmctx *ctx) 1527 { 1528 int dummy; 1529 1530 dummy = 0; 1531 return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy)); 1532 } 1533 1534 int 1535 vm_set_topology(struct vmctx *ctx, 1536 uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus) 1537 { 1538 struct vm_cpu_topology topology; 1539 1540 bzero(&topology, sizeof (struct vm_cpu_topology)); 1541 topology.sockets = sockets; 1542 topology.cores = cores; 1543 topology.threads = threads; 1544 topology.maxcpus = maxcpus; 1545 return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology)); 1546 } 1547 1548 int 1549 vm_get_topology(struct vmctx *ctx, 1550 uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus) 1551 { 1552 struct vm_cpu_topology topology; 1553 int error; 1554 1555 bzero(&topology, sizeof (struct vm_cpu_topology)); 1556 error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology); 1557 if (error == 0) { 1558 *sockets = topology.sockets; 1559 *cores = topology.cores; 1560 *threads = topology.threads; 1561 *maxcpus = topology.maxcpus; 1562 } 1563 return (error); 1564 } 1565 1566 /* Keep in sync with machine/vmm_dev.h. */ 1567 static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT, 1568 VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG, 1569 VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER, 1570 VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR, 1571 VM_SET_REGISTER_SET, VM_GET_REGISTER_SET, 1572 VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV, 1573 VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ, 1574 VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ, 1575 VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ, 1576 VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER, 1577 VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV, 1578 VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI, 1579 VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX, 1580 VM_INJECT_NMI, VM_STATS, VM_STAT_DESC, 1581 VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE, 1582 VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA, 1583 VM_GLA2GPA_NOFAULT, 1584 VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU, 1585 VM_SET_INTINFO, VM_GET_INTINFO, 1586 VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME, 1587 VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY, 1588 VM_SNAPSHOT_REQ, VM_RESTORE_TIME 1589 }; 1590 1591 int 1592 vm_limit_rights(struct vmctx *ctx) 1593 { 1594 cap_rights_t rights; 1595 size_t ncmds; 1596 1597 cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW); 1598 if (caph_rights_limit(ctx->fd, &rights) != 0) 1599 return (-1); 1600 ncmds = nitems(vm_ioctl_cmds); 1601 if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, ncmds) != 0) 1602 return (-1); 1603 return (0); 1604 } 1605 1606 /* 1607 * Avoid using in new code. Operations on the fd should be wrapped here so that 1608 * capability rights can be kept in sync. 1609 */ 1610 int 1611 vm_get_device_fd(struct vmctx *ctx) 1612 { 1613 1614 return (ctx->fd); 1615 } 1616 1617 /* Legacy interface, do not use. */ 1618 const cap_ioctl_t * 1619 vm_get_ioctls(size_t *len) 1620 { 1621 cap_ioctl_t *cmds; 1622 1623 if (len == NULL) { 1624 cmds = malloc(sizeof(vm_ioctl_cmds)); 1625 if (cmds == NULL) 1626 return (NULL); 1627 bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds)); 1628 return (cmds); 1629 } 1630 1631 *len = nitems(vm_ioctl_cmds); 1632 return (NULL); 1633 } 1634