Lines matching refs: vm (FreeBSD bhyve, vmm.c)

107 	struct vm	*vm;		/* (o) */  member
161 struct vm { struct
197 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format) argument
200 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1) argument
203 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2) argument
206 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3) argument
209 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4) argument
235 DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
295 static void vm_free_memmap(struct vm *vm, int ident);
296 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
342 vcpu_alloc(struct vm *vm, int vcpu_id) in vcpu_alloc() argument
346 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, in vcpu_alloc()
354 vcpu->vm = vm; in vcpu_alloc()
364 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
495 vm_init(struct vm *vm, bool create) in vm_init() argument
497 vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); in vm_init()
498 vm->iommu = NULL; in vm_init()
499 vm->vioapic = vioapic_init(vm); in vm_init()
500 vm->vhpet = vhpet_init(vm); in vm_init()
501 vm->vatpic = vatpic_init(vm); in vm_init()
502 vm->vatpit = vatpit_init(vm); in vm_init()
503 vm->vpmtmr = vpmtmr_init(vm); in vm_init()
505 vm->vrtc = vrtc_init(vm); in vm_init()
507 CPU_ZERO(&vm->active_cpus); in vm_init()
508 CPU_ZERO(&vm->debug_cpus); in vm_init()
509 CPU_ZERO(&vm->startup_cpus); in vm_init()
511 vm->suspend = 0; in vm_init()
512 CPU_ZERO(&vm->suspended_cpus); in vm_init()
515 for (int i = 0; i < vm->maxcpus; i++) { in vm_init()
516 if (vm->vcpu[i] != NULL) in vm_init()
517 vcpu_init(vm->vcpu[i]); in vm_init()
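
The vm_init() references above show what (re)initialization resets: every virtual device is constructed, and the active/debug/startup/suspended CPU sets are cleared before any vCPU runs. The same cpuset_t primitives recur throughout this file. A minimal user-space sketch of that bookkeeping, assuming FreeBSD's <sys/cpuset.h>; struct guest is a hypothetical stand-in for the kernel's struct vm:

    #include <sys/param.h>
    #include <sys/cpuset.h>
    #include <stdio.h>

    struct guest {
            cpuset_t active_cpus;           /* vCPUs that have been started */
            cpuset_t halted_cpus;           /* vCPUs currently in HLT */
            cpuset_t suspended_cpus;        /* vCPUs that acked a suspend */
    };

    int
    main(void)
    {
            struct guest g;

            /* vm_init() clears each set before (re)starting vCPUs. */
            CPU_ZERO(&g.active_cpus);
            CPU_ZERO(&g.halted_cpus);
            CPU_ZERO(&g.suspended_cpus);

            CPU_SET(0, &g.active_cpus);     /* BSP activated */
            CPU_SET(1, &g.active_cpus);     /* AP activated */
            CPU_SET(0, &g.halted_cpus);
            CPU_SET(1, &g.halted_cpus);

            /* The "everyone is idle" test used by vm_handle_hlt():
             * CPU_CMP() returns 0 when two sets are equal. */
            if (CPU_CMP(&g.halted_cpus, &g.active_cpus) == 0)
                    printf("all active vCPUs halted\n");
            return (0);
    }
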
523 vm_disable_vcpu_creation(struct vm *vm) in vm_disable_vcpu_creation() argument
525 sx_xlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
526 vm->dying = true; in vm_disable_vcpu_creation()
527 sx_xunlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
531 vm_alloc_vcpu(struct vm *vm, int vcpuid) in vm_alloc_vcpu() argument
535 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) in vm_alloc_vcpu()
538 vcpu = atomic_load_ptr(&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
542 sx_xlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
543 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
544 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
545 vcpu = vcpu_alloc(vm, vcpuid); in vm_alloc_vcpu()
552 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
555 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
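
vm_alloc_vcpu() allocates vCPU state lazily with a double-checked pattern: a lock-free fast path reads the slot with atomic_load_ptr(), and the slow path re-checks under vcpus_init_lock (refusing once vm->dying has been set by vm_disable_vcpu_creation()) before publishing the new vcpu with atomic_store_rel_ptr(), so fast-path readers can never observe a partially initialized object. A portable C11 analogue, with hypothetical names (slot, alloc_vcpu):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAXCPU 16

    struct vcpu { int id; };

    static _Atomic(struct vcpu *) slot[MAXCPU];
    static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool dying;

    static struct vcpu *
    alloc_vcpu(int id)
    {
            struct vcpu *v;

            if (id < 0 || id >= MAXCPU)
                    return (NULL);

            /* Fast path: the acquire load pairs with the release store
             * below, so a non-NULL pointer implies a fully built vcpu. */
            v = atomic_load_explicit(&slot[id], memory_order_acquire);
            if (v != NULL)
                    return (v);

            pthread_mutex_lock(&init_lock);
            v = atomic_load_explicit(&slot[id], memory_order_relaxed);
            if (v == NULL && !dying) {      /* re-check under the lock */
                    v = malloc(sizeof(*v));
                    if (v != NULL) {
                            v->id = id;
                            atomic_store_explicit(&slot[id], v,
                                memory_order_release);
                    }
            }
            pthread_mutex_unlock(&init_lock);
            return (v);
    }

    int
    main(void)
    {
            struct vcpu *v = alloc_vcpu(3);
            printf("vcpu %d\n", v != NULL ? v->id : -1);
            return (0);
    }
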
560 vm_slock_vcpus(struct vm *vm) in vm_slock_vcpus() argument
562 sx_slock(&vm->vcpus_init_lock); in vm_slock_vcpus()
566 vm_unlock_vcpus(struct vm *vm) in vm_unlock_vcpus() argument
568 sx_unlock(&vm->vcpus_init_lock); in vm_unlock_vcpus()
578 vm_create(const char *name, struct vm **retvm) in vm_create()
580 struct vm *vm; in vm_create() local
598 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO); in vm_create()
599 strcpy(vm->name, name); in vm_create()
600 vm->vmspace = vmspace; in vm_create()
601 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); in vm_create()
602 sx_init(&vm->mem_segs_lock, "vm mem_segs"); in vm_create()
603 sx_init(&vm->vcpus_init_lock, "vm vcpus"); in vm_create()
604 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | in vm_create()
607 vm->sockets = 1; in vm_create()
608 vm->cores = cores_per_package; /* XXX backwards compatibility */ in vm_create()
609 vm->threads = threads_per_core; /* XXX backwards compatibility */ in vm_create()
610 vm->maxcpus = vm_maxcpu; in vm_create()
612 vm_init(vm, true); in vm_create()
614 *retvm = vm; in vm_create()
619 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, in vm_get_topology() argument
622 *sockets = vm->sockets; in vm_get_topology()
623 *cores = vm->cores; in vm_get_topology()
624 *threads = vm->threads; in vm_get_topology()
625 *maxcpus = vm->maxcpus; in vm_get_topology()
629 vm_get_maxcpus(struct vm *vm) in vm_get_maxcpus() argument
631 return (vm->maxcpus); in vm_get_maxcpus()
635 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, in vm_set_topology() argument
639 if ((sockets * cores * threads) > vm->maxcpus) in vm_set_topology()
641 vm->sockets = sockets; in vm_set_topology()
642 vm->cores = cores; in vm_set_topology()
643 vm->threads = threads; in vm_set_topology()
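
vm_set_topology() enforces the one invariant the rest of the file relies on: sockets * cores * threads may not exceed vm->maxcpus (for example, 2 sockets x 4 cores x 2 threads = 16 vCPUs requires maxcpus >= 16), otherwise the caller gets EINVAL. A sketch of the same guard; struct topo is hypothetical:

    #include <errno.h>
    #include <stdint.h>

    struct topo { uint16_t sockets, cores, threads; };

    static int
    set_topology(struct topo *t, uint16_t sockets, uint16_t cores,
        uint16_t threads, uint16_t maxcpus)
    {
            /* Widen before multiplying: three uint16_t factors can
             * overflow 32-bit arithmetic in the worst case. */
            if ((uint64_t)sockets * cores * threads > maxcpus)
                    return (EINVAL);
            t->sockets = sockets;
            t->cores = cores;
            t->threads = threads;
            return (0);
    }
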
648 vm_cleanup(struct vm *vm, bool destroy) in vm_cleanup() argument
654 vm_xlock_memsegs(vm); in vm_cleanup()
656 ppt_unassign_all(vm); in vm_cleanup()
658 if (vm->iommu != NULL) in vm_cleanup()
659 iommu_destroy_domain(vm->iommu); in vm_cleanup()
662 vrtc_cleanup(vm->vrtc); in vm_cleanup()
664 vrtc_reset(vm->vrtc); in vm_cleanup()
665 vpmtmr_cleanup(vm->vpmtmr); in vm_cleanup()
666 vatpit_cleanup(vm->vatpit); in vm_cleanup()
667 vhpet_cleanup(vm->vhpet); in vm_cleanup()
668 vatpic_cleanup(vm->vatpic); in vm_cleanup()
669 vioapic_cleanup(vm->vioapic); in vm_cleanup()
671 for (i = 0; i < vm->maxcpus; i++) { in vm_cleanup()
672 if (vm->vcpu[i] != NULL) in vm_cleanup()
673 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
676 vmmops_cleanup(vm->cookie); in vm_cleanup()
687 mm = &vm->mem_maps[i]; in vm_cleanup()
688 if (destroy || !sysmem_mapping(vm, mm)) in vm_cleanup()
689 vm_free_memmap(vm, i); in vm_cleanup()
694 vm_free_memseg(vm, i); in vm_cleanup()
695 vm_unlock_memsegs(vm); in vm_cleanup()
697 vmmops_vmspace_free(vm->vmspace); in vm_cleanup()
698 vm->vmspace = NULL; in vm_cleanup()
700 free(vm->vcpu, M_VM); in vm_cleanup()
701 sx_destroy(&vm->vcpus_init_lock); in vm_cleanup()
702 sx_destroy(&vm->mem_segs_lock); in vm_cleanup()
703 mtx_destroy(&vm->rendezvous_mtx); in vm_cleanup()
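
vm_cleanup() mirrors vm_init() in reverse, and its destroy flag splits two callers: vm_reinit() passes false, so the RTC is merely reset (line 664) and the vmspace, vcpu array and locks survive for the next vm_init(); vm_destroy() passes true, so every device is cleaned up and the vmspace, vcpu array and locks are freed (lines 697-703). A toy sketch of that split; the device names are placeholders:

    #include <stdbool.h>
    #include <stdio.h>

    static void rtc_reset(void)       { puts("rtc reset");   }
    static void rtc_cleanup(void)     { puts("rtc cleaned"); }
    static void devices_cleanup(void) { puts("devices cleaned, reverse of init"); }

    static void
    cleanup(bool destroy)
    {
            if (destroy)
                    rtc_cleanup();
            else
                    rtc_reset();            /* reinit keeps the device */
            devices_cleanup();

            if (!destroy)
                    return;                 /* vm_reinit() re-runs init */
            puts("vmspace, vcpu array and locks freed");
    }

    int
    main(void)
    {
            cleanup(false);                 /* vm_reinit() path */
            cleanup(true);                  /* vm_destroy() path */
            return (0);
    }
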
708 vm_destroy(struct vm *vm) in vm_destroy() argument
710 vm_cleanup(vm, true); in vm_destroy()
711 free(vm, M_VM); in vm_destroy()
715 vm_reinit(struct vm *vm) in vm_reinit() argument
722 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_reinit()
723 vm_cleanup(vm, false); in vm_reinit()
724 vm_init(vm, false); in vm_reinit()
734 vm_name(struct vm *vm) in vm_name() argument
736 return (vm->name); in vm_name()
740 vm_slock_memsegs(struct vm *vm) in vm_slock_memsegs() argument
742 sx_slock(&vm->mem_segs_lock); in vm_slock_memsegs()
746 vm_xlock_memsegs(struct vm *vm) in vm_xlock_memsegs() argument
748 sx_xlock(&vm->mem_segs_lock); in vm_xlock_memsegs()
752 vm_unlock_memsegs(struct vm *vm) in vm_unlock_memsegs() argument
754 sx_unlock(&vm->mem_segs_lock); in vm_unlock_memsegs()
758 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa) in vm_map_mmio() argument
762 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL) in vm_map_mmio()
769 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len) in vm_unmap_mmio() argument
772 vmm_mmio_free(vm->vmspace, gpa, len); in vm_unmap_mmio()
785 struct vm *vm = vcpu->vm; in vm_mem_allocated() local
797 mm = &vm->mem_maps[i]; in vm_mem_allocated()
802 if (ppt_is_mmio(vm, gpa)) in vm_mem_allocated()
809 vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) in vm_alloc_memseg() argument
814 sx_assert(&vm->mem_segs_lock, SX_XLOCKED); in vm_alloc_memseg()
822 seg = &vm->mem_segs[ident]; in vm_alloc_memseg()
841 vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, in vm_get_memseg() argument
846 sx_assert(&vm->mem_segs_lock, SX_LOCKED); in vm_get_memseg()
851 seg = &vm->mem_segs[ident]; in vm_get_memseg()
862 vm_free_memseg(struct vm *vm, int ident) in vm_free_memseg() argument
869 seg = &vm->mem_segs[ident]; in vm_free_memseg()
877 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, in vm_mmap_memseg() argument
894 seg = &vm->mem_segs[segid]; in vm_mmap_memseg()
907 m = &vm->mem_maps[i]; in vm_mmap_memseg()
917 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa, in vm_mmap_memseg()
925 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len, in vm_mmap_memseg()
928 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len); in vm_mmap_memseg()
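
vm_mmap_memseg() installs a guest-physical mapping in two steps: vm_map_find() creates it, then vm_map_wire() pins it; if wiring fails, the just-created mapping is removed (line 928) so no half-configured region survives. The same map-then-wire-or-roll-back shape, using mmap(2)/mlock(2) as user-space analogues:

    #include <sys/mman.h>
    #include <stddef.h>
    #include <stdio.h>

    static void *
    map_wired(size_t len)
    {
            void *p;

            p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_PRIVATE, -1, 0);
            if (p == MAP_FAILED)
                    return (NULL);
            if (mlock(p, len) != 0) {
                    /* Wiring failed: undo the fresh mapping, as
                     * vm_map_remove() does in the kernel. */
                    (void)munmap(p, len);
                    return (NULL);
            }
            return (p);
    }

    int
    main(void)
    {
            printf("%s\n", map_wired(1 << 20) != NULL ?
                "mapped and wired" : "failed");
            return (0);
    }
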
944 vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) in vm_munmap_memseg() argument
950 m = &vm->mem_maps[i]; in vm_munmap_memseg()
953 vm_free_memmap(vm, i); in vm_munmap_memseg()
962 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, in vm_mmap_getnext() argument
970 mm = &vm->mem_maps[i]; in vm_mmap_getnext()
996 vm_free_memmap(struct vm *vm, int ident) in vm_free_memmap() argument
1001 mm = &vm->mem_maps[ident]; in vm_free_memmap()
1003 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa, in vm_free_memmap()
1012 sysmem_mapping(struct vm *vm, struct mem_map *mm) in sysmem_mapping() argument
1015 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem) in sysmem_mapping()
1022 vmm_sysmem_maxaddr(struct vm *vm) in vmm_sysmem_maxaddr() argument
1030 mm = &vm->mem_maps[i]; in vmm_sysmem_maxaddr()
1031 if (sysmem_mapping(vm, mm)) { in vmm_sysmem_maxaddr()
1040 vm_iommu_map(struct vm *vm) in vm_iommu_map() argument
1046 sx_assert(&vm->mem_segs_lock, SX_LOCKED); in vm_iommu_map()
1049 mm = &vm->mem_maps[i]; in vm_iommu_map()
1050 if (!sysmem_mapping(vm, mm)) in vm_iommu_map()
1061 hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa); in vm_iommu_map()
1077 vm, (uintmax_t)gpa, (uintmax_t)hpa)); in vm_iommu_map()
1079 iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE); in vm_iommu_map()
1087 vm_iommu_unmap(struct vm *vm) in vm_iommu_unmap() argument
1093 sx_assert(&vm->mem_segs_lock, SX_LOCKED); in vm_iommu_unmap()
1096 mm = &vm->mem_maps[i]; in vm_iommu_unmap()
1097 if (!sysmem_mapping(vm, mm)) in vm_iommu_unmap()
1109 vmspace_pmap(vm->vmspace), gpa))), in vm_iommu_unmap()
1111 vm, (uintmax_t)gpa)); in vm_iommu_unmap()
1112 iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE); in vm_iommu_unmap()
1120 iommu_invalidate_tlb(vm->iommu); in vm_iommu_unmap()
1124 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func) in vm_unassign_pptdev() argument
1128 error = ppt_unassign_device(vm, bus, slot, func); in vm_unassign_pptdev()
1132 if (ppt_assigned_devices(vm) == 0) in vm_unassign_pptdev()
1133 vm_iommu_unmap(vm); in vm_unassign_pptdev()
1139 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func) in vm_assign_pptdev() argument
1145 if (ppt_assigned_devices(vm) == 0) { in vm_assign_pptdev()
1146 KASSERT(vm->iommu == NULL, in vm_assign_pptdev()
1148 maxaddr = vmm_sysmem_maxaddr(vm); in vm_assign_pptdev()
1149 vm->iommu = iommu_create_domain(maxaddr); in vm_assign_pptdev()
1150 if (vm->iommu == NULL) in vm_assign_pptdev()
1152 vm_iommu_map(vm); in vm_assign_pptdev()
1155 error = ppt_assign_device(vm, bus, slot, func); in vm_assign_pptdev()
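
Passthrough devices drive the IOMMU lazily: vm_assign_pptdev() creates the domain and maps all system memory into it only when the first device is assigned (lines 1145-1152), and vm_unassign_pptdev() unmaps when the last device goes away (lines 1132-1133); the domain itself is destroyed later, in vm_cleanup() (lines 658-659). A counter sketch of those first-in/last-out transitions, names hypothetical:

    #include <stdio.h>

    static int ndevices;

    static void
    assign_device(void)
    {
            if (ndevices++ == 0)            /* first device */
                    puts("iommu domain created, sysmem mapped");
    }

    static void
    unassign_device(void)
    {
            if (--ndevices == 0)            /* last device */
                    puts("sysmem unmapped from iommu");
    }

    int
    main(void)
    {
            assign_device();
            assign_device();                /* no-op: domain exists */
            unassign_device();
            unassign_device();              /* tears the mappings down */
            return (0);
    }
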
1160 _vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, in _vm_gpa_hold() argument
1173 mm = &vm->mem_maps[i]; in _vm_gpa_hold()
1175 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map, in _vm_gpa_hold()
1203 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie)); in vm_gpa_hold()
1207 vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, in vm_gpa_hold_global() argument
1210 sx_assert(&vm->mem_segs_lock, SX_LOCKED); in vm_gpa_hold_global()
1211 return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie)); in vm_gpa_hold_global()
1438 struct vm *vm = vcpu->vm; in vm_handle_rendezvous() local
1445 mtx_lock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1446 while (vm->rendezvous_func != NULL) { in vm_handle_rendezvous()
1448 CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus); in vm_handle_rendezvous()
1450 if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && in vm_handle_rendezvous()
1451 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { in vm_handle_rendezvous()
1453 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); in vm_handle_rendezvous()
1454 CPU_SET(vcpuid, &vm->rendezvous_done_cpus); in vm_handle_rendezvous()
1456 if (CPU_CMP(&vm->rendezvous_req_cpus, in vm_handle_rendezvous()
1457 &vm->rendezvous_done_cpus) == 0) { in vm_handle_rendezvous()
1459 CPU_ZERO(&vm->rendezvous_req_cpus); in vm_handle_rendezvous()
1460 vm->rendezvous_func = NULL; in vm_handle_rendezvous()
1461 wakeup(&vm->rendezvous_func); in vm_handle_rendezvous()
1465 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, in vm_handle_rendezvous()
1468 mtx_unlock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1472 mtx_lock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1475 mtx_unlock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
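
The vm_handle_rendezvous() references spell out the participant side of the rendezvous protocol: while rendezvous_func is set, each targeted vCPU runs the callback exactly once, records itself in rendezvous_done_cpus, and whichever vCPU completes the set clears the request and wakeup()s the rest; everyone else sleeps on rendezvous_func. A condensed pthread analogue, omitting the active_cpus pruning and the real code's sleep timeouts; all names here are stand-ins:

    #include <sys/param.h>
    #include <sys/cpuset.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rv_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t rv_cv = PTHREAD_COND_INITIALIZER;
    static void (*rv_func)(int, void *);
    static void *rv_arg;
    static cpuset_t rv_req, rv_done;

    static void
    handle_rendezvous(int cpu)
    {
            pthread_mutex_lock(&rv_mtx);
            while (rv_func != NULL) {
                    if (CPU_ISSET(cpu, &rv_req) &&
                        !CPU_ISSET(cpu, &rv_done)) {
                            (*rv_func)(cpu, rv_arg);    /* run once */
                            CPU_SET(cpu, &rv_done);
                    }
                    if (CPU_CMP(&rv_req, &rv_done) == 0) {
                            CPU_ZERO(&rv_req);  /* last one cleans up */
                            rv_func = NULL;
                            pthread_cond_broadcast(&rv_cv);
                            break;
                    }
                    /* stands in for mtx_sleep() on rendezvous_func */
                    pthread_cond_wait(&rv_cv, &rv_mtx);
            }
            pthread_mutex_unlock(&rv_mtx);
    }

    static void
    say(int cpu, void *arg)
    {
            (void)arg;
            printf("vcpu %d ran the rendezvous callback\n", cpu);
    }

    int
    main(void)
    {
            CPU_SET(0, &rv_req);
            rv_func = say;
            handle_rendezvous(0);
            return (0);
    }
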
1485 struct vm *vm = vcpu->vm; in vm_handle_hlt() local
1496 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); in vm_handle_hlt()
1509 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle) in vm_handle_hlt()
1538 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1540 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) { in vm_handle_hlt()
1563 &vm->halted_cpus); in vm_handle_hlt()
1572 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1577 vm_suspend(vm, VM_SUSPEND_HALT); in vm_handle_hlt()
1585 struct vm *vm = vcpu->vm; in vm_handle_paging() local
1601 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace), in vm_handle_paging()
1611 map = &vm->vmspace->vm_map; in vm_handle_paging()
1702 struct vm *vm = vcpu->vm; in vm_handle_suspend() local
1709 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1720 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_handle_suspend()
1725 if (vm->rendezvous_func == NULL) { in vm_handle_suspend()
1747 for (i = 0; i < vm->maxcpus; i++) { in vm_handle_suspend()
1748 if (CPU_ISSET(i, &vm->suspended_cpus)) { in vm_handle_suspend()
1749 vcpu_notify_event(vm_vcpu(vm, i), false); in vm_handle_suspend()
1803 vm_suspend(struct vm *vm, enum vm_suspend_how how) in vm_suspend() argument
1810 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { in vm_suspend()
1811 VM_CTR2(vm, "virtual machine already suspended %d/%d", in vm_suspend()
1812 vm->suspend, how); in vm_suspend()
1816 VM_CTR1(vm, "virtual machine successfully suspended %d", how); in vm_suspend()
1821 for (i = 0; i < vm->maxcpus; i++) { in vm_suspend()
1822 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend()
1823 vcpu_notify_event(vm_vcpu(vm, i), false); in vm_suspend()
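
vm_suspend() turns vm->suspend into a first-writer-wins latch: atomic_cmpset_int() installs the suspend reason only if it is still 0, so a second, racing suspend request just logs and backs off; afterwards every active vCPU is kicked with vcpu_notify_event() so it leaves the guest and observes the latch. The same idea in C11 atomics (enum names shortened here):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { SUSPEND_NONE, SUSPEND_RESET, SUSPEND_POWEROFF, SUSPEND_HALT };

    static _Atomic int suspend_how;

    static int
    suspend(int how)
    {
            int expected = SUSPEND_NONE;

            /* Only the first caller installs a reason; later callers
             * see the value that won and leave it alone. */
            if (!atomic_compare_exchange_strong(&suspend_how, &expected,
                how)) {
                    printf("already suspended: %d\n", expected);
                    return (-1);
            }
            /* here: notify every active vCPU to notice suspend_how */
            return (0);
    }

    int
    main(void)
    {
            suspend(SUSPEND_HALT);
            suspend(SUSPEND_RESET);         /* loses the race, logs */
            return (0);
    }
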
1832 struct vm *vm = vcpu->vm; in vm_exit_suspended() local
1835 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, in vm_exit_suspended()
1836 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); in vm_exit_suspended()
1842 vmexit->u.suspended.how = vm->suspend; in vm_exit_suspended()
1895 struct vm *vm = vcpu->vm; in vm_run() local
1906 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
1909 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) in vm_run()
1912 pmap = vmspace_pmap(vm->vmspace); in vm_run()
1914 evinfo.rptr = &vm->rendezvous_req_cpus; in vm_run()
1915 evinfo.sptr = &vm->suspend; in vm_run()
1951 vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector); in vm_run()
2131 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT); in nested_fault()
2368 struct vm *
2371 return (vcpu->vm); in vcpu_vm()
2381 vm_vcpu(struct vm *vm, int vcpuid) in vm_vcpu() argument
2383 return (vm->vcpu[vcpuid]); in vm_vcpu()
2393 vm_ioapic(struct vm *vm) in vm_ioapic() argument
2396 return (vm->vioapic); in vm_ioapic()
2400 vm_hpet(struct vm *vm) in vm_hpet() argument
2403 return (vm->vhpet); in vm_hpet()
2449 vm_iommu_domain(struct vm *vm) in vm_iommu_domain() argument
2452 return (vm->iommu); in vm_iommu_domain()
2484 struct vm *vm = vcpu->vm; in vm_activate_cpu() local
2486 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
2490 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
2495 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) in vm_suspend_cpu() argument
2498 vm->debug_cpus = vm->active_cpus; in vm_suspend_cpu()
2499 for (int i = 0; i < vm->maxcpus; i++) { in vm_suspend_cpu()
2500 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend_cpu()
2501 vcpu_notify_event(vm_vcpu(vm, i), false); in vm_suspend_cpu()
2504 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
2507 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
2514 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) in vm_resume_cpu() argument
2518 CPU_ZERO(&vm->debug_cpus); in vm_resume_cpu()
2520 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
2523 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
2532 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
2536 vm_active_cpus(struct vm *vm) in vm_active_cpus() argument
2539 return (vm->active_cpus); in vm_active_cpus()
2543 vm_debug_cpus(struct vm *vm) in vm_debug_cpus() argument
2546 return (vm->debug_cpus); in vm_debug_cpus()
2550 vm_suspended_cpus(struct vm *vm) in vm_suspended_cpus() argument
2553 return (vm->suspended_cpus); in vm_suspended_cpus()
2561 vm_start_cpus(struct vm *vm, const cpuset_t *tostart) in vm_start_cpus() argument
2565 mtx_lock(&vm->rendezvous_mtx); in vm_start_cpus()
2566 CPU_AND(&set, &vm->startup_cpus, tostart); in vm_start_cpus()
2567 CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set); in vm_start_cpus()
2568 mtx_unlock(&vm->rendezvous_mtx); in vm_start_cpus()
2573 vm_await_start(struct vm *vm, const cpuset_t *waiting) in vm_await_start() argument
2575 mtx_lock(&vm->rendezvous_mtx); in vm_await_start()
2576 CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting); in vm_await_start()
2577 mtx_unlock(&vm->rendezvous_mtx); in vm_await_start()
2655 vm_get_vmspace(struct vm *vm) in vm_get_vmspace() argument
2658 return (vm->vmspace); in vm_get_vmspace()
2662 vm_apicid2vcpuid(struct vm *vm, int apicid) in vm_apicid2vcpuid() argument
2674 struct vm *vm = vcpu->vm; in vm_smp_rendezvous() local
2683 mtx_lock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2684 if (vm->rendezvous_func != NULL) { in vm_smp_rendezvous()
2691 mtx_unlock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2697 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " in vm_smp_rendezvous()
2701 vm->rendezvous_req_cpus = dest; in vm_smp_rendezvous()
2702 CPU_ZERO(&vm->rendezvous_done_cpus); in vm_smp_rendezvous()
2703 vm->rendezvous_arg = arg; in vm_smp_rendezvous()
2704 vm->rendezvous_func = func; in vm_smp_rendezvous()
2705 mtx_unlock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2711 for (i = 0; i < vm->maxcpus; i++) { in vm_smp_rendezvous()
2713 vcpu_notify_event(vm_vcpu(vm, i), false); in vm_smp_rendezvous()
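
vm_smp_rendezvous() is the initiator: under rendezvous_mtx it first checks for a rendezvous already in flight, and if one exists it drops the lock, services it as an ordinary participant, and retries; only then does it install rendezvous_req_cpus/arg/func, notify every vCPU, and join in itself. Continuing the hypothetical pthread sketch given with the vm_handle_rendezvous() references above:

    /* Initiator side; reuses rv_mtx, rv_cv, rv_req, rv_done, rv_func,
     * rv_arg and handle_rendezvous() from the earlier sketch. */
    static void
    smp_rendezvous(int self, cpuset_t dest, void (*func)(int, void *),
        void *arg)
    {
    restart:
            pthread_mutex_lock(&rv_mtx);
            if (rv_func != NULL) {
                    /* Another rendezvous is active: take part in it
                     * rather than deadlock, then try again. */
                    pthread_mutex_unlock(&rv_mtx);
                    handle_rendezvous(self);
                    goto restart;
            }
            rv_req = dest;
            CPU_ZERO(&rv_done);
            rv_arg = arg;
            rv_func = func;
            /* broadcast stands in for vcpu_notify_event() per vCPU */
            pthread_cond_broadcast(&rv_cv);
            pthread_mutex_unlock(&rv_mtx);
            handle_rendezvous(self);        /* participate ourselves */
    }
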
2720 vm_atpic(struct vm *vm) in vm_atpic() argument
2722 return (vm->vatpic); in vm_atpic()
2726 vm_atpit(struct vm *vm) in vm_atpit() argument
2728 return (vm->vatpit); in vm_atpit()
2732 vm_pmtmr(struct vm *vm) in vm_pmtmr() argument
2735 return (vm->vpmtmr); in vm_pmtmr()
2739 vm_rtc(struct vm *vm) in vm_rtc() argument
2742 return (vm->vrtc); in vm_rtc()
2865 vmspace_resident_count(vcpu->vm->vmspace)); in vm_get_rescnt()
2875 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace))); in vm_get_wiredcnt()
2884 vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vcpus() argument
2892 maxcpus = vm_get_maxcpus(vm); in vm_snapshot_vcpus()
2894 vcpu = vm->vcpu[i]; in vm_snapshot_vcpus()
2924 vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vm() argument
2928 ret = vm_snapshot_vcpus(vm, meta); in vm_snapshot_vm()
2932 SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done); in vm_snapshot_vm()
2938 vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vcpu() argument
2946 maxcpus = vm_get_maxcpus(vm); in vm_snapshot_vcpu()
2948 vcpu = vm->vcpu[i]; in vm_snapshot_vcpu()
2968 vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_req() argument
2974 ret = vm_snapshot_vcpu(vm, meta); in vm_snapshot_req()
2977 ret = vm_snapshot_vm(vm, meta); in vm_snapshot_req()
2980 ret = vioapic_snapshot(vm_ioapic(vm), meta); in vm_snapshot_req()
2983 ret = vlapic_snapshot(vm, meta); in vm_snapshot_req()
2986 ret = vhpet_snapshot(vm_hpet(vm), meta); in vm_snapshot_req()
2989 ret = vatpic_snapshot(vm_atpic(vm), meta); in vm_snapshot_req()
2992 ret = vatpit_snapshot(vm_atpit(vm), meta); in vm_snapshot_req()
2995 ret = vpmtmr_snapshot(vm_pmtmr(vm), meta); in vm_snapshot_req()
2998 ret = vrtc_snapshot(vm_rtc(vm), meta); in vm_snapshot_req()
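
vm_snapshot_req() is the single entry point the save/restore code calls per device: it dispatches on the request carried in vm_snapshot_meta, forwarding to the vCPU, VM-wide and per-device snapshot routines listed above. A hedged sketch of that fan-out; the enum values and handlers are hypothetical stand-ins for whatever meta actually carries:

    #include <stdio.h>

    enum snap_dev { SNAP_VCPU, SNAP_VM, SNAP_IOAPIC, SNAP_RTC };

    static int snap_vcpu(void)   { puts("vcpu state");    return (0); }
    static int snap_vm(void)     { puts("vm-wide state"); return (0); }
    static int snap_ioapic(void) { puts("ioapic");        return (0); }
    static int snap_rtc(void)    { puts("rtc");           return (0); }

    static int
    snapshot_req(enum snap_dev dev)
    {
            switch (dev) {
            case SNAP_VCPU:
                    return (snap_vcpu());
            case SNAP_VM:
                    return (snap_vm());
            case SNAP_IOAPIC:
                    return (snap_ioapic());
            case SNAP_RTC:
                    return (snap_rtc());
            default:
                    return (-1);            /* unknown device */
            }
    }

    int
    main(void)
    {
            return (snapshot_req(SNAP_RTC));
    }
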
3015 vm_restore_time(struct vm *vm) in vm_restore_time() argument
3024 error = vhpet_restore_time(vm_hpet(vm)); in vm_restore_time()
3028 maxcpus = vm_get_maxcpus(vm); in vm_restore_time()
3030 vcpu = vm->vcpu[i]; in vm_restore_time()