// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM_GET/SET_* tests
 *
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for vCPU state save/restore, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define L2_GUEST_STACK_SIZE 256

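/*
 * The GUEST_SYNC(stage) values below form one monotonically increasing
 * sequence across the L1 and L2 guest code.  The host runs the vCPU one
 * stage at a time and saves/restores all vCPU state (including nested
 * state) into a freshly created VM between stages.
 */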
void svm_l2_guest_code(void)
{
	GUEST_SYNC(4);
	/* Exit to L1 */
	vmcall();
	GUEST_SYNC(6);
	/* Done, exit to L1 and never come back.  */
	vmcall();
}

static void svm_l1_guest_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	GUEST_ASSERT(svm->vmcb_gpa);
	/* Prepare for L2 execution. */
	generic_svm_setup(svm, svm_l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(3);
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(5);
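	/* The hypercall instruction that caused the exit is 3 bytes; skip it. */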
	vmcb->save.rip += 3;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(7);
}

void vmx_l2_guest_code(void)
{
	GUEST_SYNC(6);

	/* Exit to L1 */
	vmcall();

	/* L1 has now set up a shadow VMCS for us.  */
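	/*
	 * The vmread()/vmwrite() of GUEST_RIP below operate on that shadow
	 * VMCS; each GUEST_SYNC() gives the host a chance to save and restore
	 * the vCPU, and the values written here must survive that round trip.
	 */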
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_SYNC(10);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_SYNC(11);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
	GUEST_SYNC(12);

	/* Done, exit to L1 and never come back.  */
	vmcall();
}

static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, vmx_l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Check that the launched state is preserved.  */
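	/* VMLAUNCH must fail now that the VMCS launch state is "launched". */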
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_SYNC(7);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

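	/* VMCALL is a 3-byte instruction; advance L2's RIP past it. */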
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

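	/* Enable VMCS shadowing and link in the shadow VMCS. */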
	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

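	/*
	 * Make the shadow VMCS current.  VM entry must fail while a shadow
	 * VMCS is current, both before and after the host save/restore at
	 * GUEST_SYNC(8).
	 */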
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(8);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

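	/*
	 * Switch back to the ordinary VMCS and re-enter L2, which will read
	 * the 0xc0ffee value out of the shadow VMCS.
	 */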
	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

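	/*
	 * L2's vmwrite() of 0xc0ffffee must have landed in the shadow VMCS,
	 * and entering a shadow VMCS must still fail, including after the
	 * save/restore at GUEST_SYNC(13).
	 */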
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_SYNC(13);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}

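/*
 * Top-level guest code.  @arg is the guest virtual address of the nested
 * (SVM or VMX) test data, or 0 if nested state is not being exercised.
 */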
static void __attribute__((__flatten__)) guest_code(void *arg)
{
	GUEST_SYNC(1);
	GUEST_SYNC(2);

	if (arg) {
		if (this_cpu_has(X86_FEATURE_SVM))
			svm_l1_guest_code(arg);
		else
			vmx_l1_guest_code(arg);
	}

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

	vcpu_regs_get(vcpu, &regs1);

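	/* Only exercise nested state if KVM can actually save/restore it. */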
	if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
		if (kvm_cpu_has(X86_FEATURE_SVM))
			vcpu_alloc_svm(vm, &nested_gva);
		else if (kvm_cpu_has(X86_FEATURE_VMX))
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip nested state checks\n");

	vcpu_args_set(vcpu, 1, nested_gva);

	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Stage %d: Unexpected GUEST_SYNC value, got %lx",
			    stage, (ulong)uc.args[1]);

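		/* Save all vCPU state and a register snapshot to compare against. */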
		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		run = vcpu->run;
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}