// SPDX-License-Identifier: GPL-2.0-only
/*
 * svm_vmcall_test
 *
 * Copyright (C) 2020, Red Hat, Inc.
 *
 * Nested SVM testing: VMCALL
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"

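/*
 * L2 guest code: issue a single VMCALL.  L1 expects to observe the
 * resulting #VMEXIT with exit code SVM_EXIT_VMMCALL.
 */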
static void l2_guest_code(struct svm_test_data *svm)
{
	__asm__ __volatile__("vmcall");
}

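/*
 * L1 guest code: set up the nested guest, enter L2, and verify that the
 * VMCALL in L2 triggered a #VMEXIT with exit code SVM_EXIT_VMMCALL.
 */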
static void l1_guest_code(struct svm_test_data *svm)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	/* Prepare for L2 execution. */
	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	run_guest(vmcb, svm->vmcb_gpa);

	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_DONE();
}

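/*
 * Host side: requires SVM support, creates a VM with one vCPU running
 * l1_guest_code, and passes it the guest-virtual address of the SVM
 * state allocated by vcpu_alloc_svm().
 */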
int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	vm_vaddr_t svm_gva;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

	vcpu_alloc_svm(vm, &svm_gva);
	vcpu_args_set(vcpu, 1, svm_gva);

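	/*
	 * Run the vCPU until the guest signals completion.  Ucalls are
	 * performed via port I/O, so every exit back to userspace should
	 * be KVM_EXIT_IO.
	 */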
	for (;;) {
		volatile struct kvm_run *run = vcpu->run;
		struct ucall uc;

		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
		}
	}
done:
	kvm_vm_free(vm);
	return 0;
}