// SPDX-License-Identifier: GPL-2.0
/*
 * steal/stolen time test
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
#include <asm/kvm.h>
#include <asm/kvm_para.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

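/*
 * The steal-time structures live in their own memslot at ST_GPA_BASE (1 GiB)
 * and are identity mapped (GVA == GPA) in main(), so each st_gva[] pointer is
 * usable by the guest directly and by the host via addr_gva2hva().
 */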
#define NR_VCPUS		4
#define ST_GPA_BASE		(1 << 30)

static void *st_gva[NR_VCPUS];
static uint64_t guest_stolen_time[NR_VCPUS];

#if defined(__x86_64__)

/* steal_time must have 64-byte alignment; round its size up to a multiple of 64 */
#define STEAL_TIME_SIZE		((sizeof(struct kvm_steal_time) + 63) & ~63)

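/*
 * The version field works like a seqcount: KVM bumps it before and after
 * updating the record, so an odd value means an update is in flight. flags
 * and preempted are expected to read as zero while this vCPU is running.
 */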
static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT(READ_ONCE(st->flags) == 0);
	GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
}

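/*
 * Guest side of the test: check that MSR_KVM_STEAL_TIME reads back exactly as
 * the host programmed it, then publish st->steal at each sync point so the
 * host can compare it against the observed run delay. The version comparison
 * confirms the host refreshed the record between the two runs.
 */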
static void guest_code(int cpu)
{
	struct kvm_steal_time *st = st_gva[cpu];
	uint32_t version;

	GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}

static void steal_time_init(struct kvm_vm *vm)
{
	int i;

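	/*
	 * KVM_FEATURE_STEAL_TIME is a bit number in CPUID.0x4000_0001:EAX,
	 * not a mask, so shift it before testing the feature bit.
	 */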
	if (!(kvm_get_supported_cpuid_entry(KVM_CPUID_FEATURES)->eax &
	      (1 << KVM_FEATURE_STEAL_TIME))) {
		print_skip("steal-time not supported");
		exit(KSFT_SKIP);
	}

	for (i = 0; i < NR_VCPUS; ++i) {
		int ret;

		/* ST_GPA_BASE is identity mapped */
		st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
		sync_global_to_guest(vm, st_gva[i]);

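		/*
		 * Setting reserved bits in the MSR must fail. _vcpu_set_msr()
		 * returns the number of MSRs KVM actually set, so 0 means
		 * KVM rejected the write, as expected here.
		 */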
		ret = _vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
		TEST_ASSERT(ret == 0, "Bad GPA didn't fail");

		vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
	}
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
	int i;

	pr_info("VCPU%d:\n", vcpuid);
	pr_info("    steal:     %lld\n", st->steal);
	pr_info("    version:   %d\n", st->version);
	pr_info("    flags:     %d\n", st->flags);
	pr_info("    preempted: %d\n", st->preempted);
	pr_info("    u8_pad:    ");
	for (i = 0; i < 3; ++i)
		pr_info("%d", st->u8_pad[i]);
	pr_info("\n    pad:       ");
	for (i = 0; i < 11; ++i)
		pr_info("%d", st->pad[i]);
	pr_info("\n");
}

#elif defined(__aarch64__)

/* The PV_TIME_ST region must have 64-byte alignment; round its size up to a multiple of 64 */
#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)

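/*
 * SMCCC function IDs from the Arm SMCCC and Paravirtualized Time (DEN 0057A)
 * specs: SMCCC_ARCH_FEATURES probes whether a function is implemented,
 * PV_TIME_FEATURES probes the PV-time calls, and PV_TIME_ST returns the IPA
 * of the calling vCPU's stolen-time region.
 */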
#define SMCCC_ARCH_FEATURES	0x80000001
#define PV_TIME_FEATURES	0xc5000020
#define PV_TIME_ST		0xc5000021

struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};

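/*
 * Minimal SMCCC call: the function ID goes in w0 and the single argument in
 * x1; the result comes back in x0. HVC is the conduit since the guest runs
 * at EL1 under KVM.
 */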
static int64_t smccc(uint32_t func, uint64_t arg)
{
	unsigned long ret;

	asm volatile(
		"mov	w0, %w1\n"
		"mov	x1, %2\n"
		"hvc	#0\n"
		"mov	%0, x0\n"
	: "=r" (ret) : "r" (func), "r" (arg) :
	  "x0", "x1", "x2", "x3");

	return ret;
}

static void check_status(struct st_time *st)
{
	GUEST_ASSERT(READ_ONCE(st->rev) == 0);
	GUEST_ASSERT(READ_ONCE(st->attr) == 0);
}

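/*
 * Discovery sequence per DEN 0057A: confirm PV_TIME_FEATURES is implemented,
 * that it reports itself and PV_TIME_ST as supported, then ask PV_TIME_ST
 * for the stolen-time region, which must be the IPA the host configured.
 */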
static void guest_code(int cpu)
{
	struct st_time *st;
	int64_t status;

	status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT(status == 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT(status == 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
	GUEST_ASSERT(status == 0);

	status = smccc(PV_TIME_ST, 0);
	GUEST_ASSERT(status != -1);
	GUEST_ASSERT(status == (ulong)st_gva[cpu]);

	st = (struct st_time *)status;
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_SYNC(1);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_DONE();
}

static void steal_time_init(struct kvm_vm *vm)
{
	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
	};
	int i, ret;

	ret = _vcpu_ioctl(vm, 0, KVM_HAS_DEVICE_ATTR, &dev);
	if (ret != 0 && errno == ENXIO) {
		print_skip("steal-time not supported");
		exit(KSFT_SKIP);
	}

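	/*
	 * For each vCPU: a misaligned IPA (low bit set) must be rejected with
	 * EINVAL, the properly aligned IPA must be accepted, and setting the
	 * attribute a second time must fail with EEXIST.
	 */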
	for (i = 0; i < NR_VCPUS; ++i) {
		uint64_t st_ipa;

		vcpu_ioctl(vm, i, KVM_HAS_DEVICE_ATTR, &dev);

		dev.addr = (uint64_t)&st_ipa;

		/* ST_GPA_BASE is identity mapped */
		st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
		sync_global_to_guest(vm, st_gva[i]);

		st_ipa = (ulong)st_gva[i] | 1;
		ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
		TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");

		st_ipa = (ulong)st_gva[i];
		vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);

		ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
		TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
	}
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);

	pr_info("VCPU%d:\n", vcpuid);
	pr_info("    rev:     %d\n", st->rev);
	pr_info("    attr:    %d\n", st->attr);
	pr_info("    st_time: %ld\n", st->st_time);
}

#endif

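/*
 * Hog the test CPU for at least MIN_RUN_DELAY_NS. The thread runs with the
 * same CPU 0 affinity as the vCPUs, so while it spins the vCPU thread sits
 * on the runqueue accruing steal time.
 */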
static void *do_steal_time(void *arg)
{
	struct timespec ts, stop;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);

	while (1) {
		clock_gettime(CLOCK_MONOTONIC, &ts);
		if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
			break;
	}

	return NULL;
}

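/*
 * Enter the guest once; it runs until its next GUEST_SYNC()/GUEST_DONE()
 * ucall. A guest assert or any other exit reason fails the test.
 */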
static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct ucall uc;

	vcpu_args_set(vm, vcpuid, 1, vcpuid);

	vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);

	switch (get_ucall(vm, vcpuid, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
			    __FILE__, uc.args[1]);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
	}
}

int main(int ac, char **av)
{
	struct kvm_vm *vm;
	pthread_attr_t attr;
	pthread_t thread;
	cpu_set_t cpuset;
	unsigned int gpages;
	long stolen_time;
	long run_delay;
	bool verbose;
	int i;

	verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));

	/* Set CPU affinity so we can force preemption of the VCPU */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	/* Create a one-vCPU guest and an identity-mapped memslot for the steal-time structures */
	vm = vm_create_default(0, 0, guest_code);
	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
	ucall_init(vm, NULL);

	/* Add the rest of the VCPUs */
	for (i = 1; i < NR_VCPUS; ++i)
		vm_vcpu_add_default(vm, i, guest_code);

	steal_time_init(vm);

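	/*
	 * get_run_delay() reports how long the calling thread has been
	 * runnable but not running (the run_delay field of the thread's
	 * /proc schedstat; see get_run_delay() in the selftests' test_util).
	 * KVM_RUN is issued from this same thread, so the steal time the
	 * guest observes is bounded by this thread's run delay.
	 */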
	/* Run test on each VCPU */
	for (i = 0; i < NR_VCPUS; ++i) {
		/* First VCPU run initializes steal-time */
		run_vcpu(vm, i);

		/* Second VCPU run, expect guest stolen time to be <= run_delay */
		run_vcpu(vm, i);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i];
		run_delay = get_run_delay();
		TEST_ASSERT(stolen_time <= run_delay,
			    "Expected stolen time <= %ld, got %ld",
			    run_delay, stolen_time);

		/* Steal time from the VCPU. The steal time thread has the same CPU affinity as the VCPUs. */
		run_delay = get_run_delay();
		pthread_create(&thread, &attr, do_steal_time, NULL);
		do
			sched_yield();
		while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
		pthread_join(thread, NULL);
		run_delay = get_run_delay() - run_delay;
		TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
			    "Expected run_delay >= %ld, got %ld",
			    MIN_RUN_DELAY_NS, run_delay);

		/* Run VCPU again to confirm stolen time is consistent with run_delay */
		run_vcpu(vm, i);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i] - stolen_time;
		TEST_ASSERT(stolen_time >= run_delay,
			    "Expected stolen time >= %ld, got %ld",
			    run_delay, stolen_time);

		if (verbose) {
			pr_info("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld", i,
				guest_stolen_time[i], stolen_time);
			if (stolen_time == run_delay)
				pr_info(" (BONUS: guest test-stolen-time even exactly matches test-run_delay)");
			pr_info("\n");
			steal_time_dump(vm, i);
		}
	}

	return 0;
}