// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Tests for Hyper-V clocksources
 */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

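/*
 * First fields of the Hyper-V reference TSC page: the guest is supposed to
 * re-read tsc_sequence to detect concurrent updates; KVM keeps it at 0 while
 * the page contents are not valid.
 */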
struct ms_hyperv_tsc_page {
	volatile u32 tsc_sequence;
	u32 reserved1;
	volatile u64 tsc_scale;
	volatile s64 tsc_offset;
} __packed;

#define HV_X64_MSR_GUEST_OS_ID			0x40000000
#define HV_X64_MSR_TIME_REF_COUNT		0x40000020
#define HV_X64_MSR_REFERENCE_TSC		0x40000021
#define HV_X64_MSR_TSC_FREQUENCY		0x40000022
#define HV_X64_MSR_REENLIGHTENMENT_CONTROL	0x40000106
#define HV_X64_MSR_TSC_EMULATION_CONTROL	0x40000107

/* Simplified mul_u64_u64_shr() */
static inline u64 mul_u64_u64_shr64(u64 a, u64 b)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

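	/*
	 * 32x32 -> 64 bit partial products of the 128-bit a * b; the
	 * a0.l.low * b0.l.low term only affects the low 64 bits (plus a
	 * carry, which this simplified version ignores).
	 */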
	rm.ll = (u64)a0.l.low * b0.l.high;
	rn.ll = (u64)a0.l.high * b0.l.low;
	rh.ll = (u64)a0.l.high * b0.l.high;

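	/*
	 * Fold the middle partial products into the high 64 bits; c can
	 * exceed 32 bits, so propagate its carry into rh.l.high.
	 */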
	rh.l.low = c = rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	return rh.ll;
}

static inline void nop_loop(void)
{
	int i;

	for (i = 0; i < 1000000; i++)
		asm volatile("nop");
}

static inline void check_tsc_msr_rdtsc(void)
{
	u64 tsc_freq, r1, r2, t1, t2;
	s64 delta_ns;

	tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
	GUEST_ASSERT(tsc_freq > 0);

	/* First, check MSR-based clocksource */
	r1 = rdtsc();
	t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
	nop_loop();
	r2 = rdtsc();
	t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);

	GUEST_ASSERT(r2 > r1 && t2 > t1);

	/* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
	delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
	if (delta_ns < 0)
		delta_ns = -delta_ns;

	/*
	 * 1% tolerance: (t2 - t1) is in 100ns units, so (t2 - t1) * 100
	 * is the elapsed time in nanoseconds.
	 */
	GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
}

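/*
 * Reference TSC page readout, in 100ns units:
 *   time = ((rdtsc() * tsc_scale) >> 64) + tsc_offset
 */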
static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
{
	return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
}

static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
{
	u64 r1, r2, t1, t2;

	/* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
	t1 = get_tscpage_ts(tsc_page);
	r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);

	/* 10 ms tolerance (100000 ticks of 100ns) */
	GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
	nop_loop();

	t2 = get_tscpage_ts(tsc_page);
	r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
	GUEST_ASSERT(r2 >= t2 && r2 - t2 < 100000);
}

static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa)
{
	u64 tsc_scale, tsc_offset;

	/* Set Guest OS id to enable Hyper-V emulation */
	GUEST_SYNC(1);
	wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);
	GUEST_SYNC(2);

	check_tsc_msr_rdtsc();

	GUEST_SYNC(3);

	/* Set up the TSC page in disabled state, check that it's clean */
	wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa);
	GUEST_ASSERT(tsc_page->tsc_sequence == 0);
	GUEST_ASSERT(tsc_page->tsc_scale == 0);
	GUEST_ASSERT(tsc_page->tsc_offset == 0);

	GUEST_SYNC(4);

	/* Set up the TSC page in enabled state (bit 0 of the MSR is the enable bit) */
	wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa | 0x1);
	GUEST_ASSERT(tsc_page->tsc_sequence != 0);

	GUEST_SYNC(5);

	check_tsc_msr_tsc_page(tsc_page);

	GUEST_SYNC(6);

	tsc_offset = tsc_page->tsc_offset;
	/* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */

	GUEST_SYNC(7);
	/* Sanity check TSC page timestamp, it should be close to 0 */
	GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);

	GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);

	nop_loop();

	/*
	 * Enable Re-enlightenment and check that TSC page stays constant across
	 * KVM_SET_CLOCK.
	 */
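	/*
	 * In REENLIGHTENMENT_CONTROL, bit 16 is the 'enabled' flag and the
	 * low byte is the re-enlightenment interrupt vector (0xff here).
	 */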
	wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0x1 << 16 | 0xff);
	wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0x1);
	tsc_offset = tsc_page->tsc_offset;
	tsc_scale = tsc_page->tsc_scale;
	GUEST_SYNC(8);
	GUEST_ASSERT(tsc_page->tsc_offset == tsc_offset);
	GUEST_ASSERT(tsc_page->tsc_scale == tsc_scale);

	GUEST_SYNC(9);

	check_tsc_msr_tsc_page(tsc_page);

	/*
	 * Disable re-enlightenment and TSC page, check that KVM doesn't update
	 * it anymore.
	 */
	wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
	wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
	wrmsr(HV_X64_MSR_REFERENCE_TSC, 0);
	memset(tsc_page, 0, sizeof(*tsc_page));

	GUEST_SYNC(10);
	GUEST_ASSERT(tsc_page->tsc_sequence == 0);
	GUEST_ASSERT(tsc_page->tsc_offset == 0);
	GUEST_ASSERT(tsc_page->tsc_scale == 0);

	GUEST_DONE();
}

#define VCPU_ID 0

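/*
 * Same check as the guest's check_tsc_msr_rdtsc(), but with the Hyper-V MSRs
 * read from the host via the KVM_GET_MSRS ioctl.
 */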
static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
{
	u64 tsc_freq, r1, r2, t1, t2;
	s64 delta_ns;

	tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
	TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");

	/* First, check MSR-based clocksource */
	r1 = rdtsc();
	t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
	nop_loop();
	r2 = rdtsc();
	t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);

	TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t2, t1);

	/* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
	delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
	if (delta_ns < 0)
		delta_ns = -delta_ns;

	/*
	 * 1% tolerance: (t2 - t1) is in 100ns units, so (t2 - t1) * 100
	 * is the elapsed time in nanoseconds.
	 */
	TEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100,
		    "Elapsed time does not match (MSR=%ld, TSC=%ld)",
		    (t2 - t1) * 100, (r2 - r1) * 1000000000 / tsc_freq);
}

int main(void)
{
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct ucall uc;
	vm_vaddr_t tsc_page_gva;
	int stage;

	vm = vm_create_default(VCPU_ID, 0, guest_main);
	run = vcpu_state(vm, VCPU_ID);

	vcpu_set_hv_cpuid(vm, VCPU_ID);

	tsc_page_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
	TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
		"TSC page has to be page aligned");
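	/*
	 * Pass both the GVA (for the guest to access the page) and the GPA
	 * (for the guest to program into HV_X64_MSR_REFERENCE_TSC).
	 */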
	vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));

	host_check_tsc_msr_rdtsc(vm);

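	/*
	 * Each GUEST_SYNC(stage) in guest_main() corresponds to one loop
	 * iteration; at stages 7, 8 and 10 the host resets kvmclock, which
	 * must update the TSC page at stage 7 but leave it alone at stage 8
	 * (re-enlightenment on) and stage 10 (TSC page disabled).
	 */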
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			/* Keep in sync with guest_main() */
			TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d",
				    stage);
			goto out;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);

		/* Reset kvmclock triggering TSC page update */
		if (stage == 7 || stage == 8 || stage == 10) {
			struct kvm_clock_data clock = {0};

			vm_ioctl(vm, KVM_SET_CLOCK, &clock);
		}
	}

out:
	kvm_vm_free(vm);

	return 0;
}