1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Tests for MSR_IA32_TSC and MSR_IA32_TSC_ADJUST.
4  *
5  * Copyright (C) 2020, Red Hat, Inc.
6  */
7 #include <stdio.h>
8 #include <string.h>
9 #include "kvm_util.h"
10 #include "processor.h"
11 
/* One "tick" unit; all test values are multiples of this. */
#define UNITY                  (1ull << 30)
/* Offset the host applies to MSR_IA32_TSC in stage 3. */
#define HOST_ADJUST            (UNITY * 64)
/* Per-stage increment the guest writes to the MSRs. */
#define GUEST_STEP             (UNITY * 4)
/*
 * Round to the nearest multiple of UNITY (round half up), absorbing
 * the cycles that elapse between writing and re-reading a TSC MSR.
 * The argument is fully parenthesized so expressions built from
 * operators of lower precedence than '+' still round correctly.
 */
#define ROUND(x)               (((x) + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x)       ROUND(rdmsr(x))
/* Host-side read; relies on a 'vcpu' variable in the caller's scope. */
#define rounded_host_rdmsr(x)  ROUND(vcpu_get_msr(vcpu, x))
18 
/*
 * Guest half of the test.  Runs in lockstep with the host side in
 * main(): each GUEST_SYNC(n) hands control back to the host so it can
 * manipulate the MSRs via vCPU ioctls before the guest re-checks them.
 * All reads go through rounded_rdmsr() so that the cycles consumed
 * between a wrmsr and the following rdmsr do not perturb the compares.
 */
static void guest_code(void)
{
	u64 val = 0;

	/* Stage 1: both MSRs start out as zero (after rounding).  */
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs.  */
	val = 1ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs.  */
	GUEST_SYNC(2);
	val = 2ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Host: setting the TSC offset.  While we were stopped the host
	 * wrote HOST_ADJUST + val to MSR_IA32_TSC; only the TSC moved,
	 * MSR_IA32_TSC_ADJUST was left untouched.
	 */
	GUEST_SYNC(3);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	GUEST_SYNC(4);
	val = 3ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	GUEST_SYNC(5);
	val = 4ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	GUEST_DONE();
}
66 
/*
 * Enter the guest and consume its next ucall exit.
 *
 * A UCALL_SYNC exit must match @stage: guest_code() syncs with value
 * stage + 1 (the stage-1 run ends at GUEST_SYNC(2), and so on), and
 * args[0] is expected to hold the literal "hello" marker set by the
 * GUEST_SYNC implementation.  UCALL_DONE (the final run) is also
 * accepted; a guest-side assertion failure (UCALL_ABORT) or any other
 * exit fails the test.
 */
static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage + 1, (ulong)uc.args[1]);
		return;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		/* Report the guest's failed GUEST_ASSERT_EQ operands.  */
		REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}
88 
/*
 * Host half of the test.  Alternates between running the guest
 * (run_vcpu() returns at the guest's next GUEST_SYNC) and accessing
 * the TSC MSRs through vCPU ioctls, verifying at each stage which of
 * the two MSRs a guest-side or host-side write is visible through.
 */
int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t val;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Before the first run, both MSRs read as zero (after rounding).  */
	val = 0;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs.  */
	run_vcpu(vcpu, 1);
	val = 1ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs.  */
	run_vcpu(vcpu, 2);
	val = 2ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Host: writes to MSR_IA32_TSC set the host-side offset
	 * and therefore do not change MSR_IA32_TSC_ADJUST.
	 */
	vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
	/* Let the guest re-check both MSRs (its stage-3 asserts).  */
	run_vcpu(vcpu, 3);

	/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC.  */
	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);

	/* Restore previous value.  */
	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	run_vcpu(vcpu, 4);
	val = 3ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	run_vcpu(vcpu, 5);
	val = 4ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	kvm_vm_free(vm);

	return 0;
}
154