xref: /linux/arch/x86/kvm/svm/pmu.c (revision 908fc4c2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

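/*
 * Slots into pmu->gp_counters[]; msr_to_index() maps each supported counter
 * and event select MSR to one of these, with INDEX_ERROR for anything else.
 */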
enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map; K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
	      ARRAY_SIZE(amd_f17h_event_mapping));

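/*
 * Return the base (first) MSR for the requested register type: the Family 15h
 * core extension MSRs if the guest has PERFCTR_CORE, otherwise the legacy
 * K7 PERFCTR0/EVNTSEL0 MSRs.
 */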
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

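/* Map a counter or event select MSR to its slot in pmu->gp_counters[]. */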
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

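/*
 * Resolve an MSR of the given type to its kvm_pmc.  Returns NULL if the vPMU
 * is disabled for the VM, if the MSR's type doesn't match, or if the guest
 * lacks PERFCTR_CORE for the Family 15h MSRs.
 */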
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (!vcpu->kvm->arch.enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

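/*
 * Translate the guest's event select and unit mask into a generic perf
 * hardware event id, using the Family 17h table for family 0x17 and newer
 * guests.
 */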
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_event_hw_type_mapping *event_mapping;
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
	if (WARN_ON(pmc_is_fixed(pmc)))
		return PERF_COUNT_HW_MAX;

	if (guest_cpuid_family(pmc->vcpu) >= 0x17)
		event_mapping = amd_f17h_event_mapping;
	else
		event_mapping = amd_event_mapping;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (event_mapping[i].eventsel == event_select
		    && event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return event_mapping[i].event_type;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.  Because
 * AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

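/*
 * Translate a contiguous counter index into its kvm_pmc.  With PERFCTR_CORE
 * the counter MSRs are interleaved with the event select MSRs, so the index
 * is doubled before being added to the counter base.
 */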
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

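/*
 * Validate the counter index supplied to RDPMC; the top two bits of the
 * index are masked off before the range check, as in amd_rdpmc_ecx_to_pmc().
 */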
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the value of ECX passed to the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough.  */
	return false;
}

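/*
 * Resolve any PMU MSR to its PMC, trying the counter space first and then
 * the event selects.
 */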
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

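/*
 * Read a counter or event select MSR on behalf of the guest.  Returns 1 if
 * the MSR does not belong to the vPMU.
 */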
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

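/*
 * Write a counter or event select MSR on behalf of the guest.  A counter
 * write adjusts pmc->counter so that subsequent reads return the written
 * value; an event select write masks off reserved bits and reprograms the
 * counter only if the value actually changed.
 */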
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		pmc_update_sample_period(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel)
			reprogram_gp_counter(pmc, data);
		return 0;
	}

	return 1;
}

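/*
 * (Re)configure the vPMU from guest CPUID: six 48-bit counters if the guest
 * has PERFCTR_CORE, otherwise the four legacy K7 counters.
 */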
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	pmu->version = 1;
	/* Not applicable to AMD; clear them to prevent any fallout. */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

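/* One-time per-vCPU initialization of the general purpose counter array. */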
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE ; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

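/* Stop all counters and clear their counter and event select state. */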
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

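/*
 * AMD/SVM implementation of the KVM PMU ops (consumed only at module init,
 * hence __initdata).
 */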
struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};
359