/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
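
/*
 * Illustrative example (not part of the original header): for a
 * hypothetical IA32_FIXED_CTR_CTRL value of 0xb0, fixed counter 1's
 * field is fixed_ctrl_field(0xb0, 1) == 0xb, i.e. EN = 0b11 (count in
 * both OS and user mode) and PMI = 1 (interrupt on overflow), while
 * fixed counter 0's field is 0, i.e. fully disabled.
 */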

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_emulated_event_selectors {
	u64 INSTRUCTIONS_RETIRED;
	u64 BRANCH_INSTRUCTIONS_RETIRED;
};

struct kvm_pmu_ops {
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}

/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to bits 63:32, e.g. fixed
 * counter 0 is tracked internally via index 32.  On Intel (AMD doesn't
 * support fixed counters), this mirrors how fixed counters are mapped to
 * PERF_GLOBAL_CTRL and similar MSRs, i.e. tracking fixed counters at base
 * index 32 reduces the amount of boilerplate needed to iterate over PMCs
 * *and* simplifies common enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM, it is
 * NOT safe for guest lookups, e.g. will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[idx];

	idx -= KVM_FIXED_PMC_BASE_IDX;
	if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[idx];

	return NULL;
}
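
/*
 * Illustrative behavior (hypothetical vPMU with 4 GP and 3 fixed
 * counters, i.e. not taken from this file):
 *
 *	kvm_pmc_idx_to_pmc(pmu, 0);	// &pmu->gp_counters[0]
 *	kvm_pmc_idx_to_pmc(pmu, 4);	// NULL, GP index out of range
 *	kvm_pmc_idx_to_pmc(pmu, 32);	// &pmu->fixed_counters[0]
 *	kvm_pmc_idx_to_pmc(pmu, 35);	// NULL, fixed index out of range
 */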

#define kvm_for_each_pmc(pmu, pmc, i, bitmap)			\
	for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)		\
		if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))	\
			continue;				\
		else						\

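/*
 * Illustrative usage (hypothetical caller, not from this header),
 * e.g. walking every PMC that is flagged for reprogramming:
 *
 *	struct kvm_pmc *pmc;
 *	int bit;
 *
 *	kvm_for_each_pmc(pmu, pmc, bit, pmu->reprogram_pmi)
 *		pr_debug("PMC %d needs reprogramming\n", pmc->idx);
 */
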
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter + pmc->emulated_counter;

	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}
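
/*
 * Illustrative arithmetic (hypothetical values, not from this file):
 * with a 48-bit wide counter, pmc_bitmask() is 0xffffffffffff, so
 * pmc->counter == 0xffffffffffff plus pmc->emulated_counter == 2 reads
 * back as 1, i.e. the combined count wraps at the architectural width.
 */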

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
						 u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* Returns the general purpose PMC with the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts a base MSR
 * as a parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
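
/*
 * Illustrative usage (a sketch; the MSR constants are the architectural
 * Intel ones, the call sites shown are hypothetical): the same helper
 * resolves counter and event select MSRs, distinguished only by base:
 *
 *	get_gp_pmc(pmu, MSR_IA32_PERFCTR0 + 1, MSR_IA32_PERFCTR0);
 *	get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + 1, MSR_P6_EVNTSEL0);
 *
 * Both return &pmu->gp_counters[1], or NULL if the vPMU has fewer than
 * two GP counters.
 */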

/* Returns the fixed PMC with the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}
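
/*
 * Illustrative usage (hypothetical call, not from this header):
 * get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + 1) returns
 * &pmu->fixed_counters[1], or NULL if fewer than two fixed counters
 * are exposed to the guest.
 */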

/*
 * Check whether the guest has enabled the PMC in its own control MSR, i.e.
 * fixed_ctr_ctrl for fixed counters or the eventsel for GP counters, without
 * regard to PERF_GLOBAL_CTRL.
 */
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}
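
/*
 * Illustrative example (hypothetical values): for fixed counter 0, a
 * fixed_ctr_ctrl of 0x8 (PMI set, EN clear) reads as not in use, while
 * 0x2 (count user mode) reads as in use; for a GP counter, only bit 22
 * (ARCH_PERFMON_EVENTSEL_ENABLE) of the eventsel matters here.
 */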

extern struct x86_pmu_capability kvm_pmu_cap;
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * WARN if perf did NOT disable the hardware PMU even though
		 * the architecturally required number of GP counters isn't
		 * met, i.e. if there is a non-zero number of counters, but
		 * fewer than architecturally required.
		 */
		if (!kvm_pmu_cap.num_counters_gp ||
		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
			enable_pmu = false;
		else if (is_intel && !kvm_pmu_cap.version)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);

	kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
		perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
	kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
		perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
}
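
/*
 * Illustrative outcome (hypothetical host, not from this file): on a
 * non-hybrid Intel CPU reporting PMU version 5, 8 GP counters, and 4
 * fixed counters, the capability is clamped to version 2, at most
 * MAX_NR_GP_COUNTERS GP counters, and at most KVM_PMC_MAX_FIXED fixed
 * counters before any of it is reported as supported by KVM.
 */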

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
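
/*
 * Illustrative usage (hypothetical write handler, not from this
 * header): callers pass the XOR of the old and new control values so
 * that only counters whose enable bits actually changed are flagged:
 *
 *	u64 diff = pmu->global_ctrl ^ new_global_ctrl;
 *
 *	reprogram_counters(pmu, diff);
 */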

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have a global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
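
/*
 * Illustrative tie-in (restating the bitmap layout described above):
 * fixed counter 0 has pmc->idx == 32, so the test_bit() above checks
 * bit 32 of global_ctrl, which is exactly where IA32_PERF_GLOBAL_CTRL
 * places the fixed counter 0 enable bit architecturally.
 */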

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */