xref: /linux/arch/arm64/kvm/pmu-emul.c (revision d642ef71)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Linaro Ltd.
4  * Author: Shannon Zhao <shannon.zhao@linaro.org>
5  */
6 
7 #include <linux/cpu.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10 #include <linux/list.h>
11 #include <linux/perf_event.h>
12 #include <linux/perf/arm_pmu.h>
13 #include <linux/uaccess.h>
14 #include <asm/kvm_emulate.h>
15 #include <kvm/arm_pmu.h>
16 #include <kvm/arm_vgic.h>
17 #include <asm/arm_pmuv3.h>
18 
19 #define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)
20 
21 DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
22 
23 static LIST_HEAD(arm_pmus);
24 static DEFINE_MUTEX(arm_pmus_lock);
25 
26 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
27 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
28 
29 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
30 {
31 	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
32 }
33 
34 static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
35 {
36 	return &vcpu->arch.pmu.pmc[cnt_idx];
37 }
38 
39 static u32 __kvm_pmu_event_mask(unsigned int pmuver)
40 {
41 	switch (pmuver) {
42 	case ID_AA64DFR0_EL1_PMUVer_IMP:
43 		return GENMASK(9, 0);
44 	case ID_AA64DFR0_EL1_PMUVer_V3P1:
45 	case ID_AA64DFR0_EL1_PMUVer_V3P4:
46 	case ID_AA64DFR0_EL1_PMUVer_V3P5:
47 	case ID_AA64DFR0_EL1_PMUVer_V3P7:
48 		return GENMASK(15, 0);
49 	default:		/* Shouldn't be here, just for sanity */
50 		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
51 		return 0;
52 	}
53 }
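/*
 * Worked example of the masks above: a baseline PMUv3 (PMUVer == IMP)
 * implementation has a 10-bit event space, so a guest event selector of
 * 0x1234 is truncated by this mask to 0x1234 & GENMASK(9, 0) == 0x234,
 * while any PMUv3.1+ implementation keeps the full 16-bit value.
 */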
54 
55 static u32 kvm_pmu_event_mask(struct kvm *kvm)
56 {
57 	u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
58 	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
59 
60 	return __kvm_pmu_event_mask(pmuver);
61 }
62 
63 u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
64 {
65 	u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
66 		   kvm_pmu_event_mask(kvm);
67 	u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
68 
69 	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
70 		mask |= ARMV8_PMU_INCLUDE_EL2;
71 
72 	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
73 		mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
74 			ARMV8_PMU_EXCLUDE_NS_EL1 |
75 			ARMV8_PMU_EXCLUDE_EL3;
76 
77 	return mask;
78 }
79 
80 /**
81  * kvm_pmc_is_64bit - determine if counter is 64bit
82  * @pmc: counter context
83  */
84 static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
85 {
86 	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
87 		kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
88 }
89 
90 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
91 {
92 	u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
93 
94 	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
95 	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
96 }
97 
98 static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
99 {
100 	return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
101 		!kvm_pmc_has_64bit_overflow(pmc));
102 }
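/*
 * Illustration: chaining pairs an even-numbered counter with its odd
 * neighbour counting the CHAIN event, e.g. counter 0 overflowing into
 * counter 1. Counter 30 cannot chain (its neighbour would be the cycle
 * counter, ARMV8_PMU_CYCLE_IDX == 31), nor can a counter that already
 * overflows at 64 bits (PMCR_EL0.LP set).
 */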
103 
104 static u32 counter_index_to_reg(u64 idx)
105 {
106 	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
107 }
108 
109 static u32 counter_index_to_evtreg(u64 idx)
110 {
111 	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
112 }
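/*
 * Mapping example: index 0 resolves to PMEVCNTR0_EL0/PMEVTYPER0_EL0,
 * index 5 to PMEVCNTR5_EL0/PMEVTYPER5_EL0, and the cycle counter
 * (ARMV8_PMU_CYCLE_IDX) to PMCCNTR_EL0/PMCCFILTR_EL0.
 */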
113 
114 static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
115 {
116 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
117 	u64 counter, reg, enabled, running;
118 
119 	reg = counter_index_to_reg(pmc->idx);
120 	counter = __vcpu_sys_reg(vcpu, reg);
121 
122 	/*
123 	 * The real counter value is the value of the counter register plus
124 	 * whatever the backing perf event has counted since it was created.
125 	 */
126 	if (pmc->perf_event)
127 		counter += perf_event_read_value(pmc->perf_event, &enabled,
128 						 &running);
129 
130 	if (!kvm_pmc_is_64bit(pmc))
131 		counter = lower_32_bits(counter);
132 
133 	return counter;
134 }
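/*
 * As an illustration: if the saved register holds 100 and the backing
 * perf event has counted another 50, the guest observes 150; on a
 * 32-bit counter the sum is then truncated to its low 32 bits.
 */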
135 
136 /**
137  * kvm_pmu_get_counter_value - get PMU counter value
138  * @vcpu: The vcpu pointer
139  * @select_idx: The counter index
140  */
141 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
142 {
143 	if (!kvm_vcpu_has_pmu(vcpu))
144 		return 0;
145 
146 	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
147 }
148 
149 static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
150 {
151 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
152 	u64 reg;
153 
154 	kvm_pmu_release_perf_event(pmc);
155 
156 	reg = counter_index_to_reg(pmc->idx);
157 
158 	if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
159 	    !force) {
160 		/*
161 		 * Even with PMUv3p5, AArch32 cannot write to the top
162 		 * 32bit of the counters. The only possible course of
163 		 * action is to use PMCR.P, which will reset them to
164 		 * 0 (the only use of the 'force' parameter).
165 		 */
166 		val  = lower_32_bits(val);
167 		val |= __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
168 	}
169 
170 	__vcpu_sys_reg(vcpu, reg) = val;
171 
172 	/* Recreate the perf event to reflect the updated sample_period */
173 	kvm_pmu_create_perf_event(pmc);
174 }
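/*
 * Worked example of the AArch32 narrowing above: with the stored
 * counter at 0x00000001_00000000, a 32-bit guest writing 0xdeadbeef
 * leaves 0x00000001_deadbeef in the register: the low half comes from
 * the write, the top half is preserved.
 */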
175 
176 /**
177  * kvm_pmu_set_counter_value - set PMU counter value
178  * @vcpu: The vcpu pointer
179  * @select_idx: The counter index
180  * @val: The counter value
181  */
182 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
183 {
184 	if (!kvm_vcpu_has_pmu(vcpu))
185 		return;
186 
187 	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
188 }
189 
190 /**
191  * kvm_pmu_release_perf_event - remove the perf event
192  * @pmc: The PMU counter pointer
193  */
194 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
195 {
196 	if (pmc->perf_event) {
197 		perf_event_disable(pmc->perf_event);
198 		perf_event_release_kernel(pmc->perf_event);
199 		pmc->perf_event = NULL;
200 	}
201 }
202 
203 /**
204  * kvm_pmu_stop_counter - stop PMU counter
205  * @pmc: The PMU counter pointer
206  *
207  * If this counter has been configured to monitor some event, release it here.
208  */
209 static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
210 {
211 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
212 	u64 reg, val;
213 
214 	if (!pmc->perf_event)
215 		return;
216 
217 	val = kvm_pmu_get_pmc_value(pmc);
218 
219 	reg = counter_index_to_reg(pmc->idx);
220 
221 	__vcpu_sys_reg(vcpu, reg) = val;
222 
223 	kvm_pmu_release_perf_event(pmc);
224 }
225 
226 /**
227  * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
228  * @vcpu: The vcpu pointer
229  *
230  */
231 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
232 {
233 	int i;
234 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
235 
236 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
237 		pmu->pmc[i].idx = i;
238 }
239 
240 /**
241  * kvm_pmu_vcpu_reset - reset pmu state for cpu
242  * @vcpu: The vcpu pointer
243  *
244  */
245 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
246 {
247 	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
248 	int i;
249 
250 	for_each_set_bit(i, &mask, 32)
251 		kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
252 }
253 
254 /**
255  * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
256  * @vcpu: The vcpu pointer
257  *
258  */
259 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
260 {
261 	int i;
262 
263 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
264 		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
265 	irq_work_sync(&vcpu->arch.pmu.overflow_work);
266 }
267 
268 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
269 {
270 	u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
271 
272 	val &= ARMV8_PMU_PMCR_N_MASK;
273 	if (val == 0)
274 		return BIT(ARMV8_PMU_CYCLE_IDX);
275 	else
276 		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
277 }
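/*
 * Worked example: with PMCR_EL0.N == 6, this returns
 * GENMASK(5, 0) | BIT(31) == 0x8000003f, i.e. event counters 0-5 plus
 * the cycle counter; with N == 0, only the cycle counter bit remains.
 */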
278 
279 /**
280  * kvm_pmu_enable_counter_mask - enable selected PMU counters
281  * @vcpu: The vcpu pointer
282  * @val: the value guest writes to PMCNTENSET register
283  *
284  * Call perf_event_enable to start counting the perf event
285  */
286 void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
287 {
288 	int i;
289 	if (!kvm_vcpu_has_pmu(vcpu))
290 		return;
291 
292 	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
293 		return;
294 
295 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
296 		struct kvm_pmc *pmc;
297 
298 		if (!(val & BIT(i)))
299 			continue;
300 
301 		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
302 
303 		if (!pmc->perf_event) {
304 			kvm_pmu_create_perf_event(pmc);
305 		} else {
306 			perf_event_enable(pmc->perf_event);
307 			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
308 				kvm_debug("failed to enable perf event\n");
309 		}
310 	}
311 }
312 
313 /**
314  * kvm_pmu_disable_counter_mask - disable selected PMU counters
315  * @vcpu: The vcpu pointer
316  * @val: the value guest writes to PMCNTENCLR register
317  *
318  * Call perf_event_disable to stop counting the perf event
319  */
320 void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
321 {
322 	int i;
323 
324 	if (!kvm_vcpu_has_pmu(vcpu) || !val)
325 		return;
326 
327 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
328 		struct kvm_pmc *pmc;
329 
330 		if (!(val & BIT(i)))
331 			continue;
332 
333 		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
334 
335 		if (pmc->perf_event)
336 			perf_event_disable(pmc->perf_event);
337 	}
338 }
339 
340 static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
341 {
342 	u64 reg = 0;
343 
344 	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
345 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
346 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
347 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
348 	}
349 
350 	return reg;
351 }
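/*
 * In other words, a counter drives the interrupt line only when all
 * three bits agree: it has overflowed (PMOVSSET), it is enabled
 * (PMCNTENSET) and its interrupt is unmasked (PMINTENSET), and only
 * while PMCR_EL0.E is set. An overflow on a counter whose interrupt is
 * masked contributes nothing to the returned value.
 */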
352 
353 static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
354 {
355 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
356 	bool overflow;
357 
358 	if (!kvm_vcpu_has_pmu(vcpu))
359 		return;
360 
361 	overflow = !!kvm_pmu_overflow_status(vcpu);
362 	if (pmu->irq_level == overflow)
363 		return;
364 
365 	pmu->irq_level = overflow;
366 
367 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
368 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
369 					      pmu->irq_num, overflow, pmu);
370 		WARN_ON(ret);
371 	}
372 }
373 
374 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
375 {
376 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
377 	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
378 	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
379 
380 	if (likely(irqchip_in_kernel(vcpu->kvm)))
381 		return false;
382 
383 	return pmu->irq_level != run_level;
384 }
385 
386 /*
387  * Reflect the PMU overflow interrupt output level into the kvm_run structure
388  */
389 void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
390 {
391 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
392 
393 	/* Populate the timer bitmap for user space */
394 	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
395 	if (vcpu->arch.pmu.irq_level)
396 		regs->device_irq_level |= KVM_ARM_DEV_PMU;
397 }
398 
399 /**
400  * kvm_pmu_flush_hwstate - flush pmu state to cpu
401  * @vcpu: The vcpu pointer
402  *
403  * Check if the PMU has overflowed while we were running in the host, and inject
404  * an interrupt if that was the case.
405  */
406 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
407 {
408 	kvm_pmu_update_state(vcpu);
409 }
410 
411 /**
412  * kvm_pmu_sync_hwstate - sync pmu state from cpu
413  * @vcpu: The vcpu pointer
414  *
415  * Check if the PMU has overflowed while we were running in the guest, and
416  * inject an interrupt if that was the case.
417  */
418 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
419 {
420 	kvm_pmu_update_state(vcpu);
421 }
422 
423 /*
424  * When the perf interrupt is an NMI, we cannot safely notify the vcpu
425  * corresponding to the event.
426  * This is why we need a callback to do it once outside of the NMI context.
427  */
428 static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
429 {
430 	struct kvm_vcpu *vcpu;
431 
432 	vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
433 	kvm_vcpu_kick(vcpu);
434 }
435 
436 /*
437  * Perform an increment on any of the counters described in @mask,
438  * generating the overflow if required, and propagate it as a chained
439  * event if possible.
440  */
441 static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
442 				      unsigned long mask, u32 event)
443 {
444 	int i;
445 
446 	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
447 		return;
448 
449 	/* Weed out disabled counters */
450 	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
451 
452 	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
453 		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
454 		u64 type, reg;
455 
456 		/* Filter on event type */
457 		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
458 		type &= kvm_pmu_event_mask(vcpu->kvm);
459 		if (type != event)
460 			continue;
461 
462 		/* Increment this counter */
463 		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
464 		if (!kvm_pmc_is_64bit(pmc))
465 			reg = lower_32_bits(reg);
466 		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
467 
468 		/* No overflow? move on */
469 		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
470 			continue;
471 
472 		/* Mark overflow */
473 		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
474 
475 		if (kvm_pmu_counter_can_chain(pmc))
476 			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
477 						  ARMV8_PMUV3_PERFCTR_CHAIN);
478 	}
479 }
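/*
 * Example of a chained increment: counter 0 holds 0xffffffff with
 * 32-bit overflow in effect. One increment wraps it to 0, sets bit 0
 * of PMOVSSET_EL0 and, if counter 1 is enabled and programmed with the
 * CHAIN event, recursively increments counter 1 by one.
 */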
480 
481 /* Compute the sample period for a given counter value */
482 static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
483 {
484 	u64 val;
485 
486 	if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
487 		val = (-counter) & GENMASK(63, 0);
488 	else
489 		val = (-counter) & GENMASK(31, 0);
490 
491 	return val;
492 }
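/*
 * Worked arithmetic: a 32-bit counter at 0xffffff00 gives
 * (-0xffffff00) & GENMASK(31, 0) == 0x100, i.e. 256 events until the
 * next overflow; a 64-bit-overflow counter at (1ULL << 63) gives a
 * period of 1ULL << 63.
 */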
493 
494 /*
495  * When the perf event overflows, set the overflow status and inform the vcpu.
496  */
497 static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
498 				  struct perf_sample_data *data,
499 				  struct pt_regs *regs)
500 {
501 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
502 	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
503 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
504 	int idx = pmc->idx;
505 	u64 period;
506 
507 	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
508 
509 	/*
510 	 * Reset the sample period to the architectural limit,
511 	 * i.e. the point where the counter overflows.
512 	 */
513 	period = compute_period(pmc, local64_read(&perf_event->count));
514 
515 	local64_set(&perf_event->hw.period_left, 0);
516 	perf_event->attr.sample_period = period;
517 	perf_event->hw.sample_period = period;
518 
519 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
520 
521 	if (kvm_pmu_counter_can_chain(pmc))
522 		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
523 					  ARMV8_PMUV3_PERFCTR_CHAIN);
524 
525 	if (kvm_pmu_overflow_status(vcpu)) {
526 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
527 
528 		if (!in_nmi())
529 			kvm_vcpu_kick(vcpu);
530 		else
531 			irq_work_queue(&vcpu->arch.pmu.overflow_work);
532 	}
533 
534 	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
535 }
536 
537 /**
538  * kvm_pmu_software_increment - do software increment
539  * @vcpu: The vcpu pointer
540  * @val: the value guest writes to PMSWINC register
541  */
542 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
543 {
544 	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
545 }
546 
547 /**
548  * kvm_pmu_handle_pmcr - handle PMCR register
549  * @vcpu: The vcpu pointer
550  * @val: the value guest writes to PMCR register
551  */
552 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
553 {
554 	int i;
555 
556 	if (!kvm_vcpu_has_pmu(vcpu))
557 		return;
558 
559 	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
560 	if (!kvm_pmu_is_3p5(vcpu))
561 		val &= ~ARMV8_PMU_PMCR_LP;
562 
563 	/* The reset bits don't indicate any state, and shouldn't be saved. */
564 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
565 
566 	if (val & ARMV8_PMU_PMCR_E) {
567 		kvm_pmu_enable_counter_mask(vcpu,
568 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
569 	} else {
570 		kvm_pmu_disable_counter_mask(vcpu,
571 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
572 	}
573 
574 	if (val & ARMV8_PMU_PMCR_C)
575 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
576 
577 	if (val & ARMV8_PMU_PMCR_P) {
578 		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
579 		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
580 		for_each_set_bit(i, &mask, 32)
581 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
582 	}
583 	kvm_vcpu_pmu_restore_guest(vcpu);
584 }
585 
586 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
587 {
588 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
589 	return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
590 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
591 }
592 
593 /**
594  * kvm_pmu_create_perf_event - create a perf event for a counter
595  * @pmc: Counter context
596  */
597 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
598 {
599 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
600 	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
601 	struct perf_event *event;
602 	struct perf_event_attr attr;
603 	u64 eventsel, reg, data;
604 	bool p, u, nsk, nsu;
605 
606 	reg = counter_index_to_evtreg(pmc->idx);
607 	data = __vcpu_sys_reg(vcpu, reg);
608 
609 	kvm_pmu_stop_counter(pmc);
610 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
611 		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
612 	else
613 		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
614 
615 	/*
616 	 * Neither SW increment nor chained events need to be backed
617 	 * by a perf event.
618 	 */
619 	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
620 	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
621 		return;
622 
623 	/*
624 	 * If we have a filter in place and the event isn't allowed, do
625 	 * not install a perf event either.
626 	 */
627 	if (vcpu->kvm->arch.pmu_filter &&
628 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
629 		return;
630 
631 	p = data & ARMV8_PMU_EXCLUDE_EL1;
632 	u = data & ARMV8_PMU_EXCLUDE_EL0;
633 	nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1;
634 	nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0;
635 
636 	memset(&attr, 0, sizeof(struct perf_event_attr));
637 	attr.type = arm_pmu->pmu.type;
638 	attr.size = sizeof(attr);
639 	attr.pinned = 1;
640 	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
641 	attr.exclude_user = (u != nsu);
642 	attr.exclude_kernel = (p != nsk);
643 	attr.exclude_hv = 1; /* Don't count EL2 events */
644 	attr.exclude_host = 1; /* Don't count host events */
645 	attr.config = eventsel;
646 
647 	/*
648 	 * If counting with a 64bit counter, advertise it to the perf
649 	 * code, carefully dealing with the initial sample period
650 	 * which also depends on the overflow.
651 	 */
652 	if (kvm_pmc_is_64bit(pmc))
653 		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
654 
655 	attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));
656 
657 	event = perf_event_create_kernel_counter(&attr, -1, current,
658 						 kvm_pmu_perf_overflow, pmc);
659 
660 	if (IS_ERR(event)) {
661 		pr_err_once("kvm: pmu event creation failed %ld\n",
662 			    PTR_ERR(event));
663 		return;
664 	}
665 
666 	pmc->perf_event = event;
667 }
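/*
 * Filter translation example: a guest writing an event type with
 * ARMV8_PMU_EXCLUDE_EL1 set but ARMV8_PMU_EXCLUDE_NS_EL1 clear ends up
 * with attr.exclude_kernel == 1 (p != nsk), so the backing event stops
 * counting while the guest runs at EL1.
 */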
668 
669 /**
670  * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
671  * @vcpu: The vcpu pointer
672  * @data: The data guest writes to PMXEVTYPER_EL0
673  * @select_idx: The number of selected counter
674  *
675  * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
676  * event with given hardware event number. Here we call perf_event API to
677  * emulate this action and create a kernel perf event for it.
678  */
679 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
680 				    u64 select_idx)
681 {
682 	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
683 	u64 reg;
684 
685 	if (!kvm_vcpu_has_pmu(vcpu))
686 		return;
687 
688 	reg = counter_index_to_evtreg(pmc->idx);
689 	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
690 
691 	kvm_pmu_create_perf_event(pmc);
692 }
693 
694 void kvm_host_pmu_init(struct arm_pmu *pmu)
695 {
696 	struct arm_pmu_entry *entry;
697 
698 	/*
699 	 * Check the sanitised PMU version for the system, as KVM does not
700 	 * support implementations where PMUv3 exists on a subset of CPUs.
701 	 */
702 	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
703 		return;
704 
705 	mutex_lock(&arm_pmus_lock);
706 
707 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
708 	if (!entry)
709 		goto out_unlock;
710 
711 	entry->arm_pmu = pmu;
712 	list_add_tail(&entry->entry, &arm_pmus);
713 
714 	if (list_is_singular(&arm_pmus))
715 		static_branch_enable(&kvm_arm_pmu_available);
716 
717 out_unlock:
718 	mutex_unlock(&arm_pmus_lock);
719 }
720 
721 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
722 {
723 	struct arm_pmu *tmp, *pmu = NULL;
724 	struct arm_pmu_entry *entry;
725 	int cpu;
726 
727 	mutex_lock(&arm_pmus_lock);
728 
729 	/*
730 	 * It is safe to use a stale cpu to iterate the list of PMUs so long as
731 	 * the same value is used for the entirety of the loop. Given this, and
732 	 * the fact that no percpu data is used for the lookup there is no need
733 	 * to disable preemption.
734 	 *
735 	 * It is still necessary to get a valid cpu, though, to probe for the
736 	 * default PMU instance as userspace is not required to specify a PMU
737 	 * type. In order to uphold the preexisting behavior KVM selects the
738 	 * PMU instance for the core during vcpu init. A dependent use
739 	 * case would be a user with disdain of all things big.LITTLE that
740 	 * affines the VMM to a particular cluster of cores.
741 	 *
742 	 * In any case, userspace should just do the sane thing and use the UAPI
743 	 * to select a PMU type directly. But, be wary of the baggage being
744 	 * carried here.
745 	 */
746 	cpu = raw_smp_processor_id();
747 	list_for_each_entry(entry, &arm_pmus, entry) {
748 		tmp = entry->arm_pmu;
749 
750 		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
751 			pmu = tmp;
752 			break;
753 		}
754 	}
755 
756 	mutex_unlock(&arm_pmus_lock);
757 
758 	return pmu;
759 }
760 
761 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
762 {
763 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
764 	u64 val, mask = 0;
765 	int base, i, nr_events;
766 
767 	if (!kvm_vcpu_has_pmu(vcpu))
768 		return 0;
769 
770 	if (!pmceid1) {
771 		val = read_sysreg(pmceid0_el0);
772 		/* always support CHAIN */
773 		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
774 		base = 0;
775 	} else {
776 		val = read_sysreg(pmceid1_el0);
777 		/*
778 		 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
779 		 * as RAZ
780 		 */
781 		val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
782 			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
783 			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
784 		base = 32;
785 	}
786 
787 	if (!bmap)
788 		return val;
789 
790 	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
791 
792 	for (i = 0; i < 32; i += 8) {
793 		u64 byte;
794 
795 		byte = bitmap_get_value8(bmap, base + i);
796 		mask |= byte << i;
797 		if (nr_events >= (0x4000 + base + 32)) {
798 			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
799 			mask |= byte << (32 + i);
800 		}
801 	}
802 
803 	return val & mask;
804 }
805 
806 void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
807 {
808 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
809 
810 	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
811 
812 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
813 	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
814 	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
815 }
816 
817 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
818 {
819 	if (!kvm_vcpu_has_pmu(vcpu))
820 		return 0;
821 
822 	if (!vcpu->arch.pmu.created)
823 		return -EINVAL;
824 
825 	/*
826 	 * A valid interrupt configuration for the PMU is either to have a
827 	 * properly configured interrupt number and using an in-kernel
828 	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
829 	 */
830 	if (irqchip_in_kernel(vcpu->kvm)) {
831 		int irq = vcpu->arch.pmu.irq_num;
832 		/*
833 		 * If we are using an in-kernel vgic, at this point we know
834 		 * the vgic will be initialized, so we can check the PMU irq
835 		 * number against the dimensions of the vgic and make sure
836 		 * it's valid.
837 		 */
838 		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
839 			return -EINVAL;
840 	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
841 		return -EINVAL;
842 	}
843 
844 	/* One-off reload of the PMU on first run */
845 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
846 
847 	return 0;
848 }
849 
850 static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
851 {
852 	if (irqchip_in_kernel(vcpu->kvm)) {
853 		int ret;
854 
855 		/*
856 		 * If using the PMU with an in-kernel virtual GIC
857 		 * implementation, we require the GIC to be already
858 		 * initialized when initializing the PMU.
859 		 */
860 		if (!vgic_initialized(vcpu->kvm))
861 			return -ENODEV;
862 
863 		if (!kvm_arm_pmu_irq_initialized(vcpu))
864 			return -ENXIO;
865 
866 		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
867 					 &vcpu->arch.pmu);
868 		if (ret)
869 			return ret;
870 	}
871 
872 	init_irq_work(&vcpu->arch.pmu.overflow_work,
873 		      kvm_pmu_perf_overflow_notify_vcpu);
874 
875 	vcpu->arch.pmu.created = true;
876 	return 0;
877 }
878 
879 /*
880  * For one VM the interrupt type must be the same for each vcpu.
881  * As a PPI, the interrupt number is the same for all vcpus,
882  * while as an SPI it must be a separate number per vcpu.
883  */
884 static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
885 {
886 	unsigned long i;
887 	struct kvm_vcpu *vcpu;
888 
889 	kvm_for_each_vcpu(i, vcpu, kvm) {
890 		if (!kvm_arm_pmu_irq_initialized(vcpu))
891 			continue;
892 
893 		if (irq_is_ppi(irq)) {
894 			if (vcpu->arch.pmu.irq_num != irq)
895 				return false;
896 		} else {
897 			if (vcpu->arch.pmu.irq_num == irq)
898 				return false;
899 		}
900 	}
901 
902 	return true;
903 }
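/*
 * Example: PPI 23 is only valid if every initialized vcpu uses 23,
 * since PPIs are banked per CPU; SPIs are shared, so two vcpus both
 * claiming SPI 70 would clash and fail this check.
 */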
904 
905 /**
906  * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
907  * @kvm: The kvm pointer
908  */
909 u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
910 {
911 	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
912 
913 	/*
914 	 * The arm_pmu->num_events considers the cycle counter as well.
915 	 * Ignore that and return only the general-purpose counters.
916 	 */
917 	return arm_pmu->num_events - 1;
918 }
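/*
 * Example: an arm_pmu reporting num_events == 7 (six event counters
 * plus the cycle counter) makes this return 6, which then becomes the
 * vPMU's PMCR_EL0.N via kvm_arm_set_pmu().
 */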
919 
920 static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
921 {
922 	lockdep_assert_held(&kvm->arch.config_lock);
923 
924 	kvm->arch.arm_pmu = arm_pmu;
925 	kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
926 }
927 
928 /**
929  * kvm_arm_set_default_pmu - No PMU set, get the default one.
930  * @kvm: The kvm pointer
931  *
932  * The observant among you will notice that the supported_cpus
933  * mask does not get updated for the default PMU even though it
934  * is quite possible the selected instance supports only a
935  * subset of cores in the system. This is intentional, and
936  * upholds the preexisting behavior on heterogeneous systems
937  * where vCPUs can be scheduled on any core but the guest
938  * counters could stop working.
939  */
940 int kvm_arm_set_default_pmu(struct kvm *kvm)
941 {
942 	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
943 
944 	if (!arm_pmu)
945 		return -ENODEV;
946 
947 	kvm_arm_set_pmu(kvm, arm_pmu);
948 	return 0;
949 }
950 
951 static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
952 {
953 	struct kvm *kvm = vcpu->kvm;
954 	struct arm_pmu_entry *entry;
955 	struct arm_pmu *arm_pmu;
956 	int ret = -ENXIO;
957 
958 	lockdep_assert_held(&kvm->arch.config_lock);
959 	mutex_lock(&arm_pmus_lock);
960 
961 	list_for_each_entry(entry, &arm_pmus, entry) {
962 		arm_pmu = entry->arm_pmu;
963 		if (arm_pmu->pmu.type == pmu_id) {
964 			if (kvm_vm_has_ran_once(kvm) ||
965 			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
966 				ret = -EBUSY;
967 				break;
968 			}
969 
970 			kvm_arm_set_pmu(kvm, arm_pmu);
971 			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
972 			ret = 0;
973 			break;
974 		}
975 	}
976 
977 	mutex_unlock(&arm_pmus_lock);
978 	return ret;
979 }
980 
981 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
982 {
983 	struct kvm *kvm = vcpu->kvm;
984 
985 	lockdep_assert_held(&kvm->arch.config_lock);
986 
987 	if (!kvm_vcpu_has_pmu(vcpu))
988 		return -ENODEV;
989 
990 	if (vcpu->arch.pmu.created)
991 		return -EBUSY;
992 
993 	switch (attr->attr) {
994 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
995 		int __user *uaddr = (int __user *)(long)attr->addr;
996 		int irq;
997 
998 		if (!irqchip_in_kernel(kvm))
999 			return -EINVAL;
1000 
1001 		if (get_user(irq, uaddr))
1002 			return -EFAULT;
1003 
1004 		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
1005 		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
1006 			return -EINVAL;
1007 
1008 		if (!pmu_irq_is_valid(kvm, irq))
1009 			return -EINVAL;
1010 
1011 		if (kvm_arm_pmu_irq_initialized(vcpu))
1012 			return -EBUSY;
1013 
1014 		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
1015 		vcpu->arch.pmu.irq_num = irq;
1016 		return 0;
1017 	}
1018 	case KVM_ARM_VCPU_PMU_V3_FILTER: {
1019 		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
1020 		struct kvm_pmu_event_filter __user *uaddr;
1021 		struct kvm_pmu_event_filter filter;
1022 		int nr_events;
1023 
1024 		/*
1025 		 * Allow userspace to specify an event filter for the entire
1026 		 * event range supported by the hardware's PMUVer, rather than
1027 		 * the guest's, to preserve KVM backward compatibility.
1028 		 */
1029 		nr_events = __kvm_pmu_event_mask(pmuver) + 1;
1030 
1031 		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
1032 
1033 		if (copy_from_user(&filter, uaddr, sizeof(filter)))
1034 			return -EFAULT;
1035 
1036 		if (((u32)filter.base_event + filter.nevents) > nr_events ||
1037 		    (filter.action != KVM_PMU_EVENT_ALLOW &&
1038 		     filter.action != KVM_PMU_EVENT_DENY))
1039 			return -EINVAL;
1040 
1041 		if (kvm_vm_has_ran_once(kvm))
1042 			return -EBUSY;
1043 
1044 		if (!kvm->arch.pmu_filter) {
1045 			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
1046 			if (!kvm->arch.pmu_filter)
1047 				return -ENOMEM;
1048 
1049 			/*
1050 			 * The default depends on the first applied filter.
1051 			 * If it allows events, the default is to deny.
1052 			 * Conversely, if the first filter denies a set of
1053 			 * events, the default is to allow.
1054 			 */
1055 			if (filter.action == KVM_PMU_EVENT_ALLOW)
1056 				bitmap_zero(kvm->arch.pmu_filter, nr_events);
1057 			else
1058 				bitmap_fill(kvm->arch.pmu_filter, nr_events);
1059 		}
1060 
1061 		if (filter.action == KVM_PMU_EVENT_ALLOW)
1062 			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1063 		else
1064 			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1065 
1066 		return 0;
1067 	}
1068 	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
1069 		int __user *uaddr = (int __user *)(long)attr->addr;
1070 		int pmu_id;
1071 
1072 		if (get_user(pmu_id, uaddr))
1073 			return -EFAULT;
1074 
1075 		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
1076 	}
1077 	case KVM_ARM_VCPU_PMU_V3_INIT:
1078 		return kvm_arm_pmu_v3_init(vcpu);
1079 	}
1080 
1081 	return -ENXIO;
1082 }
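/*
 * A minimal userspace sketch of driving this handler through
 * KVM_SET_DEVICE_ATTR on a vcpu fd (error handling elided; vcpu_fd and
 * the PPI number are placeholders):
 *
 *	int irq = 23;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)&irq,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *	attr.addr = 0;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */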
1083 
1084 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1085 {
1086 	switch (attr->attr) {
1087 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
1088 		int __user *uaddr = (int __user *)(long)attr->addr;
1089 		int irq;
1090 
1091 		if (!irqchip_in_kernel(vcpu->kvm))
1092 			return -EINVAL;
1093 
1094 		if (!kvm_vcpu_has_pmu(vcpu))
1095 			return -ENODEV;
1096 
1097 		if (!kvm_arm_pmu_irq_initialized(vcpu))
1098 			return -ENXIO;
1099 
1100 		irq = vcpu->arch.pmu.irq_num;
1101 		return put_user(irq, uaddr);
1102 	}
1103 	}
1104 
1105 	return -ENXIO;
1106 }
1107 
1108 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1109 {
1110 	switch (attr->attr) {
1111 	case KVM_ARM_VCPU_PMU_V3_IRQ:
1112 	case KVM_ARM_VCPU_PMU_V3_INIT:
1113 	case KVM_ARM_VCPU_PMU_V3_FILTER:
1114 	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
1115 		if (kvm_vcpu_has_pmu(vcpu))
1116 			return 0;
1117 	}
1118 
1119 	return -ENXIO;
1120 }
1121 
1122 u8 kvm_arm_pmu_get_pmuver_limit(void)
1123 {
1124 	u64 tmp;
1125 
1126 	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1127 	tmp = cpuid_feature_cap_perfmon_field(tmp,
1128 					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
1129 					      ID_AA64DFR0_EL1_PMUVer_V3P5);
1130 	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
1131 }
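/*
 * Example: a host whose sanitised ID_AA64DFR0_EL1 advertises PMUv3.7
 * is capped here to V3P5, the most recent version KVM virtualizes; an
 * IMPLEMENTATION DEFINED PMU (PMUVer == 0xf) is treated by the capping
 * helper as if no PMU were present.
 */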
1132 
1133 /**
1134  * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
1135  * @vcpu: The vcpu pointer
1136  */
1137 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
1138 {
1139 	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
1140 			~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
1141 
1142 	return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
1143 }
1144