xref: /linux/arch/riscv/include/asm/kvm_vcpu_pmu.h (revision 4e21f223)
18f0153ecSAtish Patra /* SPDX-License-Identifier: GPL-2.0-only */
28f0153ecSAtish Patra /*
38f0153ecSAtish Patra  * Copyright (c) 2023 Rivos Inc
48f0153ecSAtish Patra  *
58f0153ecSAtish Patra  * Authors:
68f0153ecSAtish Patra  *     Atish Patra <atishp@rivosinc.com>
78f0153ecSAtish Patra  */
88f0153ecSAtish Patra 
98f0153ecSAtish Patra #ifndef __KVM_VCPU_RISCV_PMU_H
108f0153ecSAtish Patra #define __KVM_VCPU_RISCV_PMU_H
118f0153ecSAtish Patra 
128f0153ecSAtish Patra #include <linux/perf/riscv_pmu.h>
138f0153ecSAtish Patra #include <asm/sbi.h>
148f0153ecSAtish Patra 
#ifdef CONFIG_RISCV_PMU_SBI
/* Maximum number of SBI firmware event counters exposed to a guest vCPU */
#define RISCV_KVM_MAX_FW_CTRS	32
/* Maximum number of hardware counters exposed to a guest vCPU */
#define RISCV_KVM_MAX_HW_CTRS	32
#define RISCV_KVM_MAX_COUNTERS	(RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
/*
 * All counters together must fit in a 64-bit bitmap (see the pmc_in_use and
 * pmc_overflown bitmaps in struct kvm_pmu) — presumably also so that a full
 * counter mask fits in one 64-bit word; confirm before raising this limit.
 */
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
208f0153ecSAtish Patra 
/* Per-vCPU state of one SBI firmware event (one slot per firmware event ID) */
struct kvm_fw_event {
	/* Current value of the event */
	u64 value;

	/* Event monitoring status: true while the guest has this event counting */
	bool started;
};
28badc3868SAtish Patra 
/* Per virtual pmu counter data */
struct kvm_pmc {
	/* Index of this counter within kvm_pmu::pmc[] */
	u8 idx;
	/* Backing host perf event; presumably NULL while unconfigured — confirm in vcpu_pmu.c */
	struct perf_event *perf_event;
	/* Last known counter value */
	u64 counter_val;
	/* Counter description in the SBI ctr_info layout (reported to the guest) */
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
	/* Back-pointer to the vCPU owning this counter */
	struct kvm_vcpu *vcpu;
};
418f0153ecSAtish Patra 
/* PMU data structure per vcpu */
struct kvm_pmu {
	/* Virtual counters: hardware counters first, then firmware counters */
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	/* Firmware event state, indexed by firmware event ID */
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of the virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of the virtual hardware counters available */
	int num_hw_ctrs;
	/* A flag to indicate that pmu initialization is done */
	bool init_done;
	/* Bit map of all the virtual counter used */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
	/* Bit map of all the virtual counter overflown */
	DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS);
	/* The address of the counter snapshot area (guest physical address) */
	gpa_t snapshot_addr;
	/* The actual data of the snapshot */
	struct riscv_pmu_snapshot_data *sdata;
};
618f0153ecSAtish Patra 
/* Convert between a vCPU and its embedded per-vCPU PMU context */
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
648f0153ecSAtish Patra 
/*
 * Initializer entries wiring the counter CSR window (starting at .base,
 * spanning .count CSRs) to the PMU read handler. On 32-bit, the upper
 * halves of the 64-bit counters (CSR_CYCLEH onward) need their own entry.
 */
#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
73a9ac6c37SAtish Patra 
/* Account one occurrence of firmware event @fid on behalf of the guest */
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
/* Read handler for the counter CSR window (see KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS) */
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);

/*
 * Handlers for the SBI PMU extension calls (names mirror the SBI PMU
 * functions). Results and SBI error codes are passed back to the guest
 * through *retdata.
 */
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
/* Read a firmware counter value (low word on 32-bit; _hi reads the high word) */
int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
				      struct kvm_vcpu_sbi_return *retdata);
/* vCPU lifecycle hooks: set up, tear down, and reset the PMU context */
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
/* Set up the counter snapshot shared memory area at guest address saddr_{low,high} */
int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
				      unsigned long saddr_high, unsigned long flags,
				      struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
1028f0153ecSAtish Patra 
1038f0153ecSAtish Patra #else
/* PMU support compiled out: empty per-vCPU PMU context */
struct kvm_pmu {
};

/* Single disabled entry for the counter-CSR dispatch table */
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = 0,	.count = 0,	.func = NULL },
109a9ac6c37SAtish Patra 
kvm_riscv_vcpu_pmu_init(struct kvm_vcpu * vcpu)1108f0153ecSAtish Patra static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
/* PMU support compiled out: ignore firmware event @fid and report success */
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}
115badc3868SAtish Patra 
kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu * vcpu)1168f0153ecSAtish Patra static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu * vcpu)1178f0153ecSAtish Patra static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
1188f0153ecSAtish Patra #endif /* CONFIG_RISCV_PMU_SBI */
1198f0153ecSAtish Patra #endif /* !__KVM_VCPU_RISCV_PMU_H */
120