// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL			0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST		0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS	0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS	0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS	0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS	0xED

/*
 * ARMv8 architecturally defined events; not all of these may be
 * supported on any given implementation. Unsupported events will
 * be disabled at run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
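
/*
 * For illustration: the generic PERF_COUNT_HW_CPU_CYCLES event above
 * maps to the architectural event ARMV8_PMUV3_PERFCTR_CPU_CYCLES
 * (0x0011). Even a mapped event is only used if the corresponding bit
 * is set in the PMCEID{0,1}_EL0 bitmap read at probe time; see
 * __armv8_pmuv3_map_event() below.
 */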

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR(name, config)						\
	(&((struct perf_pmu_events_attr) {					\
		.attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL),	\
		.id = config,							\
	}).attr.attr)

static struct attribute *armv8_pmuv3_event_attrs[] = {
	ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
	ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
	ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
	ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
	ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
	ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
	ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
	ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
	ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
	ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
	ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
	ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
	ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
	ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
	ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
	ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
	ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
	ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
	ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
	ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
	ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
	/* Don't expose the chain event in /sys, since it's useless in isolation */
	ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
	ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
	ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
	ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
	ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
	ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
	ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
	ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
	ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
	ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
	ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
	ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
	ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
	ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
	ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
	ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
	ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
	ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
	ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
	ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
	ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
	ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
	ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
	ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
	ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
	ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
	ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
	ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
	ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
	ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
	ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
	ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
	ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
	NULL,
};

static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
		u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;

		if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
		    test_bit(id, cpu_pmu->pmceid_ext_bitmap))
			return attr->mode;
	}

	return 0;
}

static const struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};

static const struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};

static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;

	return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
}

static DEVICE_ATTR_RO(slots);

static struct attribute *armv8_pmuv3_caps_attrs[] = {
	&dev_attr_slots.attr,
	NULL,
};

static const struct attribute_group armv8_pmuv3_caps_attr_group = {
	.name = "caps",
	.attrs = armv8_pmuv3_caps_attrs,
};

/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1


/*
 * We unconditionally enable ARMv8.5-PMU long event counter support
 * (64-bit events) where supported. Indicate if this arm_pmu has long
 * event counter support.
 */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
}

/*
 * We must chain two programmable counters for 64-bit events,
 * except when we have allocated the 64-bit cycle counter (for CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       !armv8pmu_has_long_event(cpu_pmu) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
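
/*
 * For illustration: with ARMV8_IDX_COUNTER0 == 1, perf idx 1 maps to
 * hardware counter 0 (PMEVCNTR0_EL0), idx 2 to counter 1, and so on.
 * The cycle counter (perf idx 0) never goes through this macro;
 * callers special-case ARMV8_IDX_CYCLE_COUNTER and use PMCCNTR_EL0
 * directly.
 */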

/*
 * This code is really good
 */

#define PMEVN_CASE(n, case_macro) \
	case n: case_macro(n); break

#define PMEVN_SWITCH(x, case_macro)				\
	do {							\
		switch (x) {					\
		PMEVN_CASE(0,  case_macro);			\
		PMEVN_CASE(1,  case_macro);			\
		PMEVN_CASE(2,  case_macro);			\
		PMEVN_CASE(3,  case_macro);			\
		PMEVN_CASE(4,  case_macro);			\
		PMEVN_CASE(5,  case_macro);			\
		PMEVN_CASE(6,  case_macro);			\
		PMEVN_CASE(7,  case_macro);			\
		PMEVN_CASE(8,  case_macro);			\
		PMEVN_CASE(9,  case_macro);			\
		PMEVN_CASE(10, case_macro);			\
		PMEVN_CASE(11, case_macro);			\
		PMEVN_CASE(12, case_macro);			\
		PMEVN_CASE(13, case_macro);			\
		PMEVN_CASE(14, case_macro);			\
		PMEVN_CASE(15, case_macro);			\
		PMEVN_CASE(16, case_macro);			\
		PMEVN_CASE(17, case_macro);			\
		PMEVN_CASE(18, case_macro);			\
		PMEVN_CASE(19, case_macro);			\
		PMEVN_CASE(20, case_macro);			\
		PMEVN_CASE(21, case_macro);			\
		PMEVN_CASE(22, case_macro);			\
		PMEVN_CASE(23, case_macro);			\
		PMEVN_CASE(24, case_macro);			\
		PMEVN_CASE(25, case_macro);			\
		PMEVN_CASE(26, case_macro);			\
		PMEVN_CASE(27, case_macro);			\
		PMEVN_CASE(28, case_macro);			\
		PMEVN_CASE(29, case_macro);			\
		PMEVN_CASE(30, case_macro);			\
		default: WARN(1, "Invalid PMEV* index\n");	\
		}						\
	} while (0)

#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}
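
/*
 * Example expansion, for illustration: read_pmevcntrn(3) selects
 * "case 3: return read_sysreg(pmevcntr3_el0); break;". The switch is
 * needed because the PMEVCNTR<n>_EL0 registers can only be named with
 * a literal index in an mrs/msr instruction, so the index cannot be
 * computed at run time.
 */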

#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}

#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}

static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline u64 armv8pmu_read_evcntr(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	return read_pmevcntrn(counter);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = armv8pmu_read_evcntr(idx);

	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}

/*
 * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
 * is set, the event counters also become 64-bit counters. Unless the
 * user has requested a long counter (attr.config1), we want to
 * interrupt upon 32-bit overflow - we achieve this by applying a bias.
 */
static bool armv8pmu_event_needs_bias(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_64bit(event))
		return false;

	if (armv8pmu_has_long_event(cpu_pmu) ||
	    idx == ARMV8_IDX_CYCLE_COUNTER)
		return true;

	return false;
}

static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value |= GENMASK(63, 32);

	return value;
}

static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value &= ~GENMASK(63, 32);

	return value;
}
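
/*
 * Worked example of the bias, for illustration: a 32-bit event
 * programmed with the value 0x0000ffff on a 64-bit counter is biased
 * to 0xffffffff0000ffff, so the 64-bit hardware counter overflows
 * (and raises its interrupt) after 0xffff0001 increments - exactly
 * when a 32-bit counter would have wrapped. Reads undo the bias by
 * clearing the top 32 bits again.
 */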

static u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value;

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return armv8pmu_unbias_long_counter(event, value);
}

static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_pmevcntrn(counter, value);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	value = armv8pmu_bias_long_counter(event, value);

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		write_sysreg(value, pmccntr_el0);
	else
		armv8pmu_write_hw_counter(event, value);
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	val &= ARMV8_PMU_EVTYPE_MASK;
	write_pmevtypern(counter, val);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with the CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		if (idx == ARMV8_IDX_CYCLE_COUNTER)
			write_sysreg(hwc->config_base, pmccfiltr_el0);
		else
			armv8pmu_write_evtype(idx, hwc->config_base);
	}
}
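
/*
 * For illustration: for a chained event at perf idx n, the low half
 * lives in perf idx n - 1 (an even-numbered hardware counter) and
 * counts the requested event, while perf idx n (the odd hardware
 * counter above it) counts the architectural CHAIN event (0x1E),
 * which increments whenever the even counter overflows; together
 * they behave as one 64-bit counter.
 */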

static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
{
	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	u32 mask = BIT(counter);

	if (armv8pmu_event_is_chained(event))
		mask |= BIT(counter - 1);
	return mask;
}

static inline void armv8pmu_enable_counter(u32 mask)
{
	/*
	 * Make sure event configuration register writes are visible before we
	 * enable the counter.
	 */
	isb();
	write_sysreg(mask, pmcntenset_el0);
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_set_pmu_events(mask, attr);

	/* We rely on the hypervisor switch code to enable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_enable_counter(mask);
}

static inline void armv8pmu_disable_counter(u32 mask)
{
	write_sysreg(mask, pmcntenclr_el0);
	/*
	 * Make sure the effects of disabling the counter are visible before we
	 * start configuring the event.
	 */
	isb();
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_clr_pmu_events(mask);

	/* We rely on the hypervisor switch code to disable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_disable_counter(mask);
}

static inline void armv8pmu_enable_intens(u32 mask)
{
	write_sysreg(mask, pmintenset_el1);
}

static inline void armv8pmu_enable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_enable_intens(BIT(counter));
}

static inline void armv8pmu_disable_intens(u32 mask)
{
	write_sysreg(mask, pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(mask, pmovsclr_el0);
	isb();
}

static inline void armv8pmu_disable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_disable_intens(BIT(counter));
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event.
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
}

static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		/*
		 * Perf event overflow will queue the processing of the event as
		 * an irq_work which will be taken care of in the handling of
		 * IPI_IRQ_WORK.
		 */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	return IRQ_HANDLED;
}

static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}

static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}
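
/*
 * Worked example, for illustration: with six event counters plus the
 * cycle counter (num_events == 7), armv8pmu_get_chain_idx() probes
 * perf idx 2, 4 and 6, i.e. hardware counter pairs (0,1), (2,3) and
 * (4,5). A pair is claimed only if both bits can be taken from
 * used_mask; otherwise the odd counter is released and the next pair
 * is tried.
 */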

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * Otherwise use the event counters
	 */
	if (armv8pmu_event_is_64bit(event) &&
	    !armv8pmu_has_long_event(cpu_pmu))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}

static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}

/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
		if (attr->exclude_guest)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (attr->exclude_host)
			config_base |= ARMV8_PMU_EXCLUDE_EL0;
	} else {
		if (!attr->exclude_hv && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}

	/*
	 * Filter out !VHE kernels and guest kernels
	 */
	if (attr->exclude_kernel)
		config_base |= ARMV8_PMU_EXCLUDE_EL1;

	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
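
/*
 * For illustration: "perf stat -e cycles:u" sets attr->exclude_kernel,
 * so ARMV8_PMU_EXCLUDE_EL1 lands in config_base and EL1 is filtered
 * out; ":k" conversely sets attr->exclude_user, adding
 * ARMV8_PMU_EXCLUDE_EL0.
 */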

static int armv8pmu_filter_match(struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}

static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 pmcr;

	/* The counter and interrupt enable registers are unknown at reset. */
	armv8pmu_disable_counter(U32_MAX);
	armv8pmu_disable_intens(U32_MAX);

	/* Clear the counters we flip at guest entry/exit */
	kvm_clr_pmu_events(U32_MAX);

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * the 64-bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;

	/* Enable long event counter support where available */
	if (armv8pmu_has_long_event(cpu_pmu))
		pmcr |= ARMV8_PMU_PMCR_LP;

	armv8pmu_pmcr_write(pmcr);
}

static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}

struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};

static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_PMUVER_SHIFT);
	if (pmuver == 0xf || pmuver == 0)
		return;

	cpu_pmu->pmuver = pmuver;
	probe->present = true;

	/* Read the number of CNTx counters supported from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	pmceid[0] = pmceid_raw[0] >> 32;
	pmceid[1] = pmceid_raw[1] >> 32;

	bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
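
	/*
	 * For illustration: bit 32+n of PMCEID0_EL0 advertises extended
	 * common event 0x4000+n, so e.g. bit 35 means event 0x4003 is
	 * implemented. The high halves of both registers were collected
	 * into pmceid_ext_bitmap above.
	 */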

	/* store PMMIR_EL1 register for sysfs */
	if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
		cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
	else
		cpu_pmu->reg_pmmir = 0;
}

static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}

static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
			  int (*map_event)(struct perf_event *event),
			  const struct attribute_group *events,
			  const struct attribute_group *format,
			  const struct attribute_group *caps)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx	= armv8pmu_clear_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
	cpu_pmu->filter_match		= armv8pmu_filter_match;

	cpu_pmu->name			= name;
	cpu_pmu->map_event		= map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
			events : &armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
			format : &armv8_pmuv3_format_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
			caps : &armv8_pmuv3_caps_attr_group;

	return 0;
}

static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
				   int (*map_event)(struct perf_event *event))
{
	return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3",
				       armv8_pmuv3_map_event);
}

static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34",
				       armv8_pmuv3_map_event);
}

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
				       armv8_a53_map_event);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
				       armv8_a53_map_event);
}

static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55",
				       armv8_pmuv3_map_event);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
				       armv8_a57_map_event);
}

static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65",
				       armv8_pmuv3_map_event);
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
				       armv8_a57_map_event);
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
				       armv8_a73_map_event);
}

static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75",
				       armv8_pmuv3_map_event);
}

static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76",
				       armv8_pmuv3_map_event);
}

static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77",
				       armv8_pmuv3_map_event);
}

static int armv8_a78_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a78",
				       armv8_pmuv3_map_event);
}

static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1",
				       armv8_pmuv3_map_event);
}

static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1",
				       armv8_pmuv3_map_event);
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
				       armv8_thunder_map_event);
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
				       armv8_vulcan_map_event);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a34-pmu",	.data = armv8_a34_pmu_init},
	{.compatible = "arm,cortex-a35-pmu",	.data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a55-pmu",	.data = armv8_a55_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a65-pmu",	.data = armv8_a65_pmu_init},
	{.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu",	.data = armv8_a73_pmu_init},
	{.compatible = "arm,cortex-a75-pmu",	.data = armv8_a75_pmu_init},
	{.compatible = "arm,cortex-a76-pmu",	.data = armv8_a76_pmu_init},
	{.compatible = "arm,cortex-a77-pmu",	.data = armv8_a77_pmu_init},
	{.compatible = "arm,cortex-a78-pmu",	.data = armv8_a78_pmu_init},
	{.compatible = "arm,neoverse-e1-pmu",	.data = armv8_e1_pmu_init},
	{.compatible = "arm,neoverse-n1-pmu",	.data = armv8_n1_pmu_init},
	{.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu",	.data = armv8_vulcan_pmu_init},
	{},
};
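
/*
 * A typical devicetree node matched by this table looks roughly like
 * the following (sketch based on the generic arm,pmu binding; the
 * interrupt is normally a per-CPU PPI):
 *
 *	pmu {
 *		compatible = "arm,cortex-a53-pmu";
 *		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */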

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe		= armv8_pmu_device_probe,
};

static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 ns;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_time_short = 0;

	do {
		rd = sched_clock_read_begin(&seq);

		if (rd->read_sched_clock != arch_timer_read_counter)
			return;

		userpg->time_mult = rd->mult;
		userpg->time_shift = rd->shift;
		userpg->time_zero = rd->epoch_ns;
		userpg->time_cycles = rd->epoch_cyc;
		userpg->time_mask = rd->sched_clock_mask;

		/*
		 * Subtract the cycle base, such that software that
		 * doesn't know about cap_user_time_short still 'works'
		 * assuming no wraps.
		 */
		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
		userpg->time_zero -= ns;

	} while (sched_clock_read_retry(seq));

	userpg->time_offset = userpg->time_zero - now;

	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (userpg->time_shift == 32) {
		userpg->time_shift = 31;
		userpg->time_mult >>= 1;
	}
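
	/*
	 * Worked example, for illustration: userspace computes
	 * ns = (cyc * time_mult) >> time_shift. Halving time_mult while
	 * dropping time_shift from 32 to 31 leaves mult / 2^shift
	 * unchanged, so the conversion result is preserved while the
	 * shift stays within the documented 32-bit range.
	 */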

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	userpg->cap_user_time = 1;
	userpg->cap_user_time_zero = 1;
	userpg->cap_user_time_short = 1;
}