// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * APM X-Gene SoC PMU (Performance Monitor Unit)
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Author: Hoan Tran <hotran@apm.com>
 *         Tai Nguyen <ttnguyen@apm.com>
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define CSW_CSWCR			0x0000
#define CSW_CSWCR_DUALMCB_MASK		BIT(0)
#define CSW_CSWCR_MCB0_ROUTING(x)	(((x) & 0x0C) >> 2)
#define CSW_CSWCR_MCB1_ROUTING(x)	(((x) & 0x30) >> 4)
#define MCBADDRMR			0x0000
#define MCBADDRMR_DUALMCU_MODE_MASK	BIT(2)

#define PCPPMU_INTSTATUS_REG	0x000
#define PCPPMU_INTMASK_REG	0x004
#define PCPPMU_INTMASK		0x0000000F
#define PCPPMU_INTENMASK	0xFFFFFFFF
#define PCPPMU_INTCLRMASK	0xFFFFFFF0
#define PCPPMU_INT_MCU		BIT(0)
#define PCPPMU_INT_MCB		BIT(1)
#define PCPPMU_INT_L3C		BIT(2)
#define PCPPMU_INT_IOB		BIT(3)

#define PCPPMU_V3_INTMASK	0x00FF33FF
#define PCPPMU_V3_INTENMASK	0xFFFFFFFF
#define PCPPMU_V3_INTCLRMASK	0xFF00CC00
#define PCPPMU_V3_INT_MCU	0x000000FF
#define PCPPMU_V3_INT_MCB	0x00000300
#define PCPPMU_V3_INT_L3C	0x00FF0000
#define PCPPMU_V3_INT_IOB	0x00003000
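
/*
 * The *_INTCLRMASK values are the bitwise complement of the corresponding
 * *_INTMASK values: writing all ones (*_INTENMASK) masks every interrupt
 * source, while writing the CLR mask leaves only the PMU sources unmasked.
 */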

#define PMU_MAX_COUNTERS	4
#define PMU_CNT_MAX_PERIOD	0xFFFFFFFFULL
#define PMU_V3_CNT_MAX_PERIOD	0xFFFFFFFFFFFFFFFFULL
#define PMU_OVERFLOW_MASK	0xF
#define PMU_PMCR_E		BIT(0)
#define PMU_PMCR_P		BIT(1)
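/*
 * PMCR.E starts/stops counting and PMCR.P resets the event counters; see
 * xgene_pmu_start_counters() and xgene_pmu_reset_counters() below.
 */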

#define PMU_PMEVCNTR0		0x000
#define PMU_PMEVCNTR1		0x004
#define PMU_PMEVCNTR2		0x008
#define PMU_PMEVCNTR3		0x00C
#define PMU_PMEVTYPER0		0x400
#define PMU_PMEVTYPER1		0x404
#define PMU_PMEVTYPER2		0x408
#define PMU_PMEVTYPER3		0x40C
#define PMU_PMAMR0		0xA00
#define PMU_PMAMR1		0xA04
#define PMU_PMCNTENSET		0xC00
#define PMU_PMCNTENCLR		0xC20
#define PMU_PMINTENSET		0xC40
#define PMU_PMINTENCLR		0xC60
#define PMU_PMOVSR		0xC80
#define PMU_PMCR		0xE04

/* PMU registers for V3 */
#define PMU_PMOVSCLR		0xC80
#define PMU_PMOVSSET		0xCC0

#define to_pmu_dev(p)		container_of(p, struct xgene_pmu_dev, pmu)
#define GET_CNTR(ev)		(ev->hw.idx)
#define GET_EVENTID(ev)		(ev->hw.config & 0xFFULL)
#define GET_AGENTID(ev)		(ev->hw.config_base & 0xFFFFFFFFUL)
#define GET_AGENT1ID(ev)	((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
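
/*
 * Event encoding, as set up by xgene_perf_event_init(): bits [7:0] of
 * hw.config (from perf_event_attr.config) hold the event ID, while
 * hw.config_base mirrors perf_event_attr.config1 - its low 32 bits are
 * the primary agent mask and its high 32 bits the secondary agent mask.
 */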

struct hw_pmu_info {
	u32 type;
	u32 enable_mask;
	void __iomem *csr;
};

struct xgene_pmu_dev {
	struct hw_pmu_info *inf;
	struct xgene_pmu *parent;
	struct pmu pmu;
	u8 max_counters;
	DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
	u64 max_period;
	const struct attribute_group **attr_groups;
	struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
};

struct xgene_pmu_ops {
	void (*mask_int)(struct xgene_pmu *pmu);
	void (*unmask_int)(struct xgene_pmu *pmu);
	u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx);
	void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val);
	void (*write_evttype)(struct xgene_pmu_dev *pmu_dev, int idx, u32 val);
	void (*write_agentmsk)(struct xgene_pmu_dev *pmu_dev, u32 val);
	void (*write_agent1msk)(struct xgene_pmu_dev *pmu_dev, u32 val);
	void (*enable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*disable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*enable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*disable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*reset_counters)(struct xgene_pmu_dev *pmu_dev);
	void (*start_counters)(struct xgene_pmu_dev *pmu_dev);
	void (*stop_counters)(struct xgene_pmu_dev *pmu_dev);
};

struct xgene_pmu {
	struct device *dev;
	struct hlist_node node;
	int version;
	void __iomem *pcppmu_csr;
	u32 mcb_active_mask;
	u32 mc_active_mask;
	u32 l3c_active_mask;
	cpumask_t cpu;
	int irq;
	raw_spinlock_t lock;
	const struct xgene_pmu_ops *ops;
	struct list_head l3cpmus;
	struct list_head iobpmus;
	struct list_head mcbpmus;
	struct list_head mcpmus;
};

struct xgene_pmu_dev_ctx {
	char *name;
	struct list_head next;
	struct xgene_pmu_dev *pmu_dev;
	struct hw_pmu_info inf;
};

struct xgene_pmu_data {
	int id;
	u32 data;
};

enum xgene_pmu_version {
	PCP_PMU_V1 = 1,
	PCP_PMU_V2,
	PCP_PMU_V3,
};

enum xgene_pmu_dev_type {
	PMU_TYPE_L3C = 0,
	PMU_TYPE_IOB,
	PMU_TYPE_IOB_SLOW,
	PMU_TYPE_MCB,
	PMU_TYPE_MC,
};

/*
 * sysfs format attributes
 */
static ssize_t xgene_pmu_format_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}

#define XGENE_PMU_FORMAT_ATTR(_name, _config)		\
	(&((struct dev_ext_attribute[]) {		\
		{ .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
		  .var = (void *) _config, }		\
	})[0].attr.attr)
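
/*
 * The format strings below follow the standard perf sysfs ABI: an entry
 * such as "config:0-7" tells user space which bit range of the matching
 * perf_event_attr field encodes that parameter.
 */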

static struct attribute *l3c_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
	XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
	NULL,
};

static struct attribute *iob_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
	XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
	NULL,
};

static struct attribute *mcb_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
	XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
	NULL,
};

static struct attribute *mc_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
	NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group iob_pmu_format_attr_group = {
	.name = "format",
	.attrs = iob_pmu_format_attrs,
};

static const struct attribute_group mcb_pmu_format_attr_group = {
	.name = "format",
	.attrs = mcb_pmu_format_attrs,
};

static const struct attribute_group mc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mc_pmu_format_attrs,
};

static struct attribute *l3c_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-39"),
	NULL,
};

static struct attribute *iob_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-47"),
	NULL,
};

static struct attribute *iob_slow_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_slow_eventid, "config:0-16"),
	NULL,
};

static struct attribute *mcb_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-35"),
	NULL,
};

static struct attribute *mc_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-44"),
	NULL,
};

static const struct attribute_group l3c_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_v3_format_attrs,
};

static const struct attribute_group iob_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = iob_pmu_v3_format_attrs,
};

static const struct attribute_group iob_slow_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = iob_slow_pmu_v3_format_attrs,
};

static const struct attribute_group mcb_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = mcb_pmu_v3_format_attrs,
};

static const struct attribute_group mc_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = mc_pmu_v3_format_attrs,
};

/*
 * sysfs event attributes
 */
static ssize_t xgene_pmu_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
}

#define XGENE_PMU_EVENT_ATTR(_name, _config)	\
	PMU_EVENT_ATTR_ID(_name, xgene_pmu_event_show, _config)
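
/*
 * Illustrative usage: with a PMU registered as, say, "l3c0", the named
 * events below can be counted system-wide from user space, e.g.:
 *   perf stat -a -e l3c0/read-miss/ sleep 1
 * where "read-miss" resolves through the events group to config=0x03.
 */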

static struct attribute *l3c_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
	XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
	XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
	XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
	XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
	XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
	XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
	XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
	XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
	XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
	XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
	XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
	NULL,
};

static struct attribute *iob_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
	XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
	XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
	XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
	XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
	XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
	XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
	XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
	XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
	XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
	XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
	NULL,
};

static struct attribute *mcb_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
	XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
	XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
	XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
	NULL,
};

static struct attribute *mc_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
	XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
	XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
	XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
	XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
	XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
	XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
	XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
	XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
	XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
	XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
	XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
	XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
	XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
	XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
	XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
	XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
	XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
	XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
	XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
	XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
	XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
	XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
	NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group iob_pmu_events_attr_group = {
	.name = "events",
	.attrs = iob_pmu_events_attrs,
};

static const struct attribute_group mcb_pmu_events_attr_group = {
	.name = "events",
	.attrs = mcb_pmu_events_attrs,
};

static const struct attribute_group mc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mc_pmu_events_attrs,
};

static struct attribute *l3c_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(read-hit, 0x01),
	XGENE_PMU_EVENT_ATTR(read-miss, 0x02),
	XGENE_PMU_EVENT_ATTR(index-flush-eviction, 0x03),
	XGENE_PMU_EVENT_ATTR(write-caused-replacement, 0x04),
	XGENE_PMU_EVENT_ATTR(write-not-caused-replacement, 0x05),
	XGENE_PMU_EVENT_ATTR(clean-eviction, 0x06),
	XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07),
	XGENE_PMU_EVENT_ATTR(read, 0x08),
	XGENE_PMU_EVENT_ATTR(write, 0x09),
	XGENE_PMU_EVENT_ATTR(request, 0x0a),
	XGENE_PMU_EVENT_ATTR(tq-bank-conflict-issue-stall, 0x0b),
	XGENE_PMU_EVENT_ATTR(tq-full, 0x0c),
	XGENE_PMU_EVENT_ATTR(ackq-full, 0x0d),
	XGENE_PMU_EVENT_ATTR(wdb-full, 0x0e),
	XGENE_PMU_EVENT_ATTR(odb-full, 0x10),
	XGENE_PMU_EVENT_ATTR(wbq-full, 0x11),
	XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall, 0x12),
	XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall, 0x13),
	XGENE_PMU_EVENT_ATTR(output-data-async-fifo-stall, 0x14),
	XGENE_PMU_EVENT_ATTR(total-insertion, 0x15),
	XGENE_PMU_EVENT_ATTR(sip-insertions-r-set, 0x16),
	XGENE_PMU_EVENT_ATTR(sip-insertions-r-clear, 0x17),
	XGENE_PMU_EVENT_ATTR(dip-insertions-r-set, 0x18),
	XGENE_PMU_EVENT_ATTR(dip-insertions-r-clear, 0x19),
	XGENE_PMU_EVENT_ATTR(dip-insertions-force-r-set, 0x1a),
	XGENE_PMU_EVENT_ATTR(egression, 0x1b),
	XGENE_PMU_EVENT_ATTR(replacement, 0x1c),
	XGENE_PMU_EVENT_ATTR(old-replacement, 0x1d),
	XGENE_PMU_EVENT_ATTR(young-replacement, 0x1e),
	XGENE_PMU_EVENT_ATTR(r-set-replacement, 0x1f),
	XGENE_PMU_EVENT_ATTR(r-clear-replacement, 0x20),
	XGENE_PMU_EVENT_ATTR(old-r-replacement, 0x21),
	XGENE_PMU_EVENT_ATTR(old-nr-replacement, 0x22),
	XGENE_PMU_EVENT_ATTR(young-r-replacement, 0x23),
	XGENE_PMU_EVENT_ATTR(young-nr-replacement, 0x24),
	XGENE_PMU_EVENT_ATTR(bloomfilter-clearing, 0x25),
	XGENE_PMU_EVENT_ATTR(generation-flip, 0x26),
	XGENE_PMU_EVENT_ATTR(vcc-droop-detected, 0x27),
	NULL,
};

static struct attribute *iob_fast_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all, 0x01),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd, 0x02),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr, 0x03),
	XGENE_PMU_EVENT_ATTR(pa-all-cp-req, 0x04),
	XGENE_PMU_EVENT_ATTR(pa-cp-blk-req, 0x05),
	XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req, 0x06),
	XGENE_PMU_EVENT_ATTR(pa-cp-rd-req, 0x07),
	XGENE_PMU_EVENT_ATTR(pa-cp-wr-req, 0x08),
	XGENE_PMU_EVENT_ATTR(ba-all-req, 0x09),
	XGENE_PMU_EVENT_ATTR(ba-rd-req, 0x0a),
	XGENE_PMU_EVENT_ATTR(ba-wr-req, 0x0b),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued, 0x10),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11),
	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12),
	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable, 0x14),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15),
	XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req, 0x16),
	XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req, 0x17),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-clean-data, 0x18),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-cancelled-on-SS, 0x1b),
	XGENE_PMU_EVENT_ATTR(pa-barrier-occurrence, 0x1c),
	XGENE_PMU_EVENT_ATTR(pa-barrier-cycles, 0x1d),
	XGENE_PMU_EVENT_ATTR(pa-total-cp-snoops, 0x20),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop, 0x21),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop-hit, 0x22),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24),
	XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop, 0x25),
	XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop-hit, 0x26),
	XGENE_PMU_EVENT_ATTR(pa-req-buffer-full, 0x28),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full, 0x29),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-lack-fifo-full, 0x2b),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-data-fifo-full, 0x2d),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure, 0x2f),
	NULL,
};

static struct attribute *iob_slow_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req, 0x01),
	XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req, 0x02),
	XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req, 0x03),
	XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req, 0x04),
	XGENE_PMU_EVENT_ATTR(ba-all-axi-req, 0x07),
	XGENE_PMU_EVENT_ATTR(ba-axi-rd-req, 0x08),
	XGENE_PMU_EVENT_ATTR(ba-axi-wr-req, 0x09),
	XGENE_PMU_EVENT_ATTR(ba-free-list-empty, 0x10),
	NULL,
};

static struct attribute *mcb_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(req-receive, 0x01),
	XGENE_PMU_EVENT_ATTR(rd-req-recv, 0x02),
	XGENE_PMU_EVENT_ATTR(rd-req-recv-2, 0x03),
	XGENE_PMU_EVENT_ATTR(wr-req-recv, 0x04),
	XGENE_PMU_EVENT_ATTR(wr-req-recv-2, 0x05),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu, 0x06),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2, 0x07),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu, 0x08),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2, 0x09),
	XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b),
	XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req, 0x0d),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2, 0x0e),
	XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu, 0x0f),
	XGENE_PMU_EVENT_ATTR(gack-recv, 0x10),
	XGENE_PMU_EVENT_ATTR(rd-gack-recv, 0x11),
	XGENE_PMU_EVENT_ATTR(wr-gack-recv, 0x12),
	XGENE_PMU_EVENT_ATTR(cancel-rd-gack, 0x13),
	XGENE_PMU_EVENT_ATTR(cancel-wr-gack, 0x14),
	XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall, 0x15),
	XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked, 0x16),
	XGENE_PMU_EVENT_ATTR(mcb-mcu-rd-intf-stall, 0x17),
	XGENE_PMU_EVENT_ATTR(csw-rd-intf-blocked, 0x18),
	XGENE_PMU_EVENT_ATTR(csw-local-ack-intf-blocked, 0x19),
	XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a),
	XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b),
	XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c),
	XGENE_PMU_EVENT_ATTR(mcu-rdreceipt-resp, 0x1d),
	XGENE_PMU_EVENT_ATTR(mcu-wrcomplete-resp, 0x1e),
	XGENE_PMU_EVENT_ATTR(mcu-retryack-resp, 0x1f),
	XGENE_PMU_EVENT_ATTR(mcu-pcrdgrant-resp, 0x20),
	XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21),
	XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22),
	XGENE_PMU_EVENT_ATTR(volt-droop-detect, 0x23),
	NULL,
};

static struct attribute *mc_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(act-sent, 0x01),
	XGENE_PMU_EVENT_ATTR(pre-sent, 0x02),
	XGENE_PMU_EVENT_ATTR(rd-sent, 0x03),
	XGENE_PMU_EVENT_ATTR(rda-sent, 0x04),
	XGENE_PMU_EVENT_ATTR(wr-sent, 0x05),
	XGENE_PMU_EVENT_ATTR(wra-sent, 0x06),
	XGENE_PMU_EVENT_ATTR(pd-entry-vld, 0x07),
	XGENE_PMU_EVENT_ATTR(sref-entry-vld, 0x08),
	XGENE_PMU_EVENT_ATTR(prea-sent, 0x09),
	XGENE_PMU_EVENT_ATTR(ref-sent, 0x0a),
	XGENE_PMU_EVENT_ATTR(rd-rda-sent, 0x0b),
	XGENE_PMU_EVENT_ATTR(wr-wra-sent, 0x0c),
	XGENE_PMU_EVENT_ATTR(raw-hazard, 0x0d),
	XGENE_PMU_EVENT_ATTR(war-hazard, 0x0e),
	XGENE_PMU_EVENT_ATTR(waw-hazard, 0x0f),
	XGENE_PMU_EVENT_ATTR(rar-hazard, 0x10),
	XGENE_PMU_EVENT_ATTR(raw-war-waw-hazard, 0x11),
	XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld, 0x12),
	XGENE_PMU_EVENT_ATTR(lprd-req-vld, 0x13),
	XGENE_PMU_EVENT_ATTR(hprd-req-vld, 0x14),
	XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld, 0x15),
	XGENE_PMU_EVENT_ATTR(wr-req-vld, 0x16),
	XGENE_PMU_EVENT_ATTR(partial-wr-req-vld, 0x17),
	XGENE_PMU_EVENT_ATTR(rd-retry, 0x18),
	XGENE_PMU_EVENT_ATTR(wr-retry, 0x19),
	XGENE_PMU_EVENT_ATTR(retry-gnt, 0x1a),
	XGENE_PMU_EVENT_ATTR(rank-change, 0x1b),
	XGENE_PMU_EVENT_ATTR(dir-change, 0x1c),
	XGENE_PMU_EVENT_ATTR(rank-dir-change, 0x1d),
	XGENE_PMU_EVENT_ATTR(rank-active, 0x1e),
	XGENE_PMU_EVENT_ATTR(rank-idle, 0x1f),
	XGENE_PMU_EVENT_ATTR(rank-pd, 0x20),
	XGENE_PMU_EVENT_ATTR(rank-sref, 0x21),
	XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22),
	XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23),
	XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24),
	XGENE_PMU_EVENT_ATTR(phy-updt-complt, 0x25),
	XGENE_PMU_EVENT_ATTR(tz-fail, 0x26),
	XGENE_PMU_EVENT_ATTR(dram-errc, 0x27),
	XGENE_PMU_EVENT_ATTR(dram-errd, 0x28),
	XGENE_PMU_EVENT_ATTR(rd-enq, 0x29),
	XGENE_PMU_EVENT_ATTR(wr-enq, 0x2a),
	XGENE_PMU_EVENT_ATTR(tmac-limit-reached, 0x2b),
	XGENE_PMU_EVENT_ATTR(tmaw-tracker-full, 0x2c),
	NULL,
};

static const struct attribute_group l3c_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_v3_events_attrs,
};

static const struct attribute_group iob_fast_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = iob_fast_pmu_v3_events_attrs,
};

static const struct attribute_group iob_slow_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = iob_slow_pmu_v3_events_attrs,
};

static const struct attribute_group mcb_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = mcb_pmu_v3_events_attrs,
};

static const struct attribute_group mc_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = mc_pmu_v3_events_attrs,
};

/*
 * sysfs cpumask attributes
 */
static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *xgene_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
	.attrs = xgene_pmu_cpumask_attrs,
};
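
/*
 * The perf tool reads the "cpumask" attribute to decide which CPU to open
 * uncore events on; xgene_perf_event_init() enforces the same CPU for
 * events opened directly via the syscall.
 */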

/*
 * Per PMU device attribute groups of PMU v1 and v2
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
	&l3c_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *iob_pmu_attr_groups[] = {
	&iob_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *mcb_pmu_attr_groups[] = {
	&mcb_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&mcb_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *mc_pmu_attr_groups[] = {
	&mc_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&mc_pmu_events_attr_group,
	NULL
};

/*
 * Per PMU device attribute groups of PMU v3
 */
static const struct attribute_group *l3c_pmu_v3_attr_groups[] = {
	&l3c_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *iob_fast_pmu_v3_attr_groups[] = {
	&iob_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_fast_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *iob_slow_pmu_v3_attr_groups[] = {
	&iob_slow_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_slow_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *mcb_pmu_v3_attr_groups[] = {
	&mcb_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&mcb_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *mc_pmu_v3_attr_groups[] = {
	&mc_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&mc_pmu_v3_events_attr_group,
	NULL
};

static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
{
	int cntr;

	cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
				   pmu_dev->max_counters);
	if (cntr == pmu_dev->max_counters)
		return -ENOSPC;
	set_bit(cntr, pmu_dev->cntr_assign_mask);

	return cntr;
}

static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
{
	clear_bit(cntr, pmu_dev->cntr_assign_mask);
}

static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_v3_mask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_V3_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_v3_unmask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_V3_INTCLRMASK,
	       xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline u64 xgene_pmu_read_counter32(struct xgene_pmu_dev *pmu_dev,
					   int idx)
{
	return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline u64 xgene_pmu_read_counter64(struct xgene_pmu_dev *pmu_dev,
					   int idx)
{
	u32 lo, hi;

	/*
	 * v3 has 64-bit counter registers composed of two 32-bit registers.
	 * This can be a problem if the counter increases and carries out of
	 * bit [31] between the two reads. Re-reading the high word until it
	 * is stable guards against this.
	 */
	do {
		hi = xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1);
		lo = xgene_pmu_read_counter32(pmu_dev, 2 * idx);
	} while (hi != xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1));

	return (((u64)hi << 32) | lo);
}

static inline void
xgene_pmu_write_counter32(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline void
xgene_pmu_write_counter64(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
	u32 cnt_lo, cnt_hi;

	cnt_hi = upper_32_bits(val);
	cnt_lo = lower_32_bits(val);

	/* v3 has 64-bit counter registers composed of two 32-bit registers */
	xgene_pmu_write_counter32(pmu_dev, 2 * idx, cnt_lo);
	xgene_pmu_write_counter32(pmu_dev, 2 * idx + 1, cnt_hi);
}
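
/*
 * Note: unlike the read side, the 64-bit write has no tearing protection.
 * This relies on counters only being written while counting is stopped
 * (perf core brackets add/start with pmu_disable(), and the overflow
 * handler stops the counters before reprogramming them).
 */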

static inline void
xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
}

static inline void
xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
}

static inline void
xgene_pmu_v3_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) { }

static inline void
xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
}

static inline void
xgene_pmu_v3_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) { }

static inline void
xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
}

static inline void
xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
}

static inline void
xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
}

static inline void
xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
}

static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val |= PMU_PMCR_P;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val |= PMU_PMCR_E;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val &= ~PMU_PMCR_E;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static void xgene_perf_pmu_enable(struct pmu *pmu)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	bool enabled = !bitmap_empty(pmu_dev->cntr_assign_mask,
				     pmu_dev->max_counters);

	if (!enabled)
		return;

	xgene_pmu->ops->start_counters(pmu_dev);
}

static void xgene_perf_pmu_disable(struct pmu *pmu)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->stop_counters(pmu_dev);
}

static int xgene_perf_event_init(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct perf_event *sibling;

	/* Test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * SoC PMU counters are shared across all cores. Therefore, the PMU
	 * supports neither per-task mode nor event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;
	/*
	 * Many perf core operations (eg. events rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, where each
	 * event could be theoretically assigned to a different CPU. To
	 * mitigate this, we enforce CPU assignment to one, selected
	 * processor (the one described in the "cpumask" attribute).
	 */
	event->cpu = cpumask_first(&pmu_dev->parent->cpu);

	hw->config = event->attr.config;
	/*
	 * Each bit of the config1 field represents an agent from which the
	 * event request may come. The event is counted only if it is caused
	 * by a request from an agent whose bit is cleared.
	 * By default, the event is counted for all agents.
	 */
	hw->config_base = event->attr.config1;

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	return 0;
}

static void xgene_perf_enable_event(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event),
				      GET_EVENTID(event));
	xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
	if (pmu_dev->inf->type == PMU_TYPE_IOB)
		xgene_pmu->ops->write_agent1msk(pmu_dev,
						~((u32)GET_AGENT1ID(event)));

	xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event));
	xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_disable_event(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event));
	xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_event_set_period(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;
	/*
	 * A 32-bit counter has a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period
	 * of half that. Hopefully, we can handle the interrupt before
	 * another 2^31 events occur and the counter overtakes its previous
	 * value. A 64-bit counter is not expected to overflow.
	 */
	u64 val = 1ULL << 31;

	local64_set(&hw->prev_count, val);
	xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val);
}

static void xgene_perf_event_update(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hw->prev_count);
	new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event));

	if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

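	/* Mask the delta to the counter width: 32 bits on v1/v2, 64 on v3 */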
	delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;

	local64_add(delta, &event->count);
}

static void xgene_perf_read(struct perf_event *event)
{
	xgene_perf_event_update(event);
}

static void xgene_perf_start(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;

	if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
	hw->state = 0;

	xgene_perf_event_set_period(event);

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hw->prev_count);

		xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event),
					      prev_raw_count);
	}

	xgene_perf_enable_event(event);
	perf_event_update_userpage(event);
}

static void xgene_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	if (hw->state & PERF_HES_UPTODATE)
		return;

	xgene_perf_disable_event(event);
	WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
	hw->state |= PERF_HES_STOPPED;

	if (hw->state & PERF_HES_UPTODATE)
		return;

	xgene_perf_read(event);
	hw->state |= PERF_HES_UPTODATE;
}

static int xgene_perf_add(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* Allocate an event counter */
	hw->idx = get_next_avail_cntr(pmu_dev);
	if (hw->idx < 0)
		return -EAGAIN;

	/* Update counter event pointer for the interrupt handler */
	pmu_dev->pmu_counter_event[hw->idx] = event;

	if (flags & PERF_EF_START)
		xgene_perf_start(event, PERF_EF_RELOAD);

	return 0;
}

static void xgene_perf_del(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	xgene_perf_stop(event, PERF_EF_UPDATE);

	/* clear the assigned counter */
	clear_avail_cntr(pmu_dev, GET_CNTR(event));

	perf_event_update_userpage(event);
	pmu_dev->pmu_counter_event[hw->idx] = NULL;
}

static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
{
	struct xgene_pmu *xgene_pmu;

	if (pmu_dev->parent->version == PCP_PMU_V3)
		pmu_dev->max_period = PMU_V3_CNT_MAX_PERIOD;
	else
		pmu_dev->max_period = PMU_CNT_MAX_PERIOD;
	/* The first version of the PMU supports only a single event counter */
	xgene_pmu = pmu_dev->parent;
	if (xgene_pmu->version == PCP_PMU_V1)
		pmu_dev->max_counters = 1;
	else
		pmu_dev->max_counters = PMU_MAX_COUNTERS;

	/* Perf driver registration */
	pmu_dev->pmu = (struct pmu) {
		.parent		= pmu_dev->parent->dev,
		.attr_groups	= pmu_dev->attr_groups,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= xgene_perf_pmu_enable,
		.pmu_disable	= xgene_perf_pmu_disable,
		.event_init	= xgene_perf_event_init,
		.add		= xgene_perf_add,
		.del		= xgene_perf_del,
		.start		= xgene_perf_start,
		.stop		= xgene_perf_stop,
		.read		= xgene_perf_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	/* Hardware counter init */
	xgene_pmu->ops->stop_counters(pmu_dev);
	xgene_pmu->ops->reset_counters(pmu_dev);

	return perf_pmu_register(&pmu_dev->pmu, name, -1);
}
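
/*
 * Once perf_pmu_register() succeeds, the PMU appears under the standard
 * event_source sysfs layout, e.g. /sys/bus/event_source/devices/<name>/
 * with the "format", "events" and "cpumask" groups wired up above.
 */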

static int
xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
{
	struct device *dev = xgene_pmu->dev;
	struct xgene_pmu_dev *pmu;

	pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;
	pmu->parent = xgene_pmu;
	pmu->inf = &ctx->inf;
	ctx->pmu_dev = pmu;

	switch (pmu->inf->type) {
	case PMU_TYPE_L3C:
		if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = l3c_pmu_v3_attr_groups;
		else
			pmu->attr_groups = l3c_pmu_attr_groups;
		break;
	case PMU_TYPE_IOB:
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = iob_fast_pmu_v3_attr_groups;
		else
			pmu->attr_groups = iob_pmu_attr_groups;
		break;
	case PMU_TYPE_IOB_SLOW:
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = iob_slow_pmu_v3_attr_groups;
		break;
	case PMU_TYPE_MCB:
		if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = mcb_pmu_v3_attr_groups;
		else
			pmu->attr_groups = mcb_pmu_attr_groups;
		break;
	case PMU_TYPE_MC:
		if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = mc_pmu_v3_attr_groups;
		else
			pmu->attr_groups = mc_pmu_attr_groups;
		break;
	default:
		return -EINVAL;
	}

	if (xgene_init_perf(pmu, ctx->name)) {
		dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
		return -ENODEV;
	}

	dev_info(dev, "%s PMU registered\n", ctx->name);

	return 0;
}

static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
{
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	void __iomem *csr = pmu_dev->inf->csr;
	u32 pmovsr;
	int idx;

	xgene_pmu->ops->stop_counters(pmu_dev);

	if (xgene_pmu->version == PCP_PMU_V3)
		pmovsr = readl(csr + PMU_PMOVSSET) & PMU_OVERFLOW_MASK;
	else
		pmovsr = readl(csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;

	if (!pmovsr)
		goto out;

	/* Clear interrupt flag */
	if (xgene_pmu->version == PCP_PMU_V1)
		writel(0x0, csr + PMU_PMOVSR);
	else if (xgene_pmu->version == PCP_PMU_V2)
		writel(pmovsr, csr + PMU_PMOVSR);
	else
		writel(pmovsr, csr + PMU_PMOVSCLR);

	for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
		struct perf_event *event = pmu_dev->pmu_counter_event[idx];
		int overflowed = pmovsr & BIT(idx);

		/* Ignore if we don't have an event. */
		if (!event || !overflowed)
			continue;
		xgene_perf_event_update(event);
		xgene_perf_event_set_period(event);
	}

out:
	xgene_pmu->ops->start_counters(pmu_dev);
}

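/*
 * All PMU blocks share one interrupt line; the handler reads
 * PCPPMU_INTSTATUS_REG to identify the raising block type(s) and then
 * services every registered PMU device of those types.
 */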
static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
{
	u32 intr_mcu, intr_mcb, intr_l3c, intr_iob;
	struct xgene_pmu_dev_ctx *ctx;
	struct xgene_pmu *xgene_pmu = dev_id;
	u32 val;

	raw_spin_lock(&xgene_pmu->lock);

	/* Get Interrupt PMU source */
	val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
	if (xgene_pmu->version == PCP_PMU_V3) {
		intr_mcu = PCPPMU_V3_INT_MCU;
		intr_mcb = PCPPMU_V3_INT_MCB;
		intr_l3c = PCPPMU_V3_INT_L3C;
		intr_iob = PCPPMU_V3_INT_IOB;
	} else {
		intr_mcu = PCPPMU_INT_MCU;
		intr_mcb = PCPPMU_INT_MCB;
		intr_l3c = PCPPMU_INT_L3C;
		intr_iob = PCPPMU_INT_IOB;
	}
	if (val & intr_mcu) {
		list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_mcb) {
		list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_l3c) {
		list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_iob) {
		list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}

	raw_spin_unlock(&xgene_pmu->lock);

	return IRQ_HANDLED;
}

static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					     struct platform_device *pdev)
{
	void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
	unsigned int reg;

	csw_csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(csw_csr)) {
		dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
		return PTR_ERR(csw_csr);
	}

	mcba_csr = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(mcba_csr)) {
		dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
		return PTR_ERR(mcba_csr);
	}

	mcbb_csr = devm_platform_ioremap_resource(pdev, 3);
	if (IS_ERR(mcbb_csr)) {
		dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
		return PTR_ERR(mcbb_csr);
	}

	xgene_pmu->l3c_active_mask = 0x1;

	reg = readl(csw_csr + CSW_CSWCR);
	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active MC(s) */
		reg = readl(mcbb_csr + CSW_CSWCR);
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active MC(s) */
		reg = readl(mcba_csr + CSW_CSWCR);
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
	}

	return 0;
}
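
/*
 * Illustrative reading of the masks above (assuming bit i selects
 * instance i): 0xF covers MCU0-3 and 0x5 covers MCU0/MCU2 in dual-MCB
 * configurations, while 0x3 covers MCU0/MCU1 and 0x1 just MCU0 with a
 * single MCB. The masks are later matched against each device's
 * enable_mask in xgene_pmu_dev_add().
 */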

static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
						struct platform_device *pdev)
{
	void __iomem *csw_csr;
	unsigned int reg;
	u32 mcb0routing;
	u32 mcb1routing;

	csw_csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(csw_csr)) {
		dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
		return PTR_ERR(csw_csr);
	}

	reg = readl(csw_csr + CSW_CSWCR);
	mcb0routing = CSW_CSWCR_MCB0_ROUTING(reg);
	mcb1routing = CSW_CSWCR_MCB1_ROUTING(reg);
	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active L3C(s), maximum is 8 */
		xgene_pmu->l3c_active_mask = 0xFF;
		/* Probe all active MC(s), maximum is 8 */
		if ((mcb0routing == 0x2) && (mcb1routing == 0x2))
			xgene_pmu->mc_active_mask = 0xFF;
		else if ((mcb0routing == 0x1) && (mcb1routing == 0x1))
			xgene_pmu->mc_active_mask = 0x33;
		else
			xgene_pmu->mc_active_mask = 0x11;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active L3C(s), maximum is 4 */
		xgene_pmu->l3c_active_mask = 0x0F;
		/* Probe all active MC(s), maximum is 4 */
		if (mcb0routing == 0x2)
			xgene_pmu->mc_active_mask = 0x0F;
		else if (mcb0routing == 0x1)
			xgene_pmu->mc_active_mask = 0x03;
		else
			xgene_pmu->mc_active_mask = 0x01;
	}

	return 0;
}

static int fdt_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					    struct platform_device *pdev)
{
	struct regmap *csw_map, *mcba_map, *mcbb_map;
	struct device_node *np = pdev->dev.of_node;
	unsigned int reg;

	csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
	if (IS_ERR(csw_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
		return PTR_ERR(csw_map);
	}

	mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
	if (IS_ERR(mcba_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
		return PTR_ERR(mcba_map);
	}

	mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
	if (IS_ERR(mcbb_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
		return PTR_ERR(mcbb_map);
	}

	xgene_pmu->l3c_active_mask = 0x1;
	if (regmap_read(csw_map, CSW_CSWCR, &reg))
		return -EINVAL;

	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active MC(s) */
		if (regmap_read(mcbb_map, MCBADDRMR, &reg))
			return 0;
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active MC(s) */
		if (regmap_read(mcba_map, MCBADDRMR, &reg))
			return 0;
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
	}

	return 0;
}

static int xgene_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					      struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev)) {
		if (xgene_pmu->version == PCP_PMU_V3)
			return acpi_pmu_v3_probe_active_mcb_mcu_l3c(xgene_pmu,
								    pdev);
		else
			return acpi_pmu_probe_active_mcb_mcu_l3c(xgene_pmu,
								 pdev);
	}
	return fdt_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
}

static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
{
	switch (type) {
	case PMU_TYPE_L3C:
		return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
	case PMU_TYPE_IOB:
		return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
	case PMU_TYPE_IOB_SLOW:
		return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
	case PMU_TYPE_MCB:
		return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
	case PMU_TYPE_MC:
		return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
	default:
		return devm_kasprintf(dev, GFP_KERNEL, "unknown");
	}
}

#if defined(CONFIG_ACPI)
static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
					struct acpi_device *adev, u32 type)
{
	struct device *dev = xgene_pmu->dev;
	struct list_head resource_list;
	struct xgene_pmu_dev_ctx *ctx;
	const union acpi_object *obj;
	struct hw_pmu_info *inf;
	void __iomem *dev_csr;
	struct resource res;
	struct resource_entry *rentry;
	int enable_bit;
	int rc;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&resource_list);
	rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
	if (rc <= 0) {
		dev_err(dev, "PMU type %d: No resources found\n", type);
		return NULL;
	}

	list_for_each_entry(rentry, &resource_list, node) {
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			res = *rentry->res;
			rentry = NULL;
			break;
		}
	}
	acpi_dev_free_resource_list(&resource_list);

	if (rentry) {
		dev_err(dev, "PMU type %d: No memory resource found\n", type);
		return NULL;
	}

	dev_csr = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dev_csr)) {
		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
		return NULL;
	}

	/* A PMU device node without enable-bit-index is always enabled */
	rc = acpi_dev_get_property(adev, "enable-bit-index",
				   ACPI_TYPE_INTEGER, &obj);
	if (rc < 0)
		enable_bit = 0;
	else
		enable_bit = (int) obj->integer.value;

	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
	if (!ctx->name) {
		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
		return NULL;
	}
	inf = &ctx->inf;
	inf->type = type;
	inf->csr = dev_csr;
	inf->enable_mask = 1 << enable_bit;

	return ctx;
}

static const struct acpi_device_id xgene_pmu_acpi_type_match[] = {
	{"APMC0D5D", PMU_TYPE_L3C},
	{"APMC0D5E", PMU_TYPE_IOB},
	{"APMC0D5F", PMU_TYPE_MCB},
	{"APMC0D60", PMU_TYPE_MC},
	{"APMC0D84", PMU_TYPE_L3C},
	{"APMC0D85", PMU_TYPE_IOB},
	{"APMC0D86", PMU_TYPE_IOB_SLOW},
	{"APMC0D87", PMU_TYPE_MCB},
	{"APMC0D88", PMU_TYPE_MC},
	{},
};
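
/* driver_data carries the enum xgene_pmu_dev_type for each ACPI HID above */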
1530
xgene_pmu_acpi_match_type(const struct acpi_device_id * ids,struct acpi_device * adev)1531 static const struct acpi_device_id *xgene_pmu_acpi_match_type(
1532 const struct acpi_device_id *ids,
1533 struct acpi_device *adev)
1534 {
1535 const struct acpi_device_id *match_id = NULL;
1536 const struct acpi_device_id *id;
1537
1538 for (id = ids; id->id[0] || id->cls; id++) {
1539 if (!acpi_match_device_ids(adev, id))
1540 match_id = id;
1541 else if (match_id)
1542 break;
1543 }
1544
1545 return match_id;
1546 }
1547
acpi_pmu_dev_add(acpi_handle handle,u32 level,void * data,void ** return_value)1548 static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
1549 void *data, void **return_value)
1550 {
1551 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
1552 const struct acpi_device_id *acpi_id;
1553 struct xgene_pmu *xgene_pmu = data;
1554 struct xgene_pmu_dev_ctx *ctx;
1555
1556 if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
1557 return AE_OK;
1558
1559 acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev);
1560 if (!acpi_id)
1561 return AE_OK;
1562
1563 ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, (u32)acpi_id->driver_data);
1564 if (!ctx)
1565 return AE_OK;
1566
1567 if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
1568 /* Can't add the PMU device, skip it */
1569 devm_kfree(xgene_pmu->dev, ctx);
1570 return AE_OK;
1571 }
1572
1573 switch (ctx->inf.type) {
1574 case PMU_TYPE_L3C:
1575 list_add(&ctx->next, &xgene_pmu->l3cpmus);
1576 break;
1577 case PMU_TYPE_IOB:
1578 list_add(&ctx->next, &xgene_pmu->iobpmus);
1579 break;
1580 case PMU_TYPE_IOB_SLOW:
1581 list_add(&ctx->next, &xgene_pmu->iobpmus);
1582 break;
1583 case PMU_TYPE_MCB:
1584 list_add(&ctx->next, &xgene_pmu->mcbpmus);
1585 break;
1586 case PMU_TYPE_MC:
1587 list_add(&ctx->next, &xgene_pmu->mcpmus);
1588 break;
1589 }
1590 return AE_OK;
1591 }
1592
acpi_pmu_probe_pmu_dev(struct xgene_pmu * xgene_pmu,struct platform_device * pdev)1593 static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
1594 struct platform_device *pdev)
1595 {
1596 struct device *dev = xgene_pmu->dev;
1597 acpi_handle handle;
1598 acpi_status status;
1599
1600 handle = ACPI_HANDLE(dev);
1601 if (!handle)
1602 return -EINVAL;
1603
1604 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1605 acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
1606 if (ACPI_FAILURE(status)) {
1607 dev_err(dev, "failed to probe PMU devices\n");
1608 return -ENODEV;
1609 }
1610
1611 return 0;
1612 }
1613 #else
acpi_pmu_probe_pmu_dev(struct xgene_pmu * xgene_pmu,struct platform_device * pdev)1614 static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
1615 struct platform_device *pdev)
1616 {
1617 return 0;
1618 }
1619 #endif
1620
1621 static struct
fdt_get_pmu_hw_inf(struct xgene_pmu * xgene_pmu,struct device_node * np,u32 type)1622 xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
1623 struct device_node *np, u32 type)
1624 {
1625 struct device *dev = xgene_pmu->dev;
1626 struct xgene_pmu_dev_ctx *ctx;
1627 struct hw_pmu_info *inf;
1628 void __iomem *dev_csr;
1629 struct resource res;
1630 int enable_bit;
1631
1632 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1633 if (!ctx)
1634 return NULL;
1635
1636 if (of_address_to_resource(np, 0, &res) < 0) {
1637 dev_err(dev, "PMU type %d: No resource address found\n", type);
1638 return NULL;
1639 }
1640
1641 dev_csr = devm_ioremap_resource(dev, &res);
1642 if (IS_ERR(dev_csr)) {
1643 dev_err(dev, "PMU type %d: Fail to map resource\n", type);
1644 return NULL;
1645 }
1646
1647 /* A PMU device node without enable-bit-index is always enabled */
1648 if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
1649 enable_bit = 0;
1650
1651 ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
1652 if (!ctx->name) {
1653 dev_err(dev, "PMU type %d: Fail to get device name\n", type);
1654 return NULL;
1655 }
1656
1657 inf = &ctx->inf;
1658 inf->type = type;
1659 inf->csr = dev_csr;
1660 inf->enable_mask = 1 << enable_bit;
1661
1662 return ctx;
1663 }
1664
static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				 struct platform_device *pdev)
{
	struct xgene_pmu_dev_ctx *ctx;
	struct device_node *np;

	for_each_child_of_node(pdev->dev.of_node, np) {
		if (!of_device_is_available(np))
			continue;

		if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
		else
			ctx = NULL;

		if (!ctx)
			continue;

		if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
			/* Can't add the PMU device, skip it */
			devm_kfree(xgene_pmu->dev, ctx);
			continue;
		}

		switch (ctx->inf.type) {
		case PMU_TYPE_L3C:
			list_add(&ctx->next, &xgene_pmu->l3cpmus);
			break;
		case PMU_TYPE_IOB:
			list_add(&ctx->next, &xgene_pmu->iobpmus);
			break;
		case PMU_TYPE_IOB_SLOW:
			list_add(&ctx->next, &xgene_pmu->iobpmus);
			break;
		case PMU_TYPE_MCB:
			list_add(&ctx->next, &xgene_pmu->mcbpmus);
			break;
		case PMU_TYPE_MC:
			list_add(&ctx->next, &xgene_pmu->mcpmus);
			break;
		}
	}

	return 0;
}

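/* Dispatch PMU device discovery to the ACPI or device-tree path */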
static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				   struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev))
		return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
	return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
}

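/* Per-generation match data used to select the PMU version at probe */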
static const struct xgene_pmu_data xgene_pmu_data = {
	.id   = PCP_PMU_V1,
};

static const struct xgene_pmu_data xgene_pmu_v2_data = {
	.id   = PCP_PMU_V2,
};

#ifdef CONFIG_ACPI
static const struct xgene_pmu_data xgene_pmu_v3_data = {
	.id   = PCP_PMU_V3,
};
#endif

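/* Register accessors for PMU v1/v2, which implement 32-bit counters */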
static const struct xgene_pmu_ops xgene_pmu_ops = {
	.mask_int = xgene_pmu_mask_int,
	.unmask_int = xgene_pmu_unmask_int,
	.read_counter = xgene_pmu_read_counter32,
	.write_counter = xgene_pmu_write_counter32,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_write_agentmsk,
	.write_agent1msk = xgene_pmu_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};

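/* PMU v3 provides 64-bit counters and its own interrupt/agent masking */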
static const struct xgene_pmu_ops xgene_pmu_v3_ops = {
	.mask_int = xgene_pmu_v3_mask_int,
	.unmask_int = xgene_pmu_v3_unmask_int,
	.read_counter = xgene_pmu_read_counter64,
	.write_counter = xgene_pmu_write_counter64,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_v3_write_agentmsk,
	.write_agent1msk = xgene_pmu_v3_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};

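/* Device match tables; the v3 PMU is only probed via ACPI */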
static const struct of_device_id xgene_pmu_of_match[] = {
	{ .compatible	= "apm,xgene-pmu",	.data = &xgene_pmu_data },
	{ .compatible	= "apm,xgene-pmu-v2",	.data = &xgene_pmu_v2_data },
	{},
};
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
	{"APMC0D5B", (kernel_ulong_t)&xgene_pmu_data},
	{"APMC0D5C", (kernel_ulong_t)&xgene_pmu_v2_data},
	{"APMC0D83", (kernel_ulong_t)&xgene_pmu_v3_data},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif

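/*
 * Hotplug callback: the first CPU to come online becomes the event
 * reader, and the overflow interrupt is pinned to it.
 */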
static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
						       node);

	if (cpumask_empty(&xgene_pmu->cpu))
		cpumask_set_cpu(cpu, &xgene_pmu->cpu);

	/* The overflow interrupt should be handled on the same CPU */
	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

	return 0;
}

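/*
 * Hotplug callback: when the reader CPU goes offline, migrate all perf
 * contexts and the overflow interrupt to another online CPU.
 */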
static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
						       node);
	struct xgene_pmu_dev_ctx *ctx;
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
		return 0;
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}

	cpumask_set_cpu(target, &xgene_pmu->cpu);
	/* The overflow interrupt should be handled on the same CPU */
	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

	return 0;
}

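/*
 * Probe: map the PCP PMU CSRs, request the overflow interrupt, detect
 * the active MCB/MCU configuration, discover the individual PMU devices
 * and finally unmask the PMU interrupt.
 */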
static int xgene_pmu_probe(struct platform_device *pdev)
{
	const struct xgene_pmu_data *dev_data;
	struct xgene_pmu *xgene_pmu;
	int irq, rc;
	int version;

	/* Install a hook to update the reader CPU in case it goes offline */
	rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				     "CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE",
				     xgene_pmu_online_cpu,
				     xgene_pmu_offline_cpu);
	if (rc)
		return rc;

	xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
	if (!xgene_pmu)
		return -ENOMEM;
	xgene_pmu->dev = &pdev->dev;
	platform_set_drvdata(pdev, xgene_pmu);

	dev_data = device_get_match_data(&pdev->dev);
	if (!dev_data)
		return -ENODEV;
	version = dev_data->id;

	if (version == PCP_PMU_V3)
		xgene_pmu->ops = &xgene_pmu_v3_ops;
	else
		xgene_pmu->ops = &xgene_pmu_ops;

	INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
	INIT_LIST_HEAD(&xgene_pmu->iobpmus);
	INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
	INIT_LIST_HEAD(&xgene_pmu->mcpmus);

	xgene_pmu->version = version;
	dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);

	xgene_pmu->pcppmu_csr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xgene_pmu->pcppmu_csr)) {
		dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
		return PTR_ERR(xgene_pmu->pcppmu_csr);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EINVAL;

	rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
			      IRQF_NOBALANCING | IRQF_NO_THREAD,
			      dev_name(&pdev->dev), xgene_pmu);
	if (rc) {
		dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
		return rc;
	}

	xgene_pmu->irq = irq;

	raw_spin_lock_init(&xgene_pmu->lock);

	/* Check for active MCBs and MCUs */
	rc = xgene_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
		xgene_pmu->mcb_active_mask = 0x1;
		xgene_pmu->mc_active_mask = 0x1;
	}

	/* Add this instance to the list used by the hotplug callback */
	rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				      &xgene_pmu->node);
	if (rc) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", rc);
		return rc;
	}

	/* Walk through the tree for all PMU perf devices */
	rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
	if (rc) {
		dev_err(&pdev->dev, "No PMU perf devices found!\n");
		goto out_unregister;
	}

	/* Enable interrupt */
	xgene_pmu->ops->unmask_int(xgene_pmu);

	return 0;

out_unregister:
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				    &xgene_pmu->node);
	return rc;
}

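/* Unregister every perf PMU queued on the given list */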
static void
xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
{
	struct xgene_pmu_dev_ctx *ctx;

	list_for_each_entry(ctx, pmus, next) {
		perf_pmu_unregister(&ctx->pmu_dev->pmu);
	}
}

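/* Unregister all PMUs and drop the hotplug instance on driver removal */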
static void xgene_pmu_remove(struct platform_device *pdev)
{
	struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);

	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				    &xgene_pmu->node);
}

static struct platform_driver xgene_pmu_driver = {
	.probe = xgene_pmu_probe,
	.remove_new = xgene_pmu_remove,
	.driver = {
		.name		= "xgene-pmu",
		.of_match_table = xgene_pmu_of_match,
		.acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(xgene_pmu_driver);