1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * HiSilicon SoC HHA uncore Hardware event counters support
4  *
5  * Copyright (C) 2017 Hisilicon Limited
6  * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
7  *         Anurup M <anurup.m@huawei.com>
8  *
9  * This code is based on the uncore PMUs like arm-cci and arm-ccn.
10  */
11 #include <linux/acpi.h>
12 #include <linux/bug.h>
13 #include <linux/cpuhotplug.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/list.h>
17 #include <linux/platform_device.h>
18 #include <linux/smp.h>
19 
20 #include "hisi_uncore_pmu.h"
21 
/* HHA register definition */
#define HHA_INT_MASK		0x0804	/* per-counter IRQ mask; 0 = enabled, 1 = masked */
#define HHA_INT_STATUS		0x0808	/* per-counter overflow status, read in the ISR */
#define HHA_INT_CLEAR		0x080C	/* write 1 to a bit to ack that counter's IRQ */
#define HHA_PERF_CTRL		0x1E00	/* global control; bit 0 starts/stops counting */
#define HHA_EVENT_CTRL		0x1E04	/* per-counter enable bits */
#define HHA_EVENT_TYPE0		0x1E80	/* first of four event-select registers, 4 codes each */
/*
 * Each counter is 48-bits and [48:63] are reserved
 * which are Read-As-Zero and Writes-Ignored.
 */
#define HHA_CNT0_LOWER		0x1F00	/* base of the 64-bit-wide counter array */

/* HHA has 16-counters */
#define HHA_NR_COUNTERS		0x10

#define HHA_PERF_CTRL_EN	0x1	/* global enable bit in HHA_PERF_CTRL */
#define HHA_EVTYPE_NONE		0xff	/* 8-bit mask covering one event-code field */
40 
41 /*
42  * Select the counter register offset using the counter index
43  * each counter is 48-bits.
44  */
45 static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
46 {
47 	return (HHA_CNT0_LOWER + (cntr_idx * 8));
48 }
49 
50 static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
51 				     struct hw_perf_event *hwc)
52 {
53 	u32 idx = hwc->idx;
54 
55 	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
56 		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
57 		return 0;
58 	}
59 
60 	/* Read 64 bits and like L3C, top 16 bits are RAZ */
61 	return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
62 }
63 
64 static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
65 				       struct hw_perf_event *hwc, u64 val)
66 {
67 	u32 idx = hwc->idx;
68 
69 	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
70 		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
71 		return;
72 	}
73 
74 	/* Write 64 bits and like L3C, top 16 bits are WI */
75 	writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
76 }
77 
78 static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
79 				      u32 type)
80 {
81 	u32 reg, reg_idx, shift, val;
82 
83 	/*
84 	 * Select the appropriate event select register(HHA_EVENT_TYPEx).
85 	 * There are 4 event select registers for the 16 hardware counters.
86 	 * Event code is 8-bits and for the first 4 hardware counters,
87 	 * HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters,
88 	 * HHA_EVENT_TYPE1 is chosen and so on.
89 	 */
90 	reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
91 	reg_idx = idx % 4;
92 	shift = 8 * reg_idx;
93 
94 	/* Write event code to HHA_EVENT_TYPEx register */
95 	val = readl(hha_pmu->base + reg);
96 	val &= ~(HHA_EVTYPE_NONE << shift);
97 	val |= (type << shift);
98 	writel(val, hha_pmu->base + reg);
99 }
100 
101 static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
102 {
103 	u32 val;
104 
105 	/*
106 	 * Set perf_enable bit in HHA_PERF_CTRL to start event
107 	 * counting for all enabled counters.
108 	 */
109 	val = readl(hha_pmu->base + HHA_PERF_CTRL);
110 	val |= HHA_PERF_CTRL_EN;
111 	writel(val, hha_pmu->base + HHA_PERF_CTRL);
112 }
113 
114 static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
115 {
116 	u32 val;
117 
118 	/*
119 	 * Clear perf_enable bit in HHA_PERF_CTRL to stop event
120 	 * counting for all enabled counters.
121 	 */
122 	val = readl(hha_pmu->base + HHA_PERF_CTRL);
123 	val &= ~(HHA_PERF_CTRL_EN);
124 	writel(val, hha_pmu->base + HHA_PERF_CTRL);
125 }
126 
127 static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
128 					struct hw_perf_event *hwc)
129 {
130 	u32 val;
131 
132 	/* Enable counter index in HHA_EVENT_CTRL register */
133 	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
134 	val |= (1 << hwc->idx);
135 	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
136 }
137 
138 static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
139 					 struct hw_perf_event *hwc)
140 {
141 	u32 val;
142 
143 	/* Clear counter index in HHA_EVENT_CTRL register */
144 	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
145 	val &= ~(1 << hwc->idx);
146 	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
147 }
148 
149 static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
150 					    struct hw_perf_event *hwc)
151 {
152 	u32 val;
153 
154 	/* Write 0 to enable interrupt */
155 	val = readl(hha_pmu->base + HHA_INT_MASK);
156 	val &= ~(1 << hwc->idx);
157 	writel(val, hha_pmu->base + HHA_INT_MASK);
158 }
159 
160 static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
161 					     struct hw_perf_event *hwc)
162 {
163 	u32 val;
164 
165 	/* Write 1 to mask interrupt */
166 	val = readl(hha_pmu->base + HHA_INT_MASK);
167 	val |= (1 << hwc->idx);
168 	writel(val, hha_pmu->base + HHA_INT_MASK);
169 }
170 
171 static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
172 {
173 	struct hisi_pmu *hha_pmu = dev_id;
174 	struct perf_event *event;
175 	unsigned long overflown;
176 	int idx;
177 
178 	/* Read HHA_INT_STATUS register */
179 	overflown = readl(hha_pmu->base + HHA_INT_STATUS);
180 	if (!overflown)
181 		return IRQ_NONE;
182 
183 	/*
184 	 * Find the counter index which overflowed if the bit was set
185 	 * and handle it
186 	 */
187 	for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
188 		/* Write 1 to clear the IRQ status flag */
189 		writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);
190 
191 		/* Get the corresponding event struct */
192 		event = hha_pmu->pmu_events.hw_events[idx];
193 		if (!event)
194 			continue;
195 
196 		hisi_uncore_pmu_event_update(event);
197 		hisi_uncore_pmu_set_event_period(event);
198 	}
199 
200 	return IRQ_HANDLED;
201 }
202 
203 static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
204 				 struct platform_device *pdev)
205 {
206 	int irq, ret;
207 
208 	/* Read and init IRQ */
209 	irq = platform_get_irq(pdev, 0);
210 	if (irq < 0) {
211 		dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
212 		return irq;
213 	}
214 
215 	ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
216 			      IRQF_NOBALANCING | IRQF_NO_THREAD,
217 			      dev_name(&pdev->dev), hha_pmu);
218 	if (ret < 0) {
219 		dev_err(&pdev->dev,
220 			"Fail to request IRQ:%d ret:%d\n", irq, ret);
221 		return ret;
222 	}
223 
224 	hha_pmu->irq = irq;
225 
226 	return 0;
227 }
228 
/* ACPI HID that identifies a HiSilicon HHA PMU device */
static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
	{ "HISI0243", },
	{},
};
MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
234 
235 static int hisi_hha_pmu_init_data(struct platform_device *pdev,
236 				  struct hisi_pmu *hha_pmu)
237 {
238 	unsigned long long id;
239 	struct resource *res;
240 	acpi_status status;
241 
242 	status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
243 				       "_UID", NULL, &id);
244 	if (ACPI_FAILURE(status))
245 		return -EINVAL;
246 
247 	hha_pmu->index_id = id;
248 
249 	/*
250 	 * Use SCCL_ID and UID to identify the HHA PMU, while
251 	 * SCCL_ID is in MPIDR[aff2].
252 	 */
253 	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
254 				     &hha_pmu->sccl_id)) {
255 		dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
256 		return -EINVAL;
257 	}
258 	/* HHA PMUs only share the same SCCL */
259 	hha_pmu->ccl_id = -1;
260 
261 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
262 	hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
263 	if (IS_ERR(hha_pmu->base)) {
264 		dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
265 		return PTR_ERR(hha_pmu->base);
266 	}
267 
268 	return 0;
269 }
270 
/* sysfs "format" group: the event code occupies config[7:0] */
static struct attribute *hisi_hha_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL,
};

static const struct attribute_group hisi_hha_pmu_format_group = {
	.name = "format",
	.attrs = hisi_hha_pmu_format_attr,
};
280 
/* sysfs "events" group: named hardware events and their event codes */
static struct attribute *hisi_hha_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rx_ops_num,		0x00),
	HISI_PMU_EVENT_ATTR(rx_outer,		0x01),
	HISI_PMU_EVENT_ATTR(rx_sccl,		0x02),
	HISI_PMU_EVENT_ATTR(rx_ccix,		0x03),
	HISI_PMU_EVENT_ATTR(rx_wbi,		0x04),
	HISI_PMU_EVENT_ATTR(rx_wbip,		0x05),
	HISI_PMU_EVENT_ATTR(rx_wtistash,	0x11),
	HISI_PMU_EVENT_ATTR(rd_ddr_64b,		0x1c),
	HISI_PMU_EVENT_ATTR(wr_dr_64b,		0x1d),
	HISI_PMU_EVENT_ATTR(rd_ddr_128b,	0x1e),
	HISI_PMU_EVENT_ATTR(wr_ddr_128b,	0x1f),
	HISI_PMU_EVENT_ATTR(spill_num,		0x20),
	HISI_PMU_EVENT_ATTR(spill_success,	0x21),
	HISI_PMU_EVENT_ATTR(bi_num,		0x23),
	HISI_PMU_EVENT_ATTR(mediated_num,	0x32),
	HISI_PMU_EVENT_ATTR(tx_snp_num,		0x33),
	HISI_PMU_EVENT_ATTR(tx_snp_outer,	0x34),
	HISI_PMU_EVENT_ATTR(tx_snp_ccix,	0x35),
	HISI_PMU_EVENT_ATTR(rx_snprspdata,	0x38),
	HISI_PMU_EVENT_ATTR(rx_snprsp_outer,	0x3c),
	HISI_PMU_EVENT_ATTR(sdir-lookup,	0x40),
	HISI_PMU_EVENT_ATTR(edir-lookup,	0x41),
	HISI_PMU_EVENT_ATTR(sdir-hit,		0x42),
	HISI_PMU_EVENT_ATTR(edir-hit,		0x43),
	HISI_PMU_EVENT_ATTR(sdir-home-migrate,	0x4c),
	HISI_PMU_EVENT_ATTR(edir-home-migrate,  0x4d),
	NULL,
};

static const struct attribute_group hisi_hha_pmu_events_group = {
	.name = "events",
	.attrs = hisi_hha_pmu_events_attr,
};
315 
/* sysfs "cpumask" attribute: the CPU that services this PMU's events */
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
	.attrs = hisi_hha_pmu_cpumask_attrs,
};

/* All sysfs groups exposed by the registered PMU */
static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
	&hisi_hha_pmu_format_group,
	&hisi_hha_pmu_events_group,
	&hisi_hha_pmu_cpumask_attr_group,
	NULL,
};
333 
/*
 * HHA-specific hooks for the shared hisi_uncore_pmu framework; only
 * event-index allocation uses the generic helper.
 */
static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
	.write_evtype		= hisi_hha_pmu_write_evtype,
	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
	.start_counters		= hisi_hha_pmu_start_counters,
	.stop_counters		= hisi_hha_pmu_stop_counters,
	.enable_counter		= hisi_hha_pmu_enable_counter,
	.disable_counter	= hisi_hha_pmu_disable_counter,
	.enable_counter_int	= hisi_hha_pmu_enable_counter_int,
	.disable_counter_int	= hisi_hha_pmu_disable_counter_int,
	.write_counter		= hisi_hha_pmu_write_counter,
	.read_counter		= hisi_hha_pmu_read_counter,
};
346 
347 static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
348 				  struct hisi_pmu *hha_pmu)
349 {
350 	int ret;
351 
352 	ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
353 	if (ret)
354 		return ret;
355 
356 	ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
357 	if (ret)
358 		return ret;
359 
360 	hha_pmu->num_counters = HHA_NR_COUNTERS;
361 	hha_pmu->counter_bits = 48;
362 	hha_pmu->ops = &hisi_uncore_hha_ops;
363 	hha_pmu->dev = &pdev->dev;
364 	hha_pmu->on_cpu = -1;
365 	hha_pmu->check_event = 0x65;
366 
367 	return 0;
368 }
369 
370 static int hisi_hha_pmu_probe(struct platform_device *pdev)
371 {
372 	struct hisi_pmu *hha_pmu;
373 	char *name;
374 	int ret;
375 
376 	hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
377 	if (!hha_pmu)
378 		return -ENOMEM;
379 
380 	platform_set_drvdata(pdev, hha_pmu);
381 
382 	ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
383 	if (ret)
384 		return ret;
385 
386 	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
387 				       &hha_pmu->node);
388 	if (ret) {
389 		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
390 		return ret;
391 	}
392 
393 	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
394 			      hha_pmu->sccl_id, hha_pmu->index_id);
395 	hha_pmu->pmu = (struct pmu) {
396 		.name		= name,
397 		.task_ctx_nr	= perf_invalid_context,
398 		.event_init	= hisi_uncore_pmu_event_init,
399 		.pmu_enable	= hisi_uncore_pmu_enable,
400 		.pmu_disable	= hisi_uncore_pmu_disable,
401 		.add		= hisi_uncore_pmu_add,
402 		.del		= hisi_uncore_pmu_del,
403 		.start		= hisi_uncore_pmu_start,
404 		.stop		= hisi_uncore_pmu_stop,
405 		.read		= hisi_uncore_pmu_read,
406 		.attr_groups	= hisi_hha_pmu_attr_groups,
407 		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
408 	};
409 
410 	ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
411 	if (ret) {
412 		dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
413 		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
414 					    &hha_pmu->node);
415 	}
416 
417 	return ret;
418 }
419 
420 static int hisi_hha_pmu_remove(struct platform_device *pdev)
421 {
422 	struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
423 
424 	perf_pmu_unregister(&hha_pmu->pmu);
425 	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
426 				    &hha_pmu->node);
427 
428 	return 0;
429 }
430 
/* Platform driver bound via the ACPI match table above */
static struct platform_driver hisi_hha_pmu_driver = {
	.driver = {
		.name = "hisi_hha_pmu",
		.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
	},
	.probe = hisi_hha_pmu_probe,
	.remove = hisi_hha_pmu_remove,
};
439 
/*
 * Module init: register the CPU hotplug state shared by all HHA PMU
 * instances, then the platform driver; unwind the hotplug state if
 * driver registration fails.
 */
static int __init hisi_hha_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				      "AP_PERF_ARM_HISI_HHA_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_hha_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);

	return ret;
}
module_init(hisi_hha_pmu_module_init);
460 
/* Module exit: unregister the driver, then drop the hotplug state. */
static void __exit hisi_hha_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_hha_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
}
module_exit(hisi_hha_pmu_module_exit);
467 
468 MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
469 MODULE_LICENSE("GPL v2");
470 MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
471 MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
472