// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC DDRC uncore Hardware event counters support
 *
 * Copyright (C) 2017 HiSilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* DDRC register definition in v1 */
#define DDRC_PERF_CTRL		0x010
#define DDRC_FLUX_WR		0x380
#define DDRC_FLUX_RD		0x384
#define DDRC_FLUX_WCMD		0x388
#define DDRC_FLUX_RCMD		0x38c
#define DDRC_PRE_CMD		0x3c0
#define DDRC_ACT_CMD		0x3c4
#define DDRC_RNK_CHG		0x3cc
#define DDRC_RW_CHG		0x3d0
#define DDRC_EVENT_CTRL		0x6c0
#define DDRC_INT_MASK		0x6c8
#define DDRC_INT_STATUS		0x6cc
#define DDRC_INT_CLEAR		0x6d0
#define DDRC_VERSION		0x710

/* DDRC register definition in v2 */
#define DDRC_V2_INT_MASK	0x528
#define DDRC_V2_INT_STATUS	0x52c
#define DDRC_V2_INT_CLEAR	0x530
#define DDRC_V2_EVENT_CNT	0xe00
#define DDRC_V2_EVENT_CTRL	0xe70
#define DDRC_V2_EVENT_TYPE	0xe74
#define DDRC_V2_PERF_CTRL	0xea0

/* The DDRC has 8 counters */
#define DDRC_NR_COUNTERS	0x8
#define DDRC_V1_PERF_CTRL_EN	0x2
#define DDRC_V2_PERF_CTRL_EN	0x1
#define DDRC_V1_NR_EVENTS	0x7
#define DDRC_V2_NR_EVENTS	0x90

/*
 * For PMU v1 there are eight events, each hard-wired to a fixed-purpose
 * counter whose register offset is not consistent. Therefore there is
 * no event-type register to write, and the driver assumes that the
 * event code (0 to 7) equals the counter index.
 */
#define GET_DDRC_EVENTID(hwc)	((hwc)->config_base & 0x7)
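/*
 * For example (derived from the ddrc_reg_off[] table and the v1 event list
 * below): an event with config 0x02 (flux_wcmd) always uses counter index 2,
 * whose count is read from the DDRC_FLUX_WCMD register at offset 0x388, and
 * config 0x07 (rw_chg) uses counter index 7, read from DDRC_RW_CHG at 0x3d0.
 */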

static const u32 ddrc_reg_off[] = {
	DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
};

/*
 * Select the counter register offset from the counter index.
 * In PMU v1 there are no programmable counters; the count is
 * read from the statistics counter register itself.
 */
static u32 hisi_ddrc_pmu_v1_get_counter_offset(int cntr_idx)
{
	return ddrc_reg_off[cntr_idx];
}

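/*
 * In PMU v2 the counters are programmable: each one is read with a 64-bit
 * access at an 8-byte stride from DDRC_V2_EVENT_CNT, so counter n sits at
 * 0xe00 + 8 * n (the counters themselves are 48 bits wide, see
 * hisi_ddrc_pmu_dev_probe()).
 */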
static u32 hisi_ddrc_pmu_v2_get_counter_offset(int cntr_idx)
{
	return DDRC_V2_EVENT_CNT + cntr_idx * 8;
}

static u64 hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu *ddrc_pmu,
					 struct hw_perf_event *hwc)
{
	return readl(ddrc_pmu->base +
		     hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
}

static void hisi_ddrc_pmu_v1_write_counter(struct hisi_pmu *ddrc_pmu,
					   struct hw_perf_event *hwc, u64 val)
{
	writel((u32)val,
	       ddrc_pmu->base + hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
}

static u64 hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu *ddrc_pmu,
					 struct hw_perf_event *hwc)
{
	return readq(ddrc_pmu->base +
		     hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
}

static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
					   struct hw_perf_event *hwc, u64 val)
{
	writeq(val,
	       ddrc_pmu->base + hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
}

/*
 * In DDRC PMU v1 each event is mapped to a fixed-purpose counter by the
 * hardware, so there is no event type to write; in PMU v2 the counters
 * are programmable and the event type must be set per counter.
 */
static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
				       u32 type)
{
	u32 offset;

	if (ddrc_pmu->identifier >= HISI_PMU_V2) {
		offset = DDRC_V2_EVENT_TYPE + 4 * idx;
		writel(type, ddrc_pmu->base + offset);
	}
}

static void hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu *ddrc_pmu)
{
	u32 val;

	/* Set perf_enable in DDRC_PERF_CTRL to start event counting */
	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
	val |= DDRC_V1_PERF_CTRL_EN;
	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}

static void hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu *ddrc_pmu)
{
	u32 val;

	/* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
	val &= ~DDRC_V1_PERF_CTRL_EN;
	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}

static void hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu *ddrc_pmu,
					    struct hw_perf_event *hwc)
{
	u32 val;

	/* Set the counter index (event code) bit in DDRC_EVENT_CTRL */
	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
	val |= (1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}

static void hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu *ddrc_pmu,
					     struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear the counter index (event code) bit in DDRC_EVENT_CTRL */
	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
	val &= ~(1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}

static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
{
	struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
	unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
	struct hw_perf_event *hwc = &event->hw;
	/* For the DDRC PMU, the event code is used as the counter index */
	int idx = GET_DDRC_EVENTID(hwc);

	if (test_bit(idx, used_mask))
		return -EAGAIN;

	set_bit(idx, used_mask);

	return idx;
}

static int hisi_ddrc_pmu_v2_get_event_idx(struct perf_event *event)
{
	return hisi_uncore_pmu_get_event_idx(event);
}

static void hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu *ddrc_pmu)
{
	u32 val;

	val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
	val |= DDRC_V2_PERF_CTRL_EN;
	writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
}

static void hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu *ddrc_pmu)
{
	u32 val;

	val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
	val &= ~DDRC_V2_PERF_CTRL_EN;
	writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
}

static void hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu *ddrc_pmu,
					    struct hw_perf_event *hwc)
{
	u32 val;

	val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
	val |= 1 << hwc->idx;
	writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
}

static void hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu *ddrc_pmu,
					     struct hw_perf_event *hwc)
{
	u32 val;

	val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
	val &= ~(1 << hwc->idx);
	writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
}

static void hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu *ddrc_pmu,
						struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 0 to enable interrupt */
	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
	val &= ~(1 << hwc->idx);
	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}

static void hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu *ddrc_pmu,
						 struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 1 to mask interrupt */
	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
	val |= 1 << hwc->idx;
	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}

static void hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu *ddrc_pmu,
						struct hw_perf_event *hwc)
{
	u32 val;

	val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
	val &= ~(1 << hwc->idx);
	writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
}

static void hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu *ddrc_pmu,
						 struct hw_perf_event *hwc)
{
	u32 val;

	val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
	val |= 1 << hwc->idx;
	writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
}

static u32 hisi_ddrc_pmu_v1_get_int_status(struct hisi_pmu *ddrc_pmu)
{
	return readl(ddrc_pmu->base + DDRC_INT_STATUS);
}

static void hisi_ddrc_pmu_v1_clear_int_status(struct hisi_pmu *ddrc_pmu,
					      int idx)
{
	writel(1 << idx, ddrc_pmu->base + DDRC_INT_CLEAR);
}

static u32 hisi_ddrc_pmu_v2_get_int_status(struct hisi_pmu *ddrc_pmu)
{
	return readl(ddrc_pmu->base + DDRC_V2_INT_STATUS);
}

static void hisi_ddrc_pmu_v2_clear_int_status(struct hisi_pmu *ddrc_pmu,
					      int idx)
{
	writel(1 << idx, ddrc_pmu->base + DDRC_V2_INT_CLEAR);
}

static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
	{ "HISI0233", },
	{ "HISI0234", },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);

static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
				   struct hisi_pmu *ddrc_pmu)
{
	/*
	 * Use the SCCL_ID and DDRC channel ID to identify the
	 * DDRC PMU; the SCCL_ID can be found in MPIDR[aff2].
	 */
	if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
				     &ddrc_pmu->index_id)) {
		dev_err(&pdev->dev, "Cannot read ddrc channel-id!\n");
		return -EINVAL;
	}

	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
				     &ddrc_pmu->sccl_id)) {
		dev_err(&pdev->dev, "Cannot read ddrc sccl-id!\n");
		return -EINVAL;
	}
	/* DDRC PMUs are only associated with an SCCL, not a particular CCL */
	ddrc_pmu->ccl_id = -1;

	ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ddrc_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
		return PTR_ERR(ddrc_pmu->base);
	}

	ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
	if (ddrc_pmu->identifier >= HISI_PMU_V2) {
		if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
					     &ddrc_pmu->sub_id)) {
			dev_err(&pdev->dev, "Cannot read sub-id!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static struct attribute *hisi_ddrc_pmu_v1_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_v1_format_group = {
	.name = "format",
	.attrs = hisi_ddrc_pmu_v1_format_attr,
};

static struct attribute *hisi_ddrc_pmu_v2_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL
};

static const struct attribute_group hisi_ddrc_pmu_v2_format_group = {
	.name = "format",
	.attrs = hisi_ddrc_pmu_v2_format_attr,
};

static struct attribute *hisi_ddrc_pmu_v1_events_attr[] = {
	HISI_PMU_EVENT_ATTR(flux_wr,		0x00),
	HISI_PMU_EVENT_ATTR(flux_rd,		0x01),
	HISI_PMU_EVENT_ATTR(flux_wcmd,		0x02),
	HISI_PMU_EVENT_ATTR(flux_rcmd,		0x03),
	HISI_PMU_EVENT_ATTR(pre_cmd,		0x04),
	HISI_PMU_EVENT_ATTR(act_cmd,		0x05),
	HISI_PMU_EVENT_ATTR(rnk_chg,		0x06),
	HISI_PMU_EVENT_ATTR(rw_chg,		0x07),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_v1_events_group = {
	.name = "events",
	.attrs = hisi_ddrc_pmu_v1_events_attr,
};

static struct attribute *hisi_ddrc_pmu_v2_events_attr[] = {
	HISI_PMU_EVENT_ATTR(cycles,		0x00),
	HISI_PMU_EVENT_ATTR(flux_wr,		0x83),
	HISI_PMU_EVENT_ATTR(flux_rd,		0x84),
	NULL
};

static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
	.name = "events",
	.attrs = hisi_ddrc_pmu_v2_events_attr,
};

static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
	.attrs = hisi_ddrc_pmu_cpumask_attrs,
};

static struct device_attribute hisi_ddrc_pmu_identifier_attr =
	__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);

static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
	&hisi_ddrc_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
	.attrs = hisi_ddrc_pmu_identifier_attrs,
};

static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
	&hisi_ddrc_pmu_v1_format_group,
	&hisi_ddrc_pmu_v1_events_group,
	&hisi_ddrc_pmu_cpumask_attr_group,
	&hisi_ddrc_pmu_identifier_group,
	NULL,
};

static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
	&hisi_ddrc_pmu_v2_format_group,
	&hisi_ddrc_pmu_v2_events_group,
	&hisi_ddrc_pmu_cpumask_attr_group,
	&hisi_ddrc_pmu_identifier_group,
	NULL
};

static const struct hisi_uncore_ops hisi_uncore_ddrc_v1_ops = {
	.write_evtype		= hisi_ddrc_pmu_write_evtype,
	.get_event_idx		= hisi_ddrc_pmu_v1_get_event_idx,
	.start_counters		= hisi_ddrc_pmu_v1_start_counters,
	.stop_counters		= hisi_ddrc_pmu_v1_stop_counters,
	.enable_counter		= hisi_ddrc_pmu_v1_enable_counter,
	.disable_counter	= hisi_ddrc_pmu_v1_disable_counter,
	.enable_counter_int	= hisi_ddrc_pmu_v1_enable_counter_int,
	.disable_counter_int	= hisi_ddrc_pmu_v1_disable_counter_int,
	.write_counter		= hisi_ddrc_pmu_v1_write_counter,
	.read_counter		= hisi_ddrc_pmu_v1_read_counter,
	.get_int_status		= hisi_ddrc_pmu_v1_get_int_status,
	.clear_int_status	= hisi_ddrc_pmu_v1_clear_int_status,
};

static const struct hisi_uncore_ops hisi_uncore_ddrc_v2_ops = {
	.write_evtype		= hisi_ddrc_pmu_write_evtype,
	.get_event_idx		= hisi_ddrc_pmu_v2_get_event_idx,
	.start_counters		= hisi_ddrc_pmu_v2_start_counters,
	.stop_counters		= hisi_ddrc_pmu_v2_stop_counters,
	.enable_counter		= hisi_ddrc_pmu_v2_enable_counter,
	.disable_counter	= hisi_ddrc_pmu_v2_disable_counter,
	.enable_counter_int	= hisi_ddrc_pmu_v2_enable_counter_int,
	.disable_counter_int	= hisi_ddrc_pmu_v2_disable_counter_int,
	.write_counter		= hisi_ddrc_pmu_v2_write_counter,
	.read_counter		= hisi_ddrc_pmu_v2_read_counter,
	.get_int_status		= hisi_ddrc_pmu_v2_get_int_status,
	.clear_int_status	= hisi_ddrc_pmu_v2_clear_int_status,
};

static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
				   struct hisi_pmu *ddrc_pmu)
{
	int ret;

	ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
	if (ret)
		return ret;

	ret = hisi_uncore_pmu_init_irq(ddrc_pmu, pdev);
	if (ret)
		return ret;

	if (ddrc_pmu->identifier >= HISI_PMU_V2) {
		ddrc_pmu->counter_bits = 48;
		ddrc_pmu->check_event = DDRC_V2_NR_EVENTS;
		ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v2_attr_groups;
		ddrc_pmu->ops = &hisi_uncore_ddrc_v2_ops;
	} else {
		ddrc_pmu->counter_bits = 32;
		ddrc_pmu->check_event = DDRC_V1_NR_EVENTS;
		ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v1_attr_groups;
		ddrc_pmu->ops = &hisi_uncore_ddrc_v1_ops;
	}

	ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
	ddrc_pmu->dev = &pdev->dev;
	ddrc_pmu->on_cpu = -1;

	return 0;
}

static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *ddrc_pmu;
	char *name;
	int ret;

	ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
	if (!ddrc_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, ddrc_pmu);

	ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				       &ddrc_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	if (ddrc_pmu->identifier >= HISI_PMU_V2)
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
				      "hisi_sccl%u_ddrc%u_%u",
				      ddrc_pmu->sccl_id, ddrc_pmu->index_id,
				      ddrc_pmu->sub_id);
	else
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
				      "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
				      ddrc_pmu->index_id);
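	/*
	 * The PMU is exposed to userspace under this name, e.g.
	 * "hisi_sccl1_ddrc0" (or "hisi_sccl1_ddrc0_0" on v2 with a sub-id),
	 * so an event can typically be counted with something like:
	 *   perf stat -a -e hisi_sccl1_ddrc0/flux_wr/ sleep 5
	 * The exact sccl/channel numbers depend on the platform.
	 */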

	hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups,
		      THIS_MODULE);

	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
	if (ret) {
		dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
		cpuhp_state_remove_instance_nocalls(
			CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
	}

	return ret;
}

static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&ddrc_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
					    &ddrc_pmu->node);
	return 0;
}

static struct platform_driver hisi_ddrc_pmu_driver = {
	.driver = {
		.name = "hisi_ddrc_pmu",
		.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = hisi_ddrc_pmu_probe,
	.remove = hisi_ddrc_pmu_remove,
};

static int __init hisi_ddrc_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				      "AP_PERF_ARM_HISI_DDRC_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("DDRC PMU: setup hotplug failed, ret = %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_ddrc_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);

	return ret;
}
module_init(hisi_ddrc_pmu_module_init);

static void __exit hisi_ddrc_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_ddrc_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
}
module_exit(hisi_ddrc_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");