18404b0fbSQi Liu // SPDX-License-Identifier: GPL-2.0-only
28404b0fbSQi Liu /*
38404b0fbSQi Liu  * This driver adds support for PCIe PMU RCiEP device. Related
48404b0fbSQi Liu  * perf events are bandwidth, latency etc.
58404b0fbSQi Liu  *
68404b0fbSQi Liu  * Copyright (C) 2021 HiSilicon Limited
78404b0fbSQi Liu  * Author: Qi Liu <liuqi115@huawei.com>
88404b0fbSQi Liu  */
98404b0fbSQi Liu #include <linux/bitfield.h>
108404b0fbSQi Liu #include <linux/bitmap.h>
118404b0fbSQi Liu #include <linux/bug.h>
128404b0fbSQi Liu #include <linux/device.h>
138404b0fbSQi Liu #include <linux/err.h>
148404b0fbSQi Liu #include <linux/interrupt.h>
158404b0fbSQi Liu #include <linux/irq.h>
168404b0fbSQi Liu #include <linux/kernel.h>
178404b0fbSQi Liu #include <linux/list.h>
188404b0fbSQi Liu #include <linux/module.h>
198404b0fbSQi Liu #include <linux/pci.h>
208404b0fbSQi Liu #include <linux/perf_event.h>
218404b0fbSQi Liu 
#define DRV_NAME "hisi_pcie_pmu"
/* Define registers */
#define HISI_PCIE_GLOBAL_CTRL		0x00
#define HISI_PCIE_EVENT_CTRL		0x010
#define HISI_PCIE_CNT			0x090
#define HISI_PCIE_EXT_CNT		0x110
#define HISI_PCIE_INT_STAT		0x150
#define HISI_PCIE_INT_MASK		0x154
#define HISI_PCIE_REG_BDF		0xfe0
#define HISI_PCIE_REG_VERSION		0xfe4
#define HISI_PCIE_REG_INFO		0xfe8

/* Define command in HISI_PCIE_GLOBAL_CTRL */
#define HISI_PCIE_GLOBAL_EN		0x01
#define HISI_PCIE_GLOBAL_NONE		0

/* Define command in HISI_PCIE_EVENT_CTRL */
#define HISI_PCIE_EVENT_EN		BIT_ULL(20)
#define HISI_PCIE_RESET_CNT		BIT_ULL(22)
#define HISI_PCIE_INIT_SET		BIT_ULL(34)
#define HISI_PCIE_THR_EN		BIT_ULL(26)
#define HISI_PCIE_TARGET_EN		BIT_ULL(32)
#define HISI_PCIE_TRIG_EN		BIT_ULL(52)

/* Define offsets in HISI_PCIE_EVENT_CTRL */
#define HISI_PCIE_EVENT_M		GENMASK_ULL(15, 0)
#define HISI_PCIE_THR_MODE_M		GENMASK_ULL(27, 27)
#define HISI_PCIE_THR_M			GENMASK_ULL(31, 28)
#define HISI_PCIE_LEN_M			GENMASK_ULL(35, 34)
#define HISI_PCIE_TARGET_M		GENMASK_ULL(52, 36)
#define HISI_PCIE_TRIG_MODE_M		GENMASK_ULL(53, 53)
#define HISI_PCIE_TRIG_M		GENMASK_ULL(59, 56)

/* Default config of TLP length mode, will count both TLP headers and payloads */
#define HISI_PCIE_LEN_M_DEFAULT		3ULL

#define HISI_PCIE_MAX_COUNTERS		8
/* Byte stride between consecutive counters' instances of the same register */
#define HISI_PCIE_REG_STEP		8
#define HISI_PCIE_THR_MAX_VAL		10
#define HISI_PCIE_TRIG_MAX_VAL		10
/* Counters are 64 bits wide; deltas are masked to this width on update */
#define HISI_PCIE_MAX_PERIOD		(GENMASK_ULL(63, 0))
/* Counters are (re)started from the half-range point (bit 63 set) */
#define HISI_PCIE_INIT_VAL		BIT_ULL(63)
648404b0fbSQi Liu 
/* Per-device context for one PCIe PMU RCiEP instance. */
struct hisi_pcie_pmu {
	/* Event currently occupying each hardware counter (NULL = free) */
	struct perf_event *hw_events[HISI_PCIE_MAX_COUNTERS];
	/* List node — presumably for the CPU hotplug instance list; see hisi_pcie_pmu_online_cpu() */
	struct hlist_node node;
	struct pci_dev *pdev;	/* the PMU RCiEP PCI device itself */
	struct pmu pmu;		/* perf core registration */
	void __iomem *base;	/* mapped PMU register space */
	int irq;		/* MSI vector used for counter overflow */
	u32 identifier;		/* hardware version identifier (sysfs "identifier") */
	/* Minimum and maximum BDF of root ports monitored by PMU */
	u16 bdf_min;
	u16 bdf_max;
	int on_cpu;		/* CPU on which all events for this PMU run */
};
788404b0fbSQi Liu 
/* A 32-bit register value split into its two 16-bit halves. */
struct hisi_pcie_reg_pair {
	u16 lo;		/* bits [15:0] */
	u16 hi;		/* bits [31:16] */
};

#define to_pcie_pmu(p)  (container_of((p), struct hisi_pcie_pmu, pmu))
/* Extract the devfn (low byte) from a Bus/Device/Function number */
#define GET_PCI_DEVFN(bdf)  ((bdf) & 0xff)
868404b0fbSQi Liu 
/*
 * Generate an accessor hisi_pcie_get_<_name>() returning bits [_hi:_lo]
 * of the given perf_event_attr config word.
 */
#define HISI_PCIE_PMU_FILTER_ATTR(_name, _config, _hi, _lo)		  \
	static u64 hisi_pcie_get_##_name(struct perf_event *event)	  \
	{								  \
		return FIELD_GET(GENMASK(_hi, _lo), event->attr._config); \
	}								  \

/* "event" deliberately includes bit 16 — the ext-counter flag (EXT_COUNTER_IS_USED) */
HISI_PCIE_PMU_FILTER_ATTR(event, config, 16, 0);
HISI_PCIE_PMU_FILTER_ATTR(thr_len, config1, 3, 0);
HISI_PCIE_PMU_FILTER_ATTR(thr_mode, config1, 4, 4);
HISI_PCIE_PMU_FILTER_ATTR(trig_len, config1, 8, 5);
HISI_PCIE_PMU_FILTER_ATTR(trig_mode, config1, 9, 9);
HISI_PCIE_PMU_FILTER_ATTR(len_mode, config1, 11, 10);
HISI_PCIE_PMU_FILTER_ATTR(port, config2, 15, 0);
HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16);
1018404b0fbSQi Liu 
hisi_pcie_event_sysfs_show(struct device * dev,struct device_attribute * attr,char * buf)1028404b0fbSQi Liu static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1038404b0fbSQi Liu 					  char *buf)
1048404b0fbSQi Liu {
1058404b0fbSQi Liu 	struct perf_pmu_events_attr *pmu_attr =
1068404b0fbSQi Liu 		container_of(attr, struct perf_pmu_events_attr, attr);
1078404b0fbSQi Liu 
1088404b0fbSQi Liu 	return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
1098404b0fbSQi Liu }
1108404b0fbSQi Liu 
/* Build a read-only "format" attribute whose show() emits the format string. */
#define HISI_PCIE_PMU_FORMAT_ATTR(_name, _format)                              \
	(&((struct dev_ext_attribute[]){                                       \
		{ .attr = __ATTR(_name, 0444, device_show_string, NULL),       \
		  .var = (void *)_format }                                     \
	})[0].attr.attr)

/* Build an event attribute shown via hisi_pcie_event_sysfs_show(). */
#define HISI_PCIE_PMU_EVENT_ATTR(_name, _id)			\
	PMU_EVENT_ATTR_ID(_name, hisi_pcie_event_sysfs_show, _id)
1198404b0fbSQi Liu 
cpumask_show(struct device * dev,struct device_attribute * attr,char * buf)1208404b0fbSQi Liu static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
1218404b0fbSQi Liu {
1228404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
1238404b0fbSQi Liu 
1248404b0fbSQi Liu 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
1258404b0fbSQi Liu }
1268404b0fbSQi Liu static DEVICE_ATTR_RO(cpumask);
1278404b0fbSQi Liu 
identifier_show(struct device * dev,struct device_attribute * attr,char * buf)1288404b0fbSQi Liu static ssize_t identifier_show(struct device *dev, struct device_attribute *attr, char *buf)
1298404b0fbSQi Liu {
1308404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
1318404b0fbSQi Liu 
1328404b0fbSQi Liu 	return sysfs_emit(buf, "%#x\n", pcie_pmu->identifier);
1338404b0fbSQi Liu }
1348404b0fbSQi Liu static DEVICE_ATTR_RO(identifier);
1358404b0fbSQi Liu 
bus_show(struct device * dev,struct device_attribute * attr,char * buf)1368404b0fbSQi Liu static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char *buf)
1378404b0fbSQi Liu {
1388404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
1398404b0fbSQi Liu 
1408404b0fbSQi Liu 	return sysfs_emit(buf, "%#04x\n", PCI_BUS_NUM(pcie_pmu->bdf_min));
1418404b0fbSQi Liu }
1428404b0fbSQi Liu static DEVICE_ATTR_RO(bus);
1438404b0fbSQi Liu 
1448404b0fbSQi Liu static struct hisi_pcie_reg_pair
hisi_pcie_parse_reg_value(struct hisi_pcie_pmu * pcie_pmu,u32 reg_off)1458404b0fbSQi Liu hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off)
1468404b0fbSQi Liu {
1478404b0fbSQi Liu 	u32 val = readl_relaxed(pcie_pmu->base + reg_off);
1488404b0fbSQi Liu 	struct hisi_pcie_reg_pair regs = {
1498404b0fbSQi Liu 		.lo = val,
1508404b0fbSQi Liu 		.hi = val >> 16,
1518404b0fbSQi Liu 	};
1528404b0fbSQi Liu 
1538404b0fbSQi Liu 	return regs;
1548404b0fbSQi Liu }
1558404b0fbSQi Liu 
/*
 * Hardware counter and ext_counter work together for bandwidth, latency, bus
 * utilization and buffer occupancy events. For example, RX memory write latency
 * events(index = 0x0010), counter counts total delay cycles and ext_counter
 * counts RX memory write PCIe packets number.
 *
 * As we don't want PMU driver to process these two data, "delay cycles" can
 * be treated as an independent event(index = 0x0010), "RX memory write packets
 * number" as another(index = 0x10010). BIT 16 is used to distinguish and 0-15
 * bits are "real" event index, which can be used to set HISI_PCIE_EVENT_CTRL.
 */
#define EXT_COUNTER_IS_USED(idx)		((idx) & BIT(16))

/* Strip the ext-counter flag (bit 16) to get the hardware event code. */
static u32 hisi_pcie_get_real_event(struct perf_event *event)
{
	return hisi_pcie_get_event(event) & GENMASK(15, 0);
}
1738404b0fbSQi Liu 
/* Offset of counter @idx's instance of the register at base offset @offset. */
static u32 hisi_pcie_pmu_get_offset(u32 offset, u32 idx)
{
	return idx * HISI_PCIE_REG_STEP + offset;
}
1788404b0fbSQi Liu 
/* Read counter @idx's 32-bit instance of the register at @reg_offset. */
static u32 hisi_pcie_pmu_readl(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
			       u32 idx)
{
	return readl_relaxed(pcie_pmu->base + hisi_pcie_pmu_get_offset(reg_offset, idx));
}
1868404b0fbSQi Liu 
/* Write counter @idx's 32-bit instance of the register at @reg_offset. */
static void hisi_pcie_pmu_writel(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u32 val)
{
	writel_relaxed(val, pcie_pmu->base + hisi_pcie_pmu_get_offset(reg_offset, idx));
}
1938404b0fbSQi Liu 
hisi_pcie_pmu_readq(struct hisi_pcie_pmu * pcie_pmu,u32 reg_offset,u32 idx)1948404b0fbSQi Liu static u64 hisi_pcie_pmu_readq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx)
1958404b0fbSQi Liu {
1968404b0fbSQi Liu 	u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
1978404b0fbSQi Liu 
1988404b0fbSQi Liu 	return readq_relaxed(pcie_pmu->base + offset);
1998404b0fbSQi Liu }
2008404b0fbSQi Liu 
/* Write counter @idx's 64-bit instance of the register at @reg_offset. */
static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u64 val)
{
	writeq_relaxed(val, pcie_pmu->base + hisi_pcie_pmu_get_offset(reg_offset, idx));
}
2078404b0fbSQi Liu 
hisi_pcie_pmu_get_event_ctrl_val(struct perf_event * event)2084d473461SYicong Yang static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event)
2098404b0fbSQi Liu {
21017d57398SYicong Yang 	u64 port, trig_len, thr_len, len_mode;
2118404b0fbSQi Liu 	u64 reg = HISI_PCIE_INIT_SET;
2128404b0fbSQi Liu 
2138404b0fbSQi Liu 	/* Config HISI_PCIE_EVENT_CTRL according to event. */
2148404b0fbSQi Liu 	reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event));
2158404b0fbSQi Liu 
2168404b0fbSQi Liu 	/* Config HISI_PCIE_EVENT_CTRL according to root port or EP device. */
2178404b0fbSQi Liu 	port = hisi_pcie_get_port(event);
2188404b0fbSQi Liu 	if (port)
2198404b0fbSQi Liu 		reg |= FIELD_PREP(HISI_PCIE_TARGET_M, port);
2208404b0fbSQi Liu 	else
2218404b0fbSQi Liu 		reg |= HISI_PCIE_TARGET_EN |
2228404b0fbSQi Liu 		       FIELD_PREP(HISI_PCIE_TARGET_M, hisi_pcie_get_bdf(event));
2238404b0fbSQi Liu 
2248404b0fbSQi Liu 	/* Config HISI_PCIE_EVENT_CTRL according to trigger condition. */
2258404b0fbSQi Liu 	trig_len = hisi_pcie_get_trig_len(event);
2268404b0fbSQi Liu 	if (trig_len) {
2278404b0fbSQi Liu 		reg |= FIELD_PREP(HISI_PCIE_TRIG_M, trig_len);
2288404b0fbSQi Liu 		reg |= FIELD_PREP(HISI_PCIE_TRIG_MODE_M, hisi_pcie_get_trig_mode(event));
2298404b0fbSQi Liu 		reg |= HISI_PCIE_TRIG_EN;
2308404b0fbSQi Liu 	}
2318404b0fbSQi Liu 
2328404b0fbSQi Liu 	/* Config HISI_PCIE_EVENT_CTRL according to threshold condition. */
2338404b0fbSQi Liu 	thr_len = hisi_pcie_get_thr_len(event);
2348404b0fbSQi Liu 	if (thr_len) {
2358404b0fbSQi Liu 		reg |= FIELD_PREP(HISI_PCIE_THR_M, thr_len);
2368404b0fbSQi Liu 		reg |= FIELD_PREP(HISI_PCIE_THR_MODE_M, hisi_pcie_get_thr_mode(event));
2378404b0fbSQi Liu 		reg |= HISI_PCIE_THR_EN;
2388404b0fbSQi Liu 	}
2398404b0fbSQi Liu 
24017d57398SYicong Yang 	len_mode = hisi_pcie_get_len_mode(event);
24117d57398SYicong Yang 	if (len_mode)
24217d57398SYicong Yang 		reg |= FIELD_PREP(HISI_PCIE_LEN_M, len_mode);
24317d57398SYicong Yang 	else
24417d57398SYicong Yang 		reg |= FIELD_PREP(HISI_PCIE_LEN_M, HISI_PCIE_LEN_M_DEFAULT);
24517d57398SYicong Yang 
2464d473461SYicong Yang 	return reg;
2474d473461SYicong Yang }
2484d473461SYicong Yang 
hisi_pcie_pmu_config_event_ctrl(struct perf_event * event)2494d473461SYicong Yang static void hisi_pcie_pmu_config_event_ctrl(struct perf_event *event)
2504d473461SYicong Yang {
2514d473461SYicong Yang 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
2524d473461SYicong Yang 	struct hw_perf_event *hwc = &event->hw;
2534d473461SYicong Yang 	u64 reg = hisi_pcie_pmu_get_event_ctrl_val(event);
2544d473461SYicong Yang 
2558404b0fbSQi Liu 	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg);
2568404b0fbSQi Liu }
2578404b0fbSQi Liu 
hisi_pcie_pmu_clear_event_ctrl(struct perf_event * event)25854a9e47eSYicong Yang static void hisi_pcie_pmu_clear_event_ctrl(struct perf_event *event)
2598404b0fbSQi Liu {
2608404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
2618404b0fbSQi Liu 	struct hw_perf_event *hwc = &event->hw;
2628404b0fbSQi Liu 
2638404b0fbSQi Liu 	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, HISI_PCIE_INIT_SET);
2648404b0fbSQi Liu }
2658404b0fbSQi Liu 
hisi_pcie_pmu_valid_requester_id(struct hisi_pcie_pmu * pcie_pmu,u32 bdf)2668404b0fbSQi Liu static bool hisi_pcie_pmu_valid_requester_id(struct hisi_pcie_pmu *pcie_pmu, u32 bdf)
2678404b0fbSQi Liu {
2688404b0fbSQi Liu 	struct pci_dev *root_port, *pdev;
2698404b0fbSQi Liu 	u16 rp_bdf;
2708404b0fbSQi Liu 
2718404b0fbSQi Liu 	pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pcie_pmu->pdev->bus), PCI_BUS_NUM(bdf),
2728404b0fbSQi Liu 					   GET_PCI_DEVFN(bdf));
2738404b0fbSQi Liu 	if (!pdev)
2748404b0fbSQi Liu 		return false;
2758404b0fbSQi Liu 
2768404b0fbSQi Liu 	root_port = pcie_find_root_port(pdev);
2778404b0fbSQi Liu 	if (!root_port) {
2788404b0fbSQi Liu 		pci_dev_put(pdev);
2798404b0fbSQi Liu 		return false;
2808404b0fbSQi Liu 	}
2818404b0fbSQi Liu 
2828404b0fbSQi Liu 	pci_dev_put(pdev);
2838404b0fbSQi Liu 	rp_bdf = pci_dev_id(root_port);
2848404b0fbSQi Liu 	return rp_bdf >= pcie_pmu->bdf_min && rp_bdf <= pcie_pmu->bdf_max;
2858404b0fbSQi Liu }
2868404b0fbSQi Liu 
hisi_pcie_pmu_valid_filter(struct perf_event * event,struct hisi_pcie_pmu * pcie_pmu)2878404b0fbSQi Liu static bool hisi_pcie_pmu_valid_filter(struct perf_event *event,
2888404b0fbSQi Liu 				       struct hisi_pcie_pmu *pcie_pmu)
2898404b0fbSQi Liu {
2908404b0fbSQi Liu 	u32 requester_id = hisi_pcie_get_bdf(event);
2918404b0fbSQi Liu 
2928404b0fbSQi Liu 	if (hisi_pcie_get_thr_len(event) > HISI_PCIE_THR_MAX_VAL)
2938404b0fbSQi Liu 		return false;
2948404b0fbSQi Liu 
2958404b0fbSQi Liu 	if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL)
2968404b0fbSQi Liu 		return false;
2978404b0fbSQi Liu 
2982f864feeSJunhao He 	/* Need to explicitly set filter of "port" or "bdf" */
2992f864feeSJunhao He 	if (!hisi_pcie_get_port(event) &&
3002f864feeSJunhao He 	    !hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
3018404b0fbSQi Liu 		return false;
3028404b0fbSQi Liu 
3038404b0fbSQi Liu 	return true;
3048404b0fbSQi Liu }
3058404b0fbSQi Liu 
306b6693ad6SYicong Yang /*
307b6693ad6SYicong Yang  * Check Whether two events share the same config. The same config means not
308b6693ad6SYicong Yang  * only the event code, but also the filter settings of the two events are
309b6693ad6SYicong Yang  * the same.
310b6693ad6SYicong Yang  */
hisi_pcie_pmu_cmp_event(struct perf_event * target,struct perf_event * event)3118404b0fbSQi Liu static bool hisi_pcie_pmu_cmp_event(struct perf_event *target,
3128404b0fbSQi Liu 					struct perf_event *event)
3138404b0fbSQi Liu {
314b6693ad6SYicong Yang 	return hisi_pcie_pmu_get_event_ctrl_val(target) ==
315b6693ad6SYicong Yang 	       hisi_pcie_pmu_get_event_ctrl_val(event);
3168404b0fbSQi Liu }
3178404b0fbSQi Liu 
/*
 * Check that all hardware events in @event's group can be scheduled at once:
 * events with identical configs share one counter, and the number of distinct
 * configs must not exceed the number of hardware counters.
 */
static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	/* Distinct configs seen so far, each standing for one counter */
	struct perf_event *event_group[HISI_PCIE_MAX_COUNTERS];
	int counters = 1;
	int num;

	event_group[0] = leader;
	if (!is_software_event(leader)) {
		if (leader->pmu != event->pmu)
			return false;

		/* @event needs its own counter if its config differs from the leader's */
		if (leader != event && !hisi_pcie_pmu_cmp_event(leader, event))
			event_group[counters++] = event;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (sibling->pmu != event->pmu)
			return false;

		for (num = 0; num < counters; num++) {
			/*
			 * If we find a related event, then it's a valid group
			 * since we don't need to allocate a new counter for it.
			 */
			if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
				break;
		}

		/*
		 * Otherwise it's a new event but if there's no available counter,
		 * fail the check since we cannot schedule all the events in
		 * the group simultaneously.
		 */
		if (num == HISI_PCIE_MAX_COUNTERS)
			return false;

		if (num == counters)
			event_group[counters++] = sibling;
	}

	return true;
}
3648404b0fbSQi Liu 
/*
 * pmu::event_init callback: validate @event's attributes and bind it to the
 * PMU's designated CPU. Returns -ENOENT for foreign events, -EOPNOTSUPP for
 * sampling/task-bound events, -EINVAL for bad filters or unschedulable groups.
 */
static int hisi_pcie_pmu_event_init(struct perf_event *event)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Check the type first before going on, otherwise it's not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Bit 16 of the event code selects the extension counter bank */
	if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
		hwc->event_base = HISI_PCIE_EXT_CNT;
	else
		hwc->event_base = HISI_PCIE_CNT;

	/* Sampling is not supported. */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (!hisi_pcie_pmu_valid_filter(event, pcie_pmu))
		return -EINVAL;

	if (!hisi_pcie_pmu_validate_event_group(event))
		return -EINVAL;

	/* All events for this PMU run on one designated CPU */
	event->cpu = pcie_pmu->on_cpu;

	return 0;
}
3937da37705SJunhao He 
hisi_pcie_pmu_read_counter(struct perf_event * event)3947da37705SJunhao He static u64 hisi_pcie_pmu_read_counter(struct perf_event *event)
3958404b0fbSQi Liu {
3968404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
3977da37705SJunhao He 	u32 idx = event->hw.idx;
3988404b0fbSQi Liu 
3998404b0fbSQi Liu 	return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx);
4008404b0fbSQi Liu }
4018404b0fbSQi Liu 
4028404b0fbSQi Liu /*
4037da37705SJunhao He  * Check all work events, if a relevant event is found then we return it
4047da37705SJunhao He  * first, otherwise return the first idle counter (need to reset).
4057da37705SJunhao He  */
hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu * pcie_pmu,struct perf_event * event)4068404b0fbSQi Liu static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu,
4077da37705SJunhao He 					struct perf_event *event)
4088404b0fbSQi Liu {
4098404b0fbSQi Liu 	int first_idle = -EAGAIN;
4102fbf96edSJunhao He 	struct perf_event *sibling;
4112fbf96edSJunhao He 	int idx;
4128404b0fbSQi Liu 
4138404b0fbSQi Liu 	for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
4148404b0fbSQi Liu 		sibling = pcie_pmu->hw_events[idx];
4157da37705SJunhao He 		if (!sibling) {
4168404b0fbSQi Liu 			if (first_idle == -EAGAIN)
4178404b0fbSQi Liu 				first_idle = idx;
4188404b0fbSQi Liu 			continue;
4198404b0fbSQi Liu 		}
4208404b0fbSQi Liu 
4218404b0fbSQi Liu 		/* Related events must be used in group */
4228404b0fbSQi Liu 		if (hisi_pcie_pmu_cmp_event(sibling, event) &&
4238404b0fbSQi Liu 		    sibling->group_leader == event->group_leader)
4248404b0fbSQi Liu 			return idx;
4258404b0fbSQi Liu 	}
4268404b0fbSQi Liu 
4278404b0fbSQi Liu 	return first_idle;
4288404b0fbSQi Liu }
4298404b0fbSQi Liu 
/*
 * Fold the hardware counter's progress since the last read into event->count.
 * The cmpxchg loop makes the read-and-swap of prev_count safe against a
 * concurrent update (e.g. from the overflow interrupt).
 */
static void hisi_pcie_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 new_cnt, prev_cnt, delta;

	do {
		prev_cnt = local64_read(&hwc->prev_count);
		new_cnt = hisi_pcie_pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_cnt,
				 new_cnt) != prev_cnt);

	/* Masking to the counter width makes the delta wrap-safe */
	delta = (new_cnt - prev_cnt) & HISI_PCIE_MAX_PERIOD;
	local64_add(delta, &event->count);
}
4448404b0fbSQi Liu 
/* pmu::read callback: refresh event->count from the hardware counter. */
static void hisi_pcie_pmu_read(struct perf_event *event)
{
	hisi_pcie_pmu_event_update(event);
}
4498404b0fbSQi Liu 
/*
 * Restart @event's counters from HISI_PCIE_INIT_VAL (bit 63 set). Both the
 * normal and the extension counter are rewritten since they work as a pair,
 * and prev_count is kept in sync so the next update computes a clean delta.
 */
static void hisi_pcie_pmu_set_period(struct perf_event *event)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL);
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL);
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL);
}
4608404b0fbSQi Liu 
hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu * pcie_pmu,struct hw_perf_event * hwc)4618404b0fbSQi Liu static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
4628404b0fbSQi Liu {
4638404b0fbSQi Liu 	u32 idx = hwc->idx;
4648404b0fbSQi Liu 	u64 val;
4658404b0fbSQi Liu 
4668404b0fbSQi Liu 	val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx);
4678404b0fbSQi Liu 	val |= HISI_PCIE_EVENT_EN;
4688404b0fbSQi Liu 	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val);
4698404b0fbSQi Liu }
4708404b0fbSQi Liu 
hisi_pcie_pmu_disable_counter(struct hisi_pcie_pmu * pcie_pmu,struct hw_perf_event * hwc)4718404b0fbSQi Liu static void hisi_pcie_pmu_disable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
4728404b0fbSQi Liu {
4738404b0fbSQi Liu 	u32 idx = hwc->idx;
4748404b0fbSQi Liu 	u64 val;
4758404b0fbSQi Liu 
4768404b0fbSQi Liu 	val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx);
4778404b0fbSQi Liu 	val &= ~HISI_PCIE_EVENT_EN;
4788404b0fbSQi Liu 	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val);
4798404b0fbSQi Liu }
4808404b0fbSQi Liu 
hisi_pcie_pmu_enable_int(struct hisi_pcie_pmu * pcie_pmu,struct hw_perf_event * hwc)4818404b0fbSQi Liu static void hisi_pcie_pmu_enable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
4828404b0fbSQi Liu {
4838404b0fbSQi Liu 	u32 idx = hwc->idx;
4848404b0fbSQi Liu 
4858404b0fbSQi Liu 	hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 0);
4868404b0fbSQi Liu }
4878404b0fbSQi Liu 
hisi_pcie_pmu_disable_int(struct hisi_pcie_pmu * pcie_pmu,struct hw_perf_event * hwc)4888404b0fbSQi Liu static void hisi_pcie_pmu_disable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
4898404b0fbSQi Liu {
4908404b0fbSQi Liu 	u32 idx = hwc->idx;
4918404b0fbSQi Liu 
4928404b0fbSQi Liu 	hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 1);
4938404b0fbSQi Liu }
4948404b0fbSQi Liu 
/* Pulse the reset bit, then return the control register to its initial state. */
static void hisi_pcie_pmu_reset_counter(struct hisi_pcie_pmu *pcie_pmu, int idx)
{
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_RESET_CNT);
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_INIT_SET);
}
5008404b0fbSQi Liu 
/*
 * pmu::start callback: program and enable @event's counter. With
 * PERF_EF_RELOAD the counter is restored to the last saved prev_count
 * instead of the fresh period written by hisi_pcie_pmu_set_period().
 */
static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 prev_cnt;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	/* Configure, enable counting and its interrupt, then set the period */
	hisi_pcie_pmu_config_event_ctrl(event);
	hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
	hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
	hisi_pcie_pmu_set_period(event);

	if (flags & PERF_EF_RELOAD) {
		prev_cnt = local64_read(&hwc->prev_count);
		hisi_pcie_pmu_writeq(pcie_pmu, hwc->event_base, idx, prev_cnt);
	}

	perf_event_update_userpage(event);
}
5268404b0fbSQi Liu 
/*
 * pmu::stop callback: fold in the final count, then quiesce the counter,
 * its interrupt and its control register, and mark the event stopped.
 */
static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Read the counter before disabling so no counts are lost */
	hisi_pcie_pmu_event_update(event);
	hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
	hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
	hisi_pcie_pmu_clear_event_ctrl(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/* The update above brought event->count up to date */
	hwc->state |= PERF_HES_UPTODATE;
}
5448404b0fbSQi Liu 
/*
 * pmu::add callback: claim a hardware counter for @event (sharing one with a
 * related event in the same group when possible) and optionally start it.
 * Returns -EAGAIN when all counters are busy.
 */
static int hisi_pcie_pmu_add(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	idx = hisi_pcie_pmu_get_event_idx(pcie_pmu, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;

	/* No enabled counter found with related event, reset it */
	if (!pcie_pmu->hw_events[idx]) {
		hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
		pcie_pmu->hw_events[idx] = event;
	}

	if (flags & PERF_EF_START)
		hisi_pcie_pmu_start(event, PERF_EF_RELOAD);

	return 0;
}
5708404b0fbSQi Liu 
hisi_pcie_pmu_del(struct perf_event * event,int flags)5718404b0fbSQi Liu static void hisi_pcie_pmu_del(struct perf_event *event, int flags)
5728404b0fbSQi Liu {
5738404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
5748404b0fbSQi Liu 	struct hw_perf_event *hwc = &event->hw;
5758404b0fbSQi Liu 
5768404b0fbSQi Liu 	hisi_pcie_pmu_stop(event, PERF_EF_UPDATE);
5778404b0fbSQi Liu 	pcie_pmu->hw_events[hwc->idx] = NULL;
5788404b0fbSQi Liu 	perf_event_update_userpage(event);
5798404b0fbSQi Liu }
5808404b0fbSQi Liu 
hisi_pcie_pmu_enable(struct pmu * pmu)5818404b0fbSQi Liu static void hisi_pcie_pmu_enable(struct pmu *pmu)
5828404b0fbSQi Liu {
5838404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);
5848404b0fbSQi Liu 	int num;
5858404b0fbSQi Liu 
5868404b0fbSQi Liu 	for (num = 0; num < HISI_PCIE_MAX_COUNTERS; num++) {
5878404b0fbSQi Liu 		if (pcie_pmu->hw_events[num])
5888404b0fbSQi Liu 			break;
5898404b0fbSQi Liu 	}
5908404b0fbSQi Liu 
5918404b0fbSQi Liu 	if (num == HISI_PCIE_MAX_COUNTERS)
5928404b0fbSQi Liu 		return;
5938404b0fbSQi Liu 
5948404b0fbSQi Liu 	writel(HISI_PCIE_GLOBAL_EN, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
5958404b0fbSQi Liu }
5968404b0fbSQi Liu 
hisi_pcie_pmu_disable(struct pmu * pmu)5978404b0fbSQi Liu static void hisi_pcie_pmu_disable(struct pmu *pmu)
5988404b0fbSQi Liu {
5998404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);
6008404b0fbSQi Liu 
6018404b0fbSQi Liu 	writel(HISI_PCIE_GLOBAL_NONE, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
6028404b0fbSQi Liu }
6038404b0fbSQi Liu 
/*
 * Overflow interrupt handler: for every overflown counter, acknowledge the
 * interrupt, fold the count into the owning event and restart the period.
 */
static irqreturn_t hisi_pcie_pmu_irq(int irq, void *data)
{
	struct hisi_pcie_pmu *pcie_pmu = data;
	irqreturn_t ret = IRQ_NONE;
	struct perf_event *event;
	u32 overflown;
	int idx;

	for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
		overflown = hisi_pcie_pmu_readl(pcie_pmu, HISI_PCIE_INT_STAT, idx);
		if (!overflown)
			continue;

		/* Clear status of interrupt. */
		hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_STAT, idx, 1);
		event = pcie_pmu->hw_events[idx];
		/* Overflow may race with the event being removed */
		if (!event)
			continue;

		hisi_pcie_pmu_event_update(event);
		hisi_pcie_pmu_set_period(event);
		ret = IRQ_HANDLED;
	}

	return ret;
}
6308404b0fbSQi Liu 
hisi_pcie_pmu_irq_register(struct pci_dev * pdev,struct hisi_pcie_pmu * pcie_pmu)6318404b0fbSQi Liu static int hisi_pcie_pmu_irq_register(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
6328404b0fbSQi Liu {
6338404b0fbSQi Liu 	int irq, ret;
6348404b0fbSQi Liu 
6358404b0fbSQi Liu 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
6368404b0fbSQi Liu 	if (ret < 0) {
6378404b0fbSQi Liu 		pci_err(pdev, "Failed to enable MSI vectors: %d\n", ret);
6388404b0fbSQi Liu 		return ret;
6398404b0fbSQi Liu 	}
6408404b0fbSQi Liu 
6418404b0fbSQi Liu 	irq = pci_irq_vector(pdev, 0);
6428404b0fbSQi Liu 	ret = request_irq(irq, hisi_pcie_pmu_irq, IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
6438404b0fbSQi Liu 			  pcie_pmu);
6448404b0fbSQi Liu 	if (ret) {
6458404b0fbSQi Liu 		pci_err(pdev, "Failed to register IRQ: %d\n", ret);
6468404b0fbSQi Liu 		pci_free_irq_vectors(pdev);
6478404b0fbSQi Liu 		return ret;
6488404b0fbSQi Liu 	}
6498404b0fbSQi Liu 
6508404b0fbSQi Liu 	pcie_pmu->irq = irq;
6518404b0fbSQi Liu 
6528404b0fbSQi Liu 	return 0;
6538404b0fbSQi Liu }
65483a6d80cSYicong Yang 
/* Release the overflow IRQ, then free the MSI vectors it came from. */
static void hisi_pcie_pmu_irq_unregister(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	free_irq(pcie_pmu->irq, pcie_pmu);
	pci_free_irq_vectors(pdev);
}
6608404b0fbSQi Liu 
hisi_pcie_pmu_online_cpu(unsigned int cpu,struct hlist_node * node)6618404b0fbSQi Liu static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
6628404b0fbSQi Liu {
6638404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
6648404b0fbSQi Liu 
66583a6d80cSYicong Yang 	if (pcie_pmu->on_cpu == -1) {
66683a6d80cSYicong Yang 		pcie_pmu->on_cpu = cpumask_local_spread(0, dev_to_node(&pcie_pmu->pdev->dev));
6678404b0fbSQi Liu 		WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(pcie_pmu->on_cpu)));
6688404b0fbSQi Liu 	}
6698404b0fbSQi Liu 
6708404b0fbSQi Liu 	return 0;
6718404b0fbSQi Liu }
6728404b0fbSQi Liu 
hisi_pcie_pmu_offline_cpu(unsigned int cpu,struct hlist_node * node)67383a6d80cSYicong Yang static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
67483a6d80cSYicong Yang {
67583a6d80cSYicong Yang 	struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
67683a6d80cSYicong Yang 	unsigned int target;
67783a6d80cSYicong Yang 	int numa_node;
67883a6d80cSYicong Yang 
67983a6d80cSYicong Yang 	/* Nothing to do if this CPU doesn't own the PMU */
6807a6a9f1cSJunhao He 	if (pcie_pmu->on_cpu != cpu)
68183a6d80cSYicong Yang 		return 0;
6828404b0fbSQi Liu 
6838404b0fbSQi Liu 	pcie_pmu->on_cpu = -1;
6848404b0fbSQi Liu 
6858404b0fbSQi Liu 	/* Choose a local CPU from all online cpus. */
6868404b0fbSQi Liu 	numa_node = dev_to_node(&pcie_pmu->pdev->dev);
6878404b0fbSQi Liu 
6888404b0fbSQi Liu 	target = cpumask_any_and_but(cpumask_of_node(numa_node),
6898404b0fbSQi Liu 				     cpu_online_mask, cpu);
6908404b0fbSQi Liu 	if (target >= nr_cpu_ids)
6918404b0fbSQi Liu 		target = cpumask_any_but(cpu_online_mask, cpu);
6928404b0fbSQi Liu 
6938404b0fbSQi Liu 	if (target >= nr_cpu_ids) {
6948404b0fbSQi Liu 		pci_err(pcie_pmu->pdev, "There is no CPU to set\n");
6958404b0fbSQi Liu 		return 0;
6968404b0fbSQi Liu 	}
6978404b0fbSQi Liu 
6988404b0fbSQi Liu 	perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
6998404b0fbSQi Liu 	/* Use this CPU for event counting */
7008404b0fbSQi Liu 	pcie_pmu->on_cpu = target;
7018404b0fbSQi Liu 	WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(target)));
70200ca69b8SYicong Yang 
70300ca69b8SYicong Yang 	return 0;
7046b4bb4f3SYicong Yang }
7056b4bb4f3SYicong Yang 
70600ca69b8SYicong Yang static struct attribute *hisi_pcie_pmu_events_attr[] = {
70700ca69b8SYicong Yang 	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_latency, 0x0010),
70800ca69b8SYicong Yang 	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_cnt, 0x10010),
70900ca69b8SYicong Yang 	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_latency, 0x0210),
7106b4bb4f3SYicong Yang 	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
7116b4bb4f3SYicong Yang 	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
71200ca69b8SYicong Yang 	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
71300ca69b8SYicong Yang 	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_flux, 0x0104),
7148404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_time, 0x10104),
7158404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
7168404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
7178404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_flux, 0x2004),
7188404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_time, 0x12004),
7198404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_flux, 0x0105),
7208404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_time, 0x10105),
7218404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
7228404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
7238404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_flux, 0x1005),
7248404b0fbSQi Liu 	HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_time, 0x11005),
7258404b0fbSQi Liu 	NULL
7268404b0fbSQi Liu };
7278404b0fbSQi Liu 
72817d57398SYicong Yang static struct attribute_group hisi_pcie_pmu_events_group = {
7298404b0fbSQi Liu 	.name = "events",
7308404b0fbSQi Liu 	.attrs = hisi_pcie_pmu_events_attr,
7318404b0fbSQi Liu };
7328404b0fbSQi Liu 
7338404b0fbSQi Liu static struct attribute *hisi_pcie_pmu_format_attr[] = {
7348404b0fbSQi Liu 	HISI_PCIE_PMU_FORMAT_ATTR(event, "config:0-16"),
7358404b0fbSQi Liu 	HISI_PCIE_PMU_FORMAT_ATTR(thr_len, "config1:0-3"),
7368404b0fbSQi Liu 	HISI_PCIE_PMU_FORMAT_ATTR(thr_mode, "config1:4"),
7378404b0fbSQi Liu 	HISI_PCIE_PMU_FORMAT_ATTR(trig_len, "config1:5-8"),
7388404b0fbSQi Liu 	HISI_PCIE_PMU_FORMAT_ATTR(trig_mode, "config1:9"),
7398404b0fbSQi Liu 	HISI_PCIE_PMU_FORMAT_ATTR(len_mode, "config1:10-11"),
7408404b0fbSQi Liu 	HISI_PCIE_PMU_FORMAT_ATTR(port, "config2:0-15"),
7418404b0fbSQi Liu 	HISI_PCIE_PMU_FORMAT_ATTR(bdf, "config2:16-31"),
7428404b0fbSQi Liu 	NULL
7438404b0fbSQi Liu };
7448404b0fbSQi Liu 
7458404b0fbSQi Liu static const struct attribute_group hisi_pcie_pmu_format_group = {
7468404b0fbSQi Liu 	.name = "format",
7478404b0fbSQi Liu 	.attrs = hisi_pcie_pmu_format_attr,
7488404b0fbSQi Liu };
7498404b0fbSQi Liu 
7508404b0fbSQi Liu static struct attribute *hisi_pcie_pmu_bus_attrs[] = {
7518404b0fbSQi Liu 	&dev_attr_bus.attr,
7528404b0fbSQi Liu 	NULL
7538404b0fbSQi Liu };
7548404b0fbSQi Liu 
7558404b0fbSQi Liu static const struct attribute_group hisi_pcie_pmu_bus_attr_group = {
7568404b0fbSQi Liu 	.attrs = hisi_pcie_pmu_bus_attrs,
7578404b0fbSQi Liu };
7588404b0fbSQi Liu 
7598404b0fbSQi Liu static struct attribute *hisi_pcie_pmu_cpumask_attrs[] = {
7608404b0fbSQi Liu 	&dev_attr_cpumask.attr,
7618404b0fbSQi Liu 	NULL
7628404b0fbSQi Liu };
7638404b0fbSQi Liu 
7648404b0fbSQi Liu static const struct attribute_group hisi_pcie_pmu_cpumask_attr_group = {
7658404b0fbSQi Liu 	.attrs = hisi_pcie_pmu_cpumask_attrs,
7668404b0fbSQi Liu };
7678404b0fbSQi Liu 
7688404b0fbSQi Liu static struct attribute *hisi_pcie_pmu_identifier_attrs[] = {
7698404b0fbSQi Liu 	&dev_attr_identifier.attr,
7708404b0fbSQi Liu 	NULL
7718404b0fbSQi Liu };
7728404b0fbSQi Liu 
7738404b0fbSQi Liu static const struct attribute_group hisi_pcie_pmu_identifier_attr_group = {
7748404b0fbSQi Liu 	.attrs = hisi_pcie_pmu_identifier_attrs,
7758404b0fbSQi Liu };
7768404b0fbSQi Liu 
7778404b0fbSQi Liu static const struct attribute_group *hisi_pcie_pmu_attr_groups[] = {
7788404b0fbSQi Liu 	&hisi_pcie_pmu_events_group,
7798404b0fbSQi Liu 	&hisi_pcie_pmu_format_group,
7808404b0fbSQi Liu 	&hisi_pcie_pmu_bus_attr_group,
7818404b0fbSQi Liu 	&hisi_pcie_pmu_cpumask_attr_group,
7828404b0fbSQi Liu 	&hisi_pcie_pmu_identifier_attr_group,
7838404b0fbSQi Liu 	NULL
7848404b0fbSQi Liu };
7858404b0fbSQi Liu 
/*
 * Populate @pcie_pmu from the device's ID registers and fill in the perf
 * core callbacks. The PMU name embeds the SICL and core IDs read from
 * HISI_PCIE_REG_INFO so each hardware instance is uniquely identifiable.
 *
 * Returns 0 on success, -ENOMEM if the name allocation fails.
 */
static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	struct hisi_pcie_reg_pair regs;
	u16 sicl_id, core_id;
	char *name;

	/* Requester ID (BDF) range this PMU instance can filter on. */
	regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_BDF);
	pcie_pmu->bdf_min = regs.lo;
	pcie_pmu->bdf_max = regs.hi;

	regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_INFO);
	sicl_id = regs.hi;
	core_id = regs.lo;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_pcie%u_core%u", sicl_id, core_id);
	if (!name)
		return -ENOMEM;

	pcie_pmu->pdev = pdev;
	/* No owning CPU yet; the hotplug online callback will pick one. */
	pcie_pmu->on_cpu = -1;
	pcie_pmu->identifier = readl(pcie_pmu->base + HISI_PCIE_REG_VERSION);
	pcie_pmu->pmu = (struct pmu) {
		.name		= name,
		.module		= THIS_MODULE,
		.parent		= &pdev->dev,
		.event_init	= hisi_pcie_pmu_event_init,
		.pmu_enable	= hisi_pcie_pmu_enable,
		.pmu_disable	= hisi_pcie_pmu_disable,
		.add		= hisi_pcie_pmu_add,
		.del		= hisi_pcie_pmu_del,
		.start		= hisi_pcie_pmu_start,
		.stop		= hisi_pcie_pmu_stop,
		.read		= hisi_pcie_pmu_read,
		.task_ctx_nr	= perf_invalid_context,
		.attr_groups	= hisi_pcie_pmu_attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	return 0;
}
8268404b0fbSQi Liu 
/*
 * Bring the PMU fully online: map its registers (BAR 2), fill in the
 * pmu structure, register the overflow IRQ, attach to the CPU hotplug
 * state machine, and finally register with the perf core. Resources are
 * unwound in reverse order on failure via the goto chain below.
 *
 * Returns 0 on success or a negative errno.
 */
static int hisi_pcie_init_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	int ret;

	pcie_pmu->base = pci_ioremap_bar(pdev, 2);
	if (!pcie_pmu->base) {
		pci_err(pdev, "Ioremap failed for pcie_pmu resource\n");
		return -ENOMEM;
	}

	ret = hisi_pcie_alloc_pmu(pdev, pcie_pmu);
	if (ret)
		goto err_iounmap;

	ret = hisi_pcie_pmu_irq_register(pdev, pcie_pmu);
	if (ret)
		goto err_iounmap;

	/* The online callback will assign an owning CPU for this instance. */
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
	if (ret) {
		pci_err(pdev, "Failed to register hotplug: %d\n", ret);
		goto err_irq_unregister;
	}

	ret = perf_pmu_register(&pcie_pmu->pmu, pcie_pmu->pmu.name, -1);
	if (ret) {
		pci_err(pdev, "Failed to register PCIe PMU: %d\n", ret);
		goto err_hotplug_unregister;
	}

	return ret;

err_hotplug_unregister:
	cpuhp_state_remove_instance_nocalls(
		CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);

err_irq_unregister:
	hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);

err_iounmap:
	iounmap(pcie_pmu->base);

	return ret;
}
8718404b0fbSQi Liu 
/* Tear down everything hisi_pcie_init_pmu() set up, in reverse order. */
static void hisi_pcie_uninit_pmu(struct pci_dev *pdev)
{
	struct hisi_pcie_pmu *pcie_pmu = pci_get_drvdata(pdev);

	perf_pmu_unregister(&pcie_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(
		CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
	hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);
	iounmap(pcie_pmu->base);
}
8828404b0fbSQi Liu 
/*
 * Basic PCI bring-up for the RCiEP: enable the device, claim BAR 2
 * (the PMU register window), and enable bus mastering so the device
 * can raise MSIs. Managed (pcim_*) calls release on driver detach.
 *
 * Returns 0 on success or a negative errno.
 */
static int hisi_pcie_init_dev(struct pci_dev *pdev)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		pci_err(pdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME);
	if (ret < 0) {
		pci_err(pdev, "Failed to request PCI mem regions: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}
9038404b0fbSQi Liu 
hisi_pcie_pmu_probe(struct pci_dev * pdev,const struct pci_device_id * id)9048404b0fbSQi Liu static int hisi_pcie_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
9058404b0fbSQi Liu {
9068404b0fbSQi Liu 	struct hisi_pcie_pmu *pcie_pmu;
9078404b0fbSQi Liu 	int ret;
9088404b0fbSQi Liu 
9098404b0fbSQi Liu 	pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
9108404b0fbSQi Liu 	if (!pcie_pmu)
9118404b0fbSQi Liu 		return -ENOMEM;
9128404b0fbSQi Liu 
9138404b0fbSQi Liu 	ret = hisi_pcie_init_dev(pdev);
9148404b0fbSQi Liu 	if (ret)
9158404b0fbSQi Liu 		return ret;
9168404b0fbSQi Liu 
9178404b0fbSQi Liu 	ret = hisi_pcie_init_pmu(pdev, pcie_pmu);
9188404b0fbSQi Liu 	if (ret)
9198404b0fbSQi Liu 		return ret;
9208404b0fbSQi Liu 
9218404b0fbSQi Liu 	pci_set_drvdata(pdev, pcie_pmu);
9228404b0fbSQi Liu 
9238404b0fbSQi Liu 	return ret;
9248404b0fbSQi Liu }
9258404b0fbSQi Liu 
/* PCI remove: unwind the PMU and clear the driver data pointer. */
static void hisi_pcie_pmu_remove(struct pci_dev *pdev)
{
	hisi_pcie_uninit_pmu(pdev);
	pci_set_drvdata(pdev, NULL);
}
9318404b0fbSQi Liu 
9328404b0fbSQi Liu static const struct pci_device_id hisi_pcie_pmu_ids[] = {
9338404b0fbSQi Liu 	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12d) },
9348404b0fbSQi Liu 	{ 0, }
9358404b0fbSQi Liu };
9368404b0fbSQi Liu MODULE_DEVICE_TABLE(pci, hisi_pcie_pmu_ids);
9378404b0fbSQi Liu 
9388404b0fbSQi Liu static struct pci_driver hisi_pcie_pmu_driver = {
9398404b0fbSQi Liu 	.name = DRV_NAME,
9408404b0fbSQi Liu 	.id_table = hisi_pcie_pmu_ids,
9418404b0fbSQi Liu 	.probe = hisi_pcie_pmu_probe,
9428404b0fbSQi Liu 	.remove = hisi_pcie_pmu_remove,
9438404b0fbSQi Liu };
9448404b0fbSQi Liu 
/*
 * Module init: register the multi-instance CPU hotplug state first (the
 * probe path adds instances to it), then the PCI driver. The hotplug
 * state is removed again if driver registration fails.
 */
static int __init hisi_pcie_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
				      "AP_PERF_ARM_HISI_PCIE_PMU_ONLINE",
				      hisi_pcie_pmu_online_cpu,
				      hisi_pcie_pmu_offline_cpu);
	if (ret) {
		pr_err("Failed to setup PCIe PMU hotplug: %d\n", ret);
		return ret;
	}

	ret = pci_register_driver(&hisi_pcie_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);

	return ret;
}
module_init(hisi_pcie_module_init);
965 
hisi_pcie_module_exit(void)966 static void __exit hisi_pcie_module_exit(void)
967 {
968 	pci_unregister_driver(&hisi_pcie_pmu_driver);
969 	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);
970 }
971 module_exit(hisi_pcie_module_exit);
972 
973 MODULE_DESCRIPTION("HiSilicon PCIe PMU driver");
974 MODULE_LICENSE("GPL v2");
975 MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
976