// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OPAL IMC interface detection driver
 * Supported on POWERNV platform
 *
 * Copyright	(C) 2017 Madhavan Srinivasan, IBM Corporation.
 *		(C) 2017 Anju T Sudhakar, IBM Corporation.
 *		(C) 2017 Hemant K Shaw, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>

static struct dentry *imc_debugfs_parent;

/* Helpers to export imc command and mode via debugfs */
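/*
 * Counter control block values are stored big-endian in the counter
 * memory; cpu_to_be64() swaps them to/from host endianness (a no-op on
 * big-endian hosts) for both the read and write paths.
 */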
static int imc_mem_get(void *data, u64 *val)
{
	*val = cpu_to_be64(*(u64 *)data);
	return 0;
}

static int imc_mem_set(void *data, u64 val)
{
	*(u64 *)data = cpu_to_be64(val);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n");

static void imc_debugfs_create_x64(const char *name, umode_t mode,
				   struct dentry *parent, u64 *value)
{
	debugfs_create_file_unsafe(name, mode, parent, value, &fops_imc_x64);
}

/*
 * export_imc_mode_and_cmd: Create debugfs interfaces for imc_cmd and
 * imc_mode for each chip in the system. imc_mode and imc_cmd can be
 * changed by echoing values into these files.
 */
static void export_imc_mode_and_cmd(struct device_node *node,
				    struct imc_pmu *pmu_ptr)
{
	static u64 loc, *imc_mode_addr, *imc_cmd_addr;
	char mode[16], cmd[16];
	u32 cb_offset;
	struct imc_mem_info *ptr = pmu_ptr->mem_info;

	imc_debugfs_parent = debugfs_create_dir("imc", arch_debugfs_dir);

	if (of_property_read_u32(node, "cb_offset", &cb_offset))
		cb_offset = IMC_CNTL_BLK_OFFSET;

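	/*
	 * pmu_ptr->mem_info is a NULL-terminated array (one entry per chip
	 * plus a zeroed sentinel, see imc_get_mem_addr_nest()); walk it
	 * until the vbase == NULL sentinel.
	 */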
	while (ptr->vbase != NULL) {
		loc = (u64)(ptr->vbase) + cb_offset;
		imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
		sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
		imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
				       imc_mode_addr);

		imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
		sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
		imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
				       imc_cmd_addr);
		ptr++;
	}
}

/*
 * imc_get_mem_addr_nest: Function to get nest counter memory region
 * for each chip
 */
static int imc_get_mem_addr_nest(struct device_node *node,
				 struct imc_pmu *pmu_ptr,
				 u32 offset)
{
	int nr_chips = 0, i;
	u64 *base_addr_arr, baddr;
	u32 *chipid_arr;

	nr_chips = of_property_count_u32_elems(node, "chip-id");
	if (nr_chips <= 0)
		return -ENODEV;

	base_addr_arr = kcalloc(nr_chips, sizeof(*base_addr_arr), GFP_KERNEL);
	if (!base_addr_arr)
		return -ENOMEM;

	chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL);
	if (!chipid_arr) {
		kfree(base_addr_arr);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips))
		goto error;

	if (of_property_read_u64_array(node, "base-addr", base_addr_arr,
								nr_chips))
		goto error;

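	/*
	 * Allocate one extra zeroed entry so the array ends with a
	 * vbase == NULL sentinel, which terminates the walk in
	 * export_imc_mode_and_cmd().
	 */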
	pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
				    GFP_KERNEL);
	if (!pmu_ptr->mem_info)
		goto error;

	for (i = 0; i < nr_chips; i++) {
		pmu_ptr->mem_info[i].id = chipid_arr[i];
		baddr = base_addr_arr[i] + offset;
		pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr);
	}

	pmu_ptr->imc_counter_mmaped = true;
	kfree(base_addr_arr);
	kfree(chipid_arr);
	return 0;

error:
	kfree(base_addr_arr);
	kfree(chipid_arr);
	return -1;
}

/*
 * imc_pmu_create: Takes the parent device, which is the PMU unit node,
 * along with pmu_index and domain as inputs. Allocates memory for the
 * struct imc_pmu and sets up its domain, counter memory size and offsets.
 */
static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
{
	int ret = 0;
	struct imc_pmu *pmu_ptr;
	u32 offset;

	/* Return for unknown domain */
	if (domain < 0)
		return NULL;

	/* memory for pmu */
	pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
	if (!pmu_ptr)
		return NULL;

	/* Set the domain */
	pmu_ptr->domain = domain;

	ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
	if (ret)
		goto free_pmu;

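	/*
	 * Nest units carry an "offset" property along with per-chip
	 * "base-addr"/"chip-id" arrays; resolve the per-chip counter
	 * memory base addresses for them here.
	 */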
	if (!of_property_read_u32(parent, "offset", &offset)) {
		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
			goto free_pmu;
	}

	/* Function to register IMC pmu */
	ret = init_imc_pmu(parent, pmu_ptr, pmu_index);
	if (ret) {
		pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name);
		kfree(pmu_ptr->pmu.name);
		if (pmu_ptr->domain == IMC_DOMAIN_NEST)
			kfree(pmu_ptr->mem_info);
		kfree(pmu_ptr);
		return NULL;
	}

	return pmu_ptr;

free_pmu:
	kfree(pmu_ptr);
	return NULL;
}

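/*
 * Stop the nest IMC engines: issue one stop call per chip/node, routed
 * through any online CPU on that node.
 */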
static void disable_nest_pmu_counters(void)
{
	int nid, cpu;
	const struct cpumask *l_cpumask;

	cpus_read_lock();
	for_each_node_with_cpus(nid) {
		l_cpumask = cpumask_of_node(nid);
		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			continue;
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
	}
	cpus_read_unlock();
}

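/*
 * Stop the core IMC engines: issue one stop call per core, from the
 * core's first hardware thread (cores whose first thread is offline are
 * skipped).
 */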
static void disable_core_pmu_counters(void)
{
	int cpu, rc;

	cpus_read_lock();
	/* Disable the IMC Core functions */
	for_each_online_cpu(cpu) {
		if (cpu_first_thread_sibling(cpu) != cpu)
			continue;
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(cpu));
		if (rc)
			pr_err("%s: Failed to stop Core (cpu = %d)\n",
				__func__, cpu);
	}
	cpus_read_unlock();
}

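/* Count the nest (per-chip) IMC units advertised in the device tree. */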
int get_max_nest_dev(void)
{
	struct device_node *node;
	u32 pmu_units = 0, type;

	for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
		if (of_property_read_u32(node, "type", &type))
			continue;

		if (type == IMC_TYPE_CHIP)
			pmu_units++;
	}

	return pmu_units;
}

static int opal_imc_counters_probe(struct platform_device *pdev)
{
	struct device_node *imc_dev = pdev->dev.of_node;
	struct imc_pmu *pmu;
	int pmu_count = 0, domain;
	bool core_imc_reg = false, thread_imc_reg = false;
	u32 type;

	/*
	 * Check whether this is a kdump kernel. If yes, force the engines to
	 * stop and return.
	 */
	if (is_kdump_kernel()) {
		disable_nest_pmu_counters();
		disable_core_pmu_counters();
		return -ENODEV;
	}

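	/*
	 * Walk every IMC unit node in the device tree and create a PMU
	 * for each supported domain (nest, core, thread, trace).
	 */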
	for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
		pmu = NULL;
		if (of_property_read_u32(imc_dev, "type", &type)) {
			pr_warn("IMC Device without type property\n");
			continue;
		}

		switch (type) {
		case IMC_TYPE_CHIP:
			domain = IMC_DOMAIN_NEST;
			break;
		case IMC_TYPE_CORE:
			domain = IMC_DOMAIN_CORE;
			break;
		case IMC_TYPE_THREAD:
			domain = IMC_DOMAIN_THREAD;
			break;
		case IMC_TYPE_TRACE:
			domain = IMC_DOMAIN_TRACE;
			break;
		default:
			pr_warn("Unknown IMC device type\n");
			domain = -1;
			break;
		}

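		/*
		 * Only nest units export the debugfs interface, and only the
		 * first one to register creates it (imc_debugfs_parent is set
		 * then). pmu_count is incremented for nest units only and is
		 * passed to imc_pmu_create() as the pmu_index.
		 */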
		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
		if (pmu != NULL) {
			if (domain == IMC_DOMAIN_NEST) {
				if (!imc_debugfs_parent)
					export_imc_mode_and_cmd(imc_dev, pmu);
				pmu_count++;
			}
			if (domain == IMC_DOMAIN_CORE)
				core_imc_reg = true;
			if (domain == IMC_DOMAIN_THREAD)
				thread_imc_reg = true;
		}
	}

	/* If core imc is not registered, unregister thread-imc */
	if (!core_imc_reg && thread_imc_reg)
		unregister_thread_imc();

	return 0;
}

static void opal_imc_counters_shutdown(struct platform_device *pdev)
{
	/*
	 * This only stops the engines, which is the bare minimum.
	 * TODO: proper memory cleanup and pmu unregistration.
	 */
	disable_nest_pmu_counters();
	disable_core_pmu_counters();
}

static const struct of_device_id opal_imc_match[] = {
	{ .compatible = IMC_DTB_COMPAT },
	{},
};

static struct platform_driver opal_imc_driver = {
	.driver = {
		.name = "opal-imc-counters",
		.of_match_table = opal_imc_match,
	},
	.probe = opal_imc_counters_probe,
	.shutdown = opal_imc_counters_shutdown,
};

builtin_platform_driver(opal_imc_driver);