// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)	"habanalabs: " fmt

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/msr.h>

/* make sure there is space for all the signed info */
static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ);

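/*
 * Expected input-struct size for each coresight debug operation; ops that
 * take no input parameters (funnel, timestamp) have size zero.
 */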
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0
};

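/* Copy the current device status to userspace */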
static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_status dev_stat = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!size) || (!out))
		return -EINVAL;

	dev_stat.status = hl_device_status(hdev);

	return copy_to_user(out, &dev_stat,
			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}

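/*
 * Fill and copy to userspace the H/W IP information: device ID, SRAM/DRAM
 * base addresses and usable sizes (excluding the KMD-reserved regions),
 * enabled engine masks, firmware versions and other fixed ASIC properties.
 */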
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size, dram_available_size;

	if ((!size) || (!out))
		return -EINVAL;

	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	hw_ip.dram_base_address =
			prop->dram_supports_virtual_memory ?
			prop->dmmu.start_addr : prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
	hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;

	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	dram_available_size = prop->dram_size - dram_kmd_size;

	hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size, prop->dram_page_size) *
				prop->dram_page_size;

	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;

	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
	hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
	hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
	hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
	hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;

	hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
	hw_ip.server_type = prop->server_type;
	hw_ip.security_enabled = prop->fw_security_enabled;
	hw_ip.revision_id = hdev->pdev->revision;
	hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask;
	hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
	hw_ip.reserved_dram_size = dram_kmd_size;

	return copy_to_user(out, &hw_ip,
		min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}

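/*
 * Copy the ASIC event counters to userspace; with @aggregate set, the
 * counters are accumulated across device resets.
 */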
static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	u32 size, max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	void *arr;

	if ((!max_size) || (!out))
		return -EINVAL;

	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
	if (!arr) {
		dev_err(hdev->dev, "Events info not supported\n");
		return -EOPNOTSUPP;
	}

	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}

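/*
 * Return the mask of notifier events recorded for this process and clear it,
 * so every event is reported to userspace exactly once.
 */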
static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	u32 max_size = args->return_size;
	u64 events_mask;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((max_size < sizeof(u64)) || (!out))
		return -EINVAL;

	mutex_lock(&hpriv->notifier_event.lock);
	events_mask = hpriv->notifier_event.events_mask;
	hpriv->notifier_event.events_mask = 0;
	mutex_unlock(&hpriv->notifier_event.lock);

	return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
}

static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_dram_usage dram_usage = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_kmd_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);
	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
					atomic64_read(&hdev->dram_used_mem);
	if (hpriv->ctx)
		dram_usage.ctx_dram_mem =
			atomic64_read(&hpriv->ctx->dram_phys_mem);

	return copy_to_user(out, &dram_usage,
		min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}

static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_idle hw_idle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
					hw_idle.busy_engines_mask_ext,
					HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
	hw_idle.busy_engines_mask =
			lower_32_bits(hw_idle.busy_engines_mask_ext[0]);

	return copy_to_user(out, &hw_idle,
		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}

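/*
 * Run a coresight debug operation on behalf of userspace: copy in the
 * op-specific input parameters (if any), invoke the ASIC-specific handler
 * and copy back any output buffer it produced.
 */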
static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}

out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}

static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_utilization device_util = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_device_utilization(hdev, &device_util.utilization);
	if (rc)
		return -EINVAL;

	return copy_to_user(out, &device_util,
		min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}

static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_clk_rate clk_rate = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
	if (rc)
		return rc;

	return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
										? -EFAULT : 0;
}

static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_reset_count reset_count = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
	reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;

	return copy_to_user(out, &reset_count,
		min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}

static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_time_sync time_sync = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
	time_sync.host_time = ktime_get_raw_ns();
	time_sync.tsc_time = rdtsc();

	return copy_to_user(out, &time_sync,
		min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}

static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_pci_counters pci_counters = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
	if (rc)
		return rc;

	return copy_to_user(out, &pci_counters,
		min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}

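/*
 * Report the current and aggregated clock-throttling reasons, together with
 * the start timestamp and duration of each throttling event. An event that
 * is still in progress is measured against the current time.
 */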
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	ktime_t end_time, zero_time = ktime_set(0, 0);
	u32 max_size = args->return_size;
	int i;

	if ((!max_size) || (!out))
		return -EINVAL;

	mutex_lock(&hdev->clk_throttling.lock);

	clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

	for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
		if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
			continue;

		clk_throttle.clk_throttling_timestamp_us[i] =
			ktime_to_us(hdev->clk_throttling.timestamp[i].start);

		if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
			end_time = hdev->clk_throttling.timestamp[i].end;
		else
			end_time = ktime_get();

		clk_throttle.clk_throttling_duration_ns[i] =
			ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));
	}
	mutex_unlock(&hdev->clk_throttling.lock);

	return copy_to_user(out, &clk_throttle,
		min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}

static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}

static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_info_sync_manager sm_info = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if (args->dcore_id >= HL_MAX_DCORES)
		return -EINVAL;

	sm_info.first_available_sync_object =
			prop->first_available_user_sob[args->dcore_id];
	sm_info.first_available_monitor =
			prop->first_available_user_mon[args->dcore_id];
	sm_info.first_available_cq =
			prop->first_available_cq[args->dcore_id];

	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
			sizeof(sm_info))) ? -EFAULT : 0;
}

static int total_energy_consumption_info(struct hl_fpriv *hpriv,
			struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_energy total_energy = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_total_energy_get(hdev,
			&total_energy.total_energy_consumption);
	if (rc)
		return rc;

	return copy_to_user(out, &total_energy,
		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}

static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_pll_frequency_info freq_info = { {0} };
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
	if (rc)
		return rc;

	return copy_to_user(out, &freq_info,
		min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}

static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_power_info power_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
	if (rc)
		return rc;

	return copy_to_user(out, &power_info,
		min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
}

static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_open_stats_info open_stats_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
		hdev->last_open_session_duration_jif);
	open_stats_info.open_counter = hdev->open_counter;
	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;

	return copy_to_user(out, &open_stats_info,
		min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
}

static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	u32 pend_rows_num = 0;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
	if (rc)
		return rc;

	return copy_to_user(out, &pend_rows_num,
			min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
}

static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct cpucp_hbm_row_info info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_replaced_row_get(hdev, &info);
	if (rc)
		return rc;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_last_err_open_dev_time info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_cs_timeout_event info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.seq = hdev->captured_err_info.cs_timeout.seq;
	info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct razwi_info *razwi_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	razwi_info = &hdev->captured_err_info.razwi_info;
	if (!razwi_info->razwi_info_available)
		return 0;

	return copy_to_user(out, &razwi_info->razwi,
			min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
}

static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_info_undefined_opcode_event info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
	info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
	info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
	info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
	info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
	info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
	memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
			sizeof(info.cb_addr_streams));

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_dev_memalloc_page_sizes info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	/*
	 * Future ASICs that support multiple DRAM page sizes will support only
	 * page sizes that are powers of 2 (unlike some earlier ASICs).
	 * Therefore, for any ASIC that does not support multiple page sizes,
	 * this function returns an empty bitmask, indicating that multiple
	 * page sizes are not supported.
	 */
	info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

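/*
 * Retrieve the security attestation data (PCR values, quote, certificate
 * and signature) from the firmware, using the caller-supplied nonce, and
 * copy it to userspace.
 */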
static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct cpucp_sec_attest_info *sec_attest_info;
	struct hl_info_sec_attest *info;
	u32 max_size = args->return_size;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
	if (!sec_attest_info)
		return -ENOMEM;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto free_sec_attest_info;
	}

	rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
	if (rc)
		goto free_info;

	info->nonce = le32_to_cpu(sec_attest_info->nonce);
	info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
	info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
	info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
	info->pcr_num_reg = sec_attest_info->pcr_num_reg;
	info->pcr_reg_len = sec_attest_info->pcr_reg_len;
	info->quote_sig_len = sec_attest_info->quote_sig_len;
	memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
	memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
	memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
	memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
	memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));

	rc = copy_to_user(out, info,
				min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;

free_info:
	kfree(info);
free_sec_attest_info:
	kfree(sec_attest_info);

	return rc;
}

static int dev_info_signed(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct cpucp_dev_info_signed *dev_info_signed;
	struct hl_info_signed *info;
	u32 max_size = args->return_size;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	dev_info_signed = kzalloc(sizeof(*dev_info_signed), GFP_KERNEL);
	if (!dev_info_signed)
		return -ENOMEM;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto free_dev_info_signed;
	}

	rc = hl_fw_get_dev_info_signed(hpriv->hdev,
					dev_info_signed, args->sec_attest_nonce);
	if (rc)
		goto free_info;

	info->nonce = le32_to_cpu(dev_info_signed->nonce);
	info->info_sig_len = dev_info_signed->info_sig_len;
	info->pub_data_len = le16_to_cpu(dev_info_signed->pub_data_len);
	info->certificate_len = le16_to_cpu(dev_info_signed->certificate_len);
	info->dev_info_len = sizeof(struct cpucp_info);
	memcpy(&info->info_sig, &dev_info_signed->info_sig, sizeof(info->info_sig));
	memcpy(&info->public_data, &dev_info_signed->public_data, sizeof(info->public_data));
	memcpy(&info->certificate, &dev_info_signed->certificate, sizeof(info->certificate));
	memcpy(&info->dev_info, &dev_info_signed->info, info->dev_info_len);

	rc = copy_to_user(out, info, min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;

free_info:
	kfree(info);
free_dev_info_signed:
	kfree(dev_info_signed);

	return rc;
}

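/*
 * Attach a userspace eventfd to this process's notifier so the driver can
 * signal asynchronous device events; only one eventfd can be registered at
 * a time per open device file.
 */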
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	int rc;

	/* check if there is already an eventfd registered for this process */
	mutex_lock(&hpriv->notifier_event.lock);
	if (hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
	if (IS_ERR(hpriv->notifier_event.eventfd)) {
		rc = PTR_ERR(hpriv->notifier_event.eventfd);
		hpriv->notifier_event.eventfd = NULL;
		mutex_unlock(&hpriv->notifier_event.lock);
		return rc;
	}

	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	mutex_lock(&hpriv->notifier_event.lock);
	if (!hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	eventfd_ctx_put(hpriv->notifier_event.eventfd);
	hpriv->notifier_event.eventfd = NULL;
	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 status_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct engines_data eng_data;
	int rc;

	if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
		return -EINVAL;

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = status_buf_size;
	eng_data.buf = vmalloc(status_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
			"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
			eng_data.actual_size, status_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	args->user_buffer_actual_size = eng_data.actual_size;
	rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
				-EFAULT : 0;

	vfree(eng_data.buf);

	return rc;
}

static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct page_fault_info *pgf_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	return copy_to_user(out, &pgf_info->page_fault,
			min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}

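/*
 * Copy the user mappings captured on the last page fault to userspace. The
 * number of mappings is returned in args->array_size so the caller can size
 * its buffer accordingly.
 */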
static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 user_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct page_fault_info *pgf_info;
	u64 actual_size;

	if (!out)
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	args->array_size = pgf_info->num_of_user_mappings;

	actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
	if (user_buf_size < actual_size)
		return -ENOMEM;

	return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}

static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct hw_err_info *info;
	int rc;

	if (!user_buf)
		return -EINVAL;

	info = &hdev->captured_err_info.hw_err;
	if (!info->event_info_available)
		return 0;

	if (user_buf_size < sizeof(struct hl_info_hw_err_event))
		return -ENOMEM;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
	return rc ? -EFAULT : 0;
}

static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct fw_err_info *info;
	int rc;

	if (!user_buf)
		return -EINVAL;

	info = &hdev->captured_err_info.fw_err;
	if (!info->event_info_available)
		return 0;

	if (user_buf_size < sizeof(struct hl_info_fw_err_event))
		return -ENOMEM;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
	return rc ? -EFAULT : 0;
}

static int engine_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct engine_err_info *info;
	int rc;

	if (!user_buf)
		return -EINVAL;

	info = &hdev->captured_err_info.engine_err;
	if (!info->event_info_available)
		return 0;

	if (user_buf_size < sizeof(struct hl_info_engine_err_event))
		return -ENOMEM;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_engine_err_event));
	return rc ? -EFAULT : 0;
}

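/*
 * Forward a generic request to the firmware through a CPU-accessible DMA
 * buffer and copy the firmware's reply back to userspace. Only explicitly
 * allowed sub-opcodes are accepted.
 */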
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
	void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
	u32 size = info_args->return_size;
	dma_addr_t dma_handle;
	bool need_input_buff;
	void *fw_buff;
	int rc = 0;

	switch (info_args->fw_sub_opcode) {
	case HL_PASSTHROUGH_VERSIONS:
		need_input_buff = false;
		break;
	default:
		return -EINVAL;
	}

	if (size > SZ_1M) {
		dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
		return -EINVAL;
	}

	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
	if (!fw_buff)
		return -ENOMEM;

	if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
		dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
		rc = -EFAULT;
		goto free_buff;
	}

	rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
	if (rc)
		goto free_buff;

	if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
		dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
		rc = -EFAULT;
	}

free_buff:
	hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);

	return rc;
}

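/*
 * Main dispatcher for the INFO ioctl. A first group of opcodes is served
 * even when the device is disabled or in reset; the rest require an
 * operational device. As a rough illustrative sketch (not taken from this
 * driver), a userspace caller with an open device fd would look like:
 *
 *	struct hl_info_device_status dev_stat;
 *	struct hl_info_args args = {
 *		.op = HL_INFO_DEVICE_STATUS,
 *		.return_pointer = (__u64) (uintptr_t) &dev_stat,
 *		.return_size = sizeof(dev_stat),
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_HL_INFO, &args);
 */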
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc;

	if (args->pad) {
		dev_dbg(hdev->dev, "Padding bytes must be 0\n");
		return -EINVAL;
	}

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	case HL_INFO_HW_EVENTS:
		return hw_events_info(hdev, false, args);

	case HL_INFO_HW_EVENTS_AGGREGATE:
		return hw_events_info(hdev, true, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
		return last_err_open_dev_info(hpriv, args);

	case HL_INFO_CS_TIMEOUT_EVENT:
		return cs_timeout_info(hpriv, args);

	case HL_INFO_RAZWI_EVENT:
		return razwi_info(hpriv, args);

	case HL_INFO_UNDEFINED_OPCODE_EVENT:
		return undefined_opcode_info(hpriv, args);

	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
		return dev_mem_alloc_page_sizes_info(hpriv, args);

	case HL_INFO_GET_EVENTS:
		return events_info(hpriv, args);

	case HL_INFO_PAGE_FAULT_EVENT:
		return page_fault_info(hpriv, args);

	case HL_INFO_USER_MAPPINGS:
		return user_mappings_info(hpriv, args);

	case HL_INFO_UNREGISTER_EVENTFD:
		return eventfd_unregister(hpriv, args);

	case HL_INFO_HW_ERR_EVENT:
		return hw_err_info(hpriv, args);

	case HL_INFO_FW_ERR_EVENT:
		return fw_err_info(hpriv, args);

	case HL_INFO_USER_ENGINE_ERR_EVENT:
		return engine_err_info(hpriv, args);

	case HL_INFO_DRAM_USAGE:
		return dram_usage_info(hpriv, args);
	default:
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);

	case HL_INFO_DRAM_REPLACED_ROWS:
		return dram_replaced_rows_info(hpriv, args);

	case HL_INFO_DRAM_PENDING_ROWS:
		return dram_pending_rows_info(hpriv, args);

	case HL_INFO_SECURED_ATTESTATION:
		return sec_attest_info(hpriv, args);

	case HL_INFO_REGISTER_EVENTFD:
		return eventfd_register(hpriv, args);

	case HL_INFO_ENGINE_STATUS:
		return engine_status_info(hpriv, args);

	case HL_INFO_FW_GENERIC_REQ:
		return send_fw_generic_request(hdev, args);

	case HL_INFO_DEV_SIGNED:
		return dev_info_signed(hpriv, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int hl_info_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
	struct hl_fpriv *hpriv = file_priv->driver_priv;

	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}

static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	struct hl_info_args *args = data;

	switch (args->op) {
	case HL_INFO_GET_EVENTS:
	case HL_INFO_UNREGISTER_EVENTFD:
	case HL_INFO_REGISTER_EVENTFD:
		return -EOPNOTSUPP;
	default:
		break;
	}

	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}

int hl_debug_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
	struct hl_fpriv *hpriv = file_priv->driver_priv;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_debug_args *args = data;
	enum hl_device_status status;
	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, hpriv->ctx, args);
		break;

	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
		break;

	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

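/* Map an ioctl to its handler, indexed relative to HL_COMMAND_START */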
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl) - HL_COMMAND_START] = {.cmd = ioctl, .func = _func}

static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(DRM_IOCTL_HL_INFO, hl_info_ioctl_control)
};

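/*
 * Common ioctl plumbing: stage the user arguments in a kernel buffer (on
 * the stack when they fit), call the handler and copy the result back.
 * The kernel's own ioctl definition is trusted over the one userspace sent.
 */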
static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long arg,
			const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode) {
		char task_comm[TASK_COMM_LEN];

		dev_dbg_ratelimited(dev,
				"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
				task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
	}

	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}

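/*
 * Ioctl entry point for the control device node; only the INFO ioctl is
 * served here, and it is further restricted by hl_info_ioctl_control().
 */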
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
		ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
	} else {
		char task_comm[TASK_COMM_LEN];

		dev_dbg_ratelimited(hdev->dev_ctrl,
				"invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
				task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
		return -ENOTTY;
	}

	return _hl_ioctl(hpriv, cmd, arg, ioctl, hdev->dev_ctrl);
}