1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8 #define pr_fmt(fmt) "habanalabs: " fmt
9
10 #include <uapi/misc/habanalabs.h>
11 #include "habanalabs.h"
12
13 #include <linux/kernel.h>
14 #include <linux/fs.h>
15 #include <linux/uaccess.h>
16 #include <linux/slab.h>
17
18 static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
19 [HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
20 [HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
21 [HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
22 [HL_DEBUG_OP_FUNNEL] = 0,
23 [HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
24 [HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
25 [HL_DEBUG_OP_TIMESTAMP] = 0
26
27 };
28
device_status_info(struct hl_device * hdev,struct hl_info_args * args)29 static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
30 {
31 struct hl_info_device_status dev_stat = {0};
32 u32 size = args->return_size;
33 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
34
35 if ((!size) || (!out))
36 return -EINVAL;
37
38 dev_stat.status = hl_device_status(hdev);
39
40 return copy_to_user(out, &dev_stat,
41 min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
42 }
43
/*
 * hw_ip_info() - fill and return the HW IP information block to userspace.
 * Reports device id, usable SRAM/DRAM bases and sizes (after subtracting the
 * driver-reserved region), enabled engines, firmware versions and PLL data.
 * Returns 0 on success, -EINVAL on a bad user buffer, -EFAULT on copy failure.
 */
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size;

	if ((!size) || (!out))
		return -EINVAL;

	/* Portion of SRAM/DRAM below the user base is reserved for the driver */
	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	/* With MMU + virtual DRAM, userspace sees the device-MMU virtual base */
	hw_ip.dram_base_address =
		hdev->mmu_enable && prop->dram_supports_virtual_memory ?
		prop->dmmu.start_addr : prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	/* With MMU, round usable DRAM down to a whole number of DRAM pages */
	if (hdev->mmu_enable)
		hw_ip.dram_size =
			DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
						prop->dram_page_size) *
							prop->dram_page_size;
	else
		hw_ip.dram_size = prop->dram_size - dram_kmd_size;

	/* NOTE(review): DRAM reported enabled only if more than a page remains
	 * for the user — confirm this threshold is intentional
	 */
	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;
	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	/* min() guards against the uapi and cpucp buffers differing in size */
	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	/* cpucp fields arrive little-endian from firmware */
	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.first_available_interrupt_id =
			prop->first_available_user_msix_interrupt;
	return copy_to_user(out, &hw_ip,
		min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
}
100
/*
 * hw_events_info() - copy the HW events statistics array to userspace.
 * @aggregate: true for aggregate (since-boot) counters, false for current.
 * Returns 0 on success, -EINVAL on a bad user buffer, -EFAULT on copy failure.
 */
static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 max_size = args->return_size;
	u32 size;
	void *arr;

	if (!max_size || !out)
		return -EINVAL;

	/* ASIC-specific callback returns the stats buffer and its size */
	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);

	if (copy_to_user(out, arr, min(max_size, size)))
		return -EFAULT;

	return 0;
}
115
dram_usage_info(struct hl_fpriv * hpriv,struct hl_info_args * args)116 static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
117 {
118 struct hl_device *hdev = hpriv->hdev;
119 struct hl_info_dram_usage dram_usage = {0};
120 u32 max_size = args->return_size;
121 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
122 struct asic_fixed_properties *prop = &hdev->asic_prop;
123 u64 dram_kmd_size;
124
125 if ((!max_size) || (!out))
126 return -EINVAL;
127
128 dram_kmd_size = (prop->dram_user_base_address -
129 prop->dram_base_address);
130 dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
131 atomic64_read(&hdev->dram_used_mem);
132 if (hpriv->ctx)
133 dram_usage.ctx_dram_mem =
134 atomic64_read(&hpriv->ctx->dram_phys_mem);
135
136 return copy_to_user(out, &dram_usage,
137 min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
138 }
139
hw_idle(struct hl_device * hdev,struct hl_info_args * args)140 static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
141 {
142 struct hl_info_hw_idle hw_idle = {0};
143 u32 max_size = args->return_size;
144 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
145
146 if ((!max_size) || (!out))
147 return -EINVAL;
148
149 hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
150 hw_idle.busy_engines_mask_ext,
151 HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
152 hw_idle.busy_engines_mask =
153 lower_32_bits(hw_idle.busy_engines_mask_ext[0]);
154
155 return copy_to_user(out, &hw_idle,
156 min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
157 }
158
/*
 * debug_coresight() - run a coresight debug operation on behalf of userspace.
 * Copies the optional input payload in, invokes the ASIC-specific
 * debug_coresight callback, and copies the optional output payload back.
 * All temporary buffers are released on every path via goto cleanup.
 * Returns 0 on success or a negative errno (-ENOMEM/-EFAULT or callback rc).
 */
static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		/* Caller (hl_debug_ioctl) has already clamped input_size to
		 * hl_debug_struct_size[op], so this allocation bounds the copy.
		 */
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		/* Output buffer size is user-controlled */
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}

out:
	/* kfree(NULL) is a no-op, so unconditional frees are safe here */
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}
223
device_utilization(struct hl_device * hdev,struct hl_info_args * args)224 static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
225 {
226 struct hl_info_device_utilization device_util = {0};
227 u32 max_size = args->return_size;
228 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
229 int rc;
230
231 if ((!max_size) || (!out))
232 return -EINVAL;
233
234 rc = hl_device_utilization(hdev, &device_util.utilization);
235 if (rc)
236 return -EINVAL;
237
238 return copy_to_user(out, &device_util,
239 min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
240 }
241
get_clk_rate(struct hl_device * hdev,struct hl_info_args * args)242 static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
243 {
244 struct hl_info_clk_rate clk_rate = {0};
245 u32 max_size = args->return_size;
246 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
247 int rc;
248
249 if ((!max_size) || (!out))
250 return -EINVAL;
251
252 rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
253 &clk_rate.max_clk_rate_mhz);
254 if (rc)
255 return rc;
256
257 return copy_to_user(out, &clk_rate,
258 min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
259 }
260
get_reset_count(struct hl_device * hdev,struct hl_info_args * args)261 static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
262 {
263 struct hl_info_reset_count reset_count = {0};
264 u32 max_size = args->return_size;
265 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
266
267 if ((!max_size) || (!out))
268 return -EINVAL;
269
270 reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
271 reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
272
273 return copy_to_user(out, &reset_count,
274 min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
275 }
276
time_sync_info(struct hl_device * hdev,struct hl_info_args * args)277 static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
278 {
279 struct hl_info_time_sync time_sync = {0};
280 u32 max_size = args->return_size;
281 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
282
283 if ((!max_size) || (!out))
284 return -EINVAL;
285
286 time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
287 time_sync.host_time = ktime_get_raw_ns();
288
289 return copy_to_user(out, &time_sync,
290 min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
291 }
292
pci_counters_info(struct hl_fpriv * hpriv,struct hl_info_args * args)293 static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
294 {
295 struct hl_device *hdev = hpriv->hdev;
296 struct hl_info_pci_counters pci_counters = {0};
297 u32 max_size = args->return_size;
298 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
299 int rc;
300
301 if ((!max_size) || (!out))
302 return -EINVAL;
303
304 rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
305 if (rc)
306 return rc;
307
308 return copy_to_user(out, &pci_counters,
309 min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
310 }
311
clk_throttle_info(struct hl_fpriv * hpriv,struct hl_info_args * args)312 static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
313 {
314 struct hl_device *hdev = hpriv->hdev;
315 struct hl_info_clk_throttle clk_throttle = {0};
316 u32 max_size = args->return_size;
317 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
318
319 if ((!max_size) || (!out))
320 return -EINVAL;
321
322 clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason;
323
324 return copy_to_user(out, &clk_throttle,
325 min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
326 }
327
cs_counters_info(struct hl_fpriv * hpriv,struct hl_info_args * args)328 static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
329 {
330 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
331 struct hl_info_cs_counters cs_counters = {0};
332 struct hl_device *hdev = hpriv->hdev;
333 struct hl_cs_counters_atomic *cntr;
334 u32 max_size = args->return_size;
335
336 cntr = &hdev->aggregated_cs_counters;
337
338 if ((!max_size) || (!out))
339 return -EINVAL;
340
341 cs_counters.total_out_of_mem_drop_cnt =
342 atomic64_read(&cntr->out_of_mem_drop_cnt);
343 cs_counters.total_parsing_drop_cnt =
344 atomic64_read(&cntr->parsing_drop_cnt);
345 cs_counters.total_queue_full_drop_cnt =
346 atomic64_read(&cntr->queue_full_drop_cnt);
347 cs_counters.total_device_in_reset_drop_cnt =
348 atomic64_read(&cntr->device_in_reset_drop_cnt);
349 cs_counters.total_max_cs_in_flight_drop_cnt =
350 atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
351 cs_counters.total_validation_drop_cnt =
352 atomic64_read(&cntr->validation_drop_cnt);
353
354 if (hpriv->ctx) {
355 cs_counters.ctx_out_of_mem_drop_cnt =
356 atomic64_read(
357 &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
358 cs_counters.ctx_parsing_drop_cnt =
359 atomic64_read(
360 &hpriv->ctx->cs_counters.parsing_drop_cnt);
361 cs_counters.ctx_queue_full_drop_cnt =
362 atomic64_read(
363 &hpriv->ctx->cs_counters.queue_full_drop_cnt);
364 cs_counters.ctx_device_in_reset_drop_cnt =
365 atomic64_read(
366 &hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
367 cs_counters.ctx_max_cs_in_flight_drop_cnt =
368 atomic64_read(
369 &hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
370 cs_counters.ctx_validation_drop_cnt =
371 atomic64_read(
372 &hpriv->ctx->cs_counters.validation_drop_cnt);
373 }
374
375 return copy_to_user(out, &cs_counters,
376 min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
377 }
378
sync_manager_info(struct hl_fpriv * hpriv,struct hl_info_args * args)379 static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
380 {
381 struct hl_device *hdev = hpriv->hdev;
382 struct asic_fixed_properties *prop = &hdev->asic_prop;
383 struct hl_info_sync_manager sm_info = {0};
384 u32 max_size = args->return_size;
385 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
386
387 if ((!max_size) || (!out))
388 return -EINVAL;
389
390 if (args->dcore_id >= HL_MAX_DCORES)
391 return -EINVAL;
392
393 sm_info.first_available_sync_object =
394 prop->first_available_user_sob[args->dcore_id];
395 sm_info.first_available_monitor =
396 prop->first_available_user_mon[args->dcore_id];
397 sm_info.first_available_cq =
398 prop->first_available_cq[args->dcore_id];
399
400 return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
401 sizeof(sm_info))) ? -EFAULT : 0;
402 }
403
total_energy_consumption_info(struct hl_fpriv * hpriv,struct hl_info_args * args)404 static int total_energy_consumption_info(struct hl_fpriv *hpriv,
405 struct hl_info_args *args)
406 {
407 struct hl_device *hdev = hpriv->hdev;
408 struct hl_info_energy total_energy = {0};
409 u32 max_size = args->return_size;
410 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
411 int rc;
412
413 if ((!max_size) || (!out))
414 return -EINVAL;
415
416 rc = hl_fw_cpucp_total_energy_get(hdev,
417 &total_energy.total_energy_consumption);
418 if (rc)
419 return rc;
420
421 return copy_to_user(out, &total_energy,
422 min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
423 }
424
pll_frequency_info(struct hl_fpriv * hpriv,struct hl_info_args * args)425 static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
426 {
427 struct hl_device *hdev = hpriv->hdev;
428 struct hl_pll_frequency_info freq_info = { {0} };
429 u32 max_size = args->return_size;
430 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
431 int rc;
432
433 if ((!max_size) || (!out))
434 return -EINVAL;
435
436 rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
437 if (rc)
438 return rc;
439
440 return copy_to_user(out, &freq_info,
441 min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
442 }
443
power_info(struct hl_fpriv * hpriv,struct hl_info_args * args)444 static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
445 {
446 struct hl_device *hdev = hpriv->hdev;
447 u32 max_size = args->return_size;
448 struct hl_power_info power_info = {0};
449 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
450 int rc;
451
452 if ((!max_size) || (!out))
453 return -EINVAL;
454
455 rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
456 if (rc)
457 return rc;
458
459 return copy_to_user(out, &power_info,
460 min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
461 }
462
_hl_info_ioctl(struct hl_fpriv * hpriv,void * data,struct device * dev)463 static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
464 struct device *dev)
465 {
466 enum hl_device_status status;
467 struct hl_info_args *args = data;
468 struct hl_device *hdev = hpriv->hdev;
469
470 int rc;
471
472 /*
473 * Information is returned for the following opcodes even if the device
474 * is disabled or in reset.
475 */
476 switch (args->op) {
477 case HL_INFO_HW_IP_INFO:
478 return hw_ip_info(hdev, args);
479
480 case HL_INFO_DEVICE_STATUS:
481 return device_status_info(hdev, args);
482
483 case HL_INFO_RESET_COUNT:
484 return get_reset_count(hdev, args);
485
486 default:
487 break;
488 }
489
490 if (!hl_device_operational(hdev, &status)) {
491 dev_warn_ratelimited(dev,
492 "Device is %s. Can't execute INFO IOCTL\n",
493 hdev->status[status]);
494 return -EBUSY;
495 }
496
497 switch (args->op) {
498 case HL_INFO_HW_EVENTS:
499 rc = hw_events_info(hdev, false, args);
500 break;
501
502 case HL_INFO_DRAM_USAGE:
503 rc = dram_usage_info(hpriv, args);
504 break;
505
506 case HL_INFO_HW_IDLE:
507 rc = hw_idle(hdev, args);
508 break;
509
510 case HL_INFO_DEVICE_UTILIZATION:
511 rc = device_utilization(hdev, args);
512 break;
513
514 case HL_INFO_HW_EVENTS_AGGREGATE:
515 rc = hw_events_info(hdev, true, args);
516 break;
517
518 case HL_INFO_CLK_RATE:
519 rc = get_clk_rate(hdev, args);
520 break;
521
522 case HL_INFO_TIME_SYNC:
523 return time_sync_info(hdev, args);
524
525 case HL_INFO_CS_COUNTERS:
526 return cs_counters_info(hpriv, args);
527
528 case HL_INFO_PCI_COUNTERS:
529 return pci_counters_info(hpriv, args);
530
531 case HL_INFO_CLK_THROTTLE_REASON:
532 return clk_throttle_info(hpriv, args);
533
534 case HL_INFO_SYNC_MANAGER:
535 return sync_manager_info(hpriv, args);
536
537 case HL_INFO_TOTAL_ENERGY:
538 return total_energy_consumption_info(hpriv, args);
539
540 case HL_INFO_PLL_FREQUENCY:
541 return pll_frequency_info(hpriv, args);
542
543 case HL_INFO_POWER:
544 return power_info(hpriv, args);
545
546 default:
547 dev_err(dev, "Invalid request %d\n", args->op);
548 rc = -ENOTTY;
549 break;
550 }
551
552 return rc;
553 }
554
/* INFO ioctl entry for the compute device node; logs to hdev->dev. */
static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}
559
/* INFO ioctl entry for the control device node; logs to hdev->dev_ctrl. */
static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}
564
/*
 * hl_debug_ioctl() - DEBUG ioctl entry point.
 * Coresight configuration ops require the device to be in debug mode and have
 * their user-supplied input_size clamped to the per-op struct size before the
 * copy, so userspace cannot overrun the kernel input buffer.
 * Returns 0 on success, -EBUSY if the device is not operational, -EFAULT if
 * not in debug mode, -ENOTTY on an unknown opcode, or the handler's rc.
 */
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;

	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		/* Clamp untrusted input_size to the expected struct size */
		args->input_size =
			min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, args);
		break;
	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, (bool) args->enable);
		break;
	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -ENOTTY;
		break;
	}

	return rc;
}
608
/* Build a table entry indexed by the ioctl's command number */
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

/* Ioctl dispatch table for the compute device node, indexed by _IOC_NR(cmd) */
static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};

/* Dispatch table for the control device node; only INFO is supported */
static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};
624
/*
 * _hl_ioctl() - common ioctl marshalling for both device nodes.
 * Reconciles the user-encoded argument size with the kernel's definition
 * (allocating the larger of the two), copies the argument in, invokes the
 * handler, and copies the result back. Small arguments use a stack buffer;
 * larger ones are heap-allocated and freed on exit.
 * Returns the handler's result, or -ENODEV/-ENOTTY/-ENOMEM/-EFAULT.
 */
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	if (hdev->hard_reset_pending) {
		dev_crit_ratelimited(dev,
			"Device HARD reset pending! Please close FD\n");
		return -ENODEV;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	/*
	 * usize = size encoded in the user's cmd (amount copied in/out);
	 * asize = allocation size, grown to the kernel's definition if larger
	 */
	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	/* From here on, use the kernel's command definition */
	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(hpriv, kdata);

	/* Copy back only the user-visible portion (usize, not asize) */
	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	/* Only free heap allocations; kdata may point at stack_kdata */
	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}
696
hl_ioctl(struct file * filep,unsigned int cmd,unsigned long arg)697 long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
698 {
699 struct hl_fpriv *hpriv = filep->private_data;
700 struct hl_device *hdev = hpriv->hdev;
701 const struct hl_ioctl_desc *ioctl = NULL;
702 unsigned int nr = _IOC_NR(cmd);
703
704 if (!hdev) {
705 pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
706 return -ENODEV;
707 }
708
709 if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
710 ioctl = &hl_ioctls[nr];
711 } else {
712 dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
713 task_pid_nr(current), nr);
714 return -ENOTTY;
715 }
716
717 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
718 }
719
hl_ioctl_control(struct file * filep,unsigned int cmd,unsigned long arg)720 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
721 {
722 struct hl_fpriv *hpriv = filep->private_data;
723 struct hl_device *hdev = hpriv->hdev;
724 const struct hl_ioctl_desc *ioctl = NULL;
725 unsigned int nr = _IOC_NR(cmd);
726
727 if (!hdev) {
728 pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
729 return -ENODEV;
730 }
731
732 if (nr == _IOC_NR(HL_IOCTL_INFO)) {
733 ioctl = &hl_ioctls_control[nr];
734 } else {
735 dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
736 task_pid_nr(current), nr);
737 return -ENOTTY;
738 }
739
740 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
741 }
742