xref: /linux/drivers/gpu/drm/i915/i915_query.c (revision 2da68a77)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "gt/intel_engine_user.h"
#include <uapi/drm/i915_drm.h>

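/*
 * Copy the fixed-size header of a query item from userspace.
 *
 * Returns 0 on success, the required total_length when the caller passed
 * length == 0 to probe for the buffer size, -EINVAL if the supplied buffer
 * is too small, or -EFAULT if the copy faults.
 */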
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}

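/*
 * Fill a drm_i915_query_topology_info header plus the slice, subslice and
 * EU masks from @sseu into the user buffer described by @query_item.
 * Returns the total number of bytes written, or a negative error code.
 */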
static int fill_topology_info(const struct sseu_dev_info *sseu,
			      struct drm_i915_query_item *query_item,
			      intel_sseu_ss_mask_t subslice_mask)
{
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
	int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
	int ret;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	if (sseu->max_slices == 0)
		return -ENODEV;

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);

	if (ret != 0)
		return ret;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = eu_stride;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			 &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (intel_sseu_copy_ssmask_to_user(u64_to_user_ptr(query_item->data_ptr +
							   sizeof(topo) + slice_length),
					   sseu))
		return -EFAULT;

	if (intel_sseu_copy_eumask_to_user(u64_to_user_ptr(query_item->data_ptr +
							   sizeof(topo) +
							   slice_length + subslice_length),
					   sseu))
		return -EFAULT;

	return total_length;
}

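/* DRM_I915_QUERY_TOPOLOGY_INFO: slice/subslice/EU topology of the primary GT. */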
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;

	if (query_item->flags != 0)
		return -EINVAL;

	return fill_topology_info(sseu, query_item, sseu->subslice_mask);
}

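/*
 * DRM_I915_QUERY_GEOMETRY_SUBSLICES: geometry subslice mask of the render
 * engine whose class/instance is packed into query_item->flags.  Only
 * supported on graphics version 12.50 and newer.
 */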
static int query_geometry_subslices(struct drm_i915_private *i915,
				    struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu;
	struct intel_engine_cs *engine;
	struct i915_engine_class_instance classinstance;

	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		return -ENODEV;

	classinstance = *((struct i915_engine_class_instance *)&query_item->flags);

	engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
					  (u8)classinstance.engine_instance);

	if (!engine)
		return -EINVAL;

	if (engine->class != RENDER_CLASS)
		return -EINVAL;

	sseu = &engine->gt->info.sseu;

	return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
}

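/*
 * DRM_I915_QUERY_ENGINE_INFO: class/instance, capabilities and logical
 * instance of every user-visible engine.
 */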
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = struct_size(query_ptr, engines, num_uabi_engines);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
		info.capabilities = engine->uabi_capabilities;
		info.logical_instance = ilog2(engine->logical_mask);

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * registers.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}

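/*
 * Copy an OA register/value list out to userspace, or only report how many
 * registers there are when the caller passed zero in *user_n_regs.
 */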
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}

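/*
 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID / _FOR_ID: look up an OA config
 * either by UUID or by numeric id (depending on @use_uuid) and copy its
 * register lists back to userspace.
 */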
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}

static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}

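/*
 * DRM_I915_QUERY_PERF_CONFIG_LIST: report the ids of all registered OA
 * configs, starting with the reserved test config (id 1).  The id list is
 * rebuilt until the allocation is large enough to hold a consistent
 * snapshot, in case configs are registered concurrently.
 */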
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}

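/*
 * DRM_I915_QUERY_MEMORY_REGIONS: report every non-private memory region,
 * including probed and (for perfmon-capable callers) unallocated sizes.
 */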
static int query_memregion_info(struct drm_i915_private *i915,
				struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_memory_regions __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_memory_region_info __user *info_ptr =
		&query_ptr->regions[0];
	struct drm_i915_memory_region_info info = { };
	struct drm_i915_query_memory_regions query;
	struct intel_memory_region *mr;
	u32 total_length;
	int ret, id, i;

	if (query_item->flags != 0)
		return -EINVAL;

	total_length = sizeof(query);
	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		total_length += sizeof(info);
	}

	ret = copy_query_item(&query, sizeof(query), total_length, query_item);
	if (ret != 0)
		return ret;

	if (query.num_regions)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
		if (query.rsvd[i])
			return -EINVAL;
	}

	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		info.region.memory_class = mr->type;
		info.region.memory_instance = mr->instance;
		info.probed_size = mr->total;

		if (mr->type == INTEL_MEMORY_LOCAL)
			info.probed_cpu_visible_size = mr->io_size;
		else
			info.probed_cpu_visible_size = mr->total;

		if (perfmon_capable()) {
			intel_memory_region_avail(mr,
						  &info.unallocated_size,
						  &info.unallocated_cpu_visible_size);
		} else {
			info.unallocated_size = info.probed_size;
			info.unallocated_cpu_visible_size =
				info.probed_cpu_visible_size;
		}

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_regions++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return total_length;
}

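/*
 * DRM_I915_QUERY_HWCONFIG_BLOB: copy out the hardware configuration table
 * for the primary GT, if the driver was able to retrieve one.
 */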
static int query_hwconfig_blob(struct drm_i915_private *i915,
			       struct drm_i915_query_item *query_item)
{
	struct intel_gt *gt = to_gt(i915);
	struct intel_hwconfig *hwconfig = &gt->info.hwconfig;

	if (!hwconfig->size || !hwconfig->ptr)
		return -ENODEV;

	if (query_item->length == 0)
		return hwconfig->size;

	if (query_item->length < hwconfig->size)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 hwconfig->ptr, hwconfig->size))
		return -EFAULT;

	return hwconfig->size;
}

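/*
 * Query handlers, indexed by (query_id - 1).  The ordering must match the
 * DRM_I915_QUERY_* values in the uAPI header.
 */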
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
	query_memregion_info,
	query_hwconfig_blob,
	query_geometry_subslices,
};

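/*
 * DRM_IOCTL_I915_QUERY: run each item in the caller's query array and write
 * the handler's return value (the item length, or a negative error code)
 * back to that item's length field when it differs.
 */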
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}