// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

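/* Drop the queue's VM and xe_file references, then free the queue itself */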
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

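/*
 * Software-only allocation and setup of an exec queue: scheduling defaults are
 * taken from the hw engine class and any user extensions are applied. LRC and
 * backend initialization happen later in __xe_exec_queue_init().
 */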
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_create(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

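/* Create one LRC per parallel width under the VM lock, then hand the queue to the backend */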
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_vm *vm = q->vm;
	int i, err;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			return err;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_unlock;
		}
	}

	if (vm)
		xe_vm_unlock(vm);

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_unlock:
	if (vm)
		xe_vm_unlock(vm);
err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

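/* Allocate an exec queue and run both the software and backend initialization */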
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}

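/*
 * Create an exec queue that may run on any non-reserved engine of @class on
 * @gt: the logical mask is built from all matching engines and the first one
 * found is used as the primary hw engine.
 */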
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. A bind exec queue is tied to the
 * migration VM for access to the physical memory required for page table
 * programming. On faulting devices the reserved copy engine instance must be
 * used to avoid deadlocking (user binds cannot get stuck behind faults, as
 * kernel binds which resolve faults depend on user binds). On non-faulting
 * devices any copy engine can be used.
 *
 * Returns exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	return q;
}

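/*
 * Final kref release callback: drops the last fence, puts any child bind
 * queues hanging off the multi-GT list and asks the backend to tear the
 * queue down.
 */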
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

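/* Final teardown once the backend is done with the queue: release LRCs and free */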
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	/*
	 * Before releasing our ref to lrc and xef, accumulate our run ticks
	 */
	xe_exec_queue_update_run_ticks(q);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);

	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

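/* Look up an exec queue by user-visible id; returns an additional reference or NULL */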
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

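/*
 * Timeout limits are always enforced when CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT
 * is set; otherwise CAP_SYS_NICE callers are allowed to exceed them.
 */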
static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS 16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

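/*
 * Validate the user-supplied engine placement list and turn it into a logical
 * mask: all instances must be on the same GT and engine class, none may be
 * reserved, and for parallel submission each width slot must be logically
 * contiguous with the previous one. Returns 0 on any validation failure.
 */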
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

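/*
 * Exec queue creation ioctl. DRM_XE_ENGINE_CLASS_VM_BIND creates one bind
 * queue per tile (children chained via the multi-GT list); any other class
 * creates a queue on the requested placements within a single GT.
 */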
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_gt *gt;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;
			u32 flags = EXEC_QUEUE_FLAG_VM;

			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, flags,
							args->extensions);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

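/* Number of jobs submitted to the queue but not yet signalled, derived from LRC seqnos */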
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
 * from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save run ticks
 * calculated by using the delta from the last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_file *xef;
	struct xe_lrc *lrc;
	u32 old_ts, new_ts;

	/*
	 * Jobs that are run during driver load may use an exec_queue, but are
	 * not associated with a user xe file, so avoid accumulating busyness
	 * for kernel specific work.
	 */
	if (!q->vm || !q->vm->xef)
		return;

	xef = q->vm->xef;

	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate for that below by multiplying by
	 * width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}

/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	mutex_unlock(&xef->exec_queue.lock);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

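/*
 * The last fence is protected by the VM lock for bind queues and by the VM
 * dma-resv plus the hw engine group semaphore for everything else; assert
 * accordingly.
 */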
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							   struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference to @fence; when closing
 * the engine, xe_exec_queue_last_fence_put() should be called to drop it.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}

/**
 * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Returns:
 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
 */
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence;
	int err = 0;

	fence = xe_exec_queue_last_fence_get(q, vm);
	if (fence) {
		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
			0 : -ETIME;
		dma_fence_put(fence);
	}

	return err;
}