/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
#ifdef __linux__
static struct kmem_cache *slab_cache;
#else
static struct pool slab_cache;
#endif

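/*
 * Each active_node tracks the last fence seen on one timeline, identified
 * by the timeline's u64 fence context id. The nodes live in ref->tree, an
 * rbtree keyed by that timeline id.
 */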
struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

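/*
 * Barrier "proto-nodes" reuse the fields of an active_node before any
 * fence is attached: base.fence is set to ERR_PTR(-EAGAIN) to mark the
 * node as a barrier, base.cb.node.prev smuggles the owning engine, and
 * base.cb.node doubles as the llist_node linking the node into
 * engine->barrier_tasks. The helpers below encode and decode that
 * representation.
 */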
static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

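/*
 * __active_retire() runs once the last reference is dropped: under
 * tree_lock it prunes the rbtree down to a single cached node, then
 * invokes the optional retire() callback, wakes any waiters and finally
 * returns the pruned nodes to the slab cache.
 */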
static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
#ifdef __linux__
		kmem_cache_free(slab_cache, it);
#else
		pool_put(&slab_cache, it);
#endif
	}
}

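/*
 * Retirement may be triggered from a dma-fence callback, potentially in
 * atomic context. If the retire() callback needs to sleep
 * (I915_ACTIVE_RETIRE_SLEEPS), active_retire() defers dropping the final
 * reference to this worker, which runs in process context on
 * system_unbound_wq.
 */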
static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

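/*
 * Strip the __rcu annotation from the fence slot so that the slot can be
 * updated directly with cmpxchg() in active_fence_cb() and
 * __i915_active_fence_set().
 */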
static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip an rbtree search
	 * in the common case; under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is,
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (only
		 * the winner of that race will see cmpxchg return the old
		 * value of 0).
		 */
		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node));

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto out;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/*
	 * XXX: We should preallocate this before i915_active_ref() is ever
	 * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
	 */
#ifdef __linux__
	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
#else
	node = pool_get(&slab_cache, PR_NOWAIT);
#endif
	if (!node)
		goto out;

	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	/* Beware: node is NULL here if the GFP_ATOMIC allocation failed */
	return node ? &node->base : NULL;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	debug_active_init(ref);

	ref->flags = flags;
	ref->active = active;
	ref->retire = retire;

	mtx_init(&ref->tree_lock, IPL_TTY);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
#ifdef __linux__
	__mutex_init(&ref->mutex, "i915_active", mkey);
#else
	rw_init(&ref->mutex, "i915_active");
#endif
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

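/*
 * An illustrative usage sketch (hypothetical caller "foo"; error handling
 * elided). The tracker is initialised once, requests are added while it is
 * active, and the retire() callback fires when the last tracked request
 * completes:
 *
 *	i915_active_init(&foo->active, NULL, foo_retire, 0);
 *	...
 *	err = i915_active_add_request(&foo->active, rq);
 *	...
 *	err = i915_active_wait(&foo->active);
 *	i915_active_fini(&foo->active);
 *
 * i915_active_init() and i915_active_wait() are the wrappers declared in
 * i915_active.h around __i915_active_init() and __i915_active_wait().
 */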
static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	return __active_del_barrier(ref, node_from_active(active));
}

int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
	u64 idx = i915_request_timeline(rq)->fence_context;
	struct dma_fence *fence = &rq->fence;
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	do {
		active = active_instance(ref, idx);
		if (!active) {
			err = -ENOMEM;
			goto out;
		}

		if (replace_barrier(ref, active)) {
			RCU_INIT_POINTER(active->fence, NULL);
			atomic_dec(&ref->count);
		}
	} while (unlikely(is_barrier(active)));

	fence = __i915_active_fence_set(active, fence);
	if (!fence)
		__i915_active_acquire(ref);
	else
		dma_fence_put(fence);

out:
	i915_active_release(ref);
	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	prev = __i915_active_fence_set(active, fence);
	if (!prev)
		__i915_active_acquire(ref);

	return prev;
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

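/*
 * Take a new reference only if the tracker is already active (count != 0).
 * This never transitions the tracker out of idle and so, unlike
 * i915_active_acquire(), requires no serialisation on ref->mutex.
 */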
bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	struct i915_active_fence *active;
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	return 0; /* return with active ref */
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

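/*
 * Wait until the tracker idles: flush lazy signaling on all tracked
 * fences, then sleep in @state until the last active reference is
 * dropped. Callers normally use the i915_active_wait() wrapper declared
 * in i915_active.h.
 */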
int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

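/*
 * await_active() makes the caller (a request or an i915_sw_fence) wait
 * upon the fences tracked by @ref, as selected by @flags: the exclusive
 * fence (I915_ACTIVE_AWAIT_EXCL), every per-timeline node
 * (I915_ACTIVE_AWAIT_ACTIVE) and/or the pending barriers
 * (I915_ACTIVE_AWAIT_BARRIER).
 */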
static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
#ifdef __linux__
		kmem_cache_free(slab_cache, ref->cache);
#else
		pool_put(&slab_cache, ref->cache);
#endif
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active; due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

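/*
 * Preallocate one barrier node for each physical engine backing @engine,
 * either by reusing an idle node already in the tree or by allocating a
 * fresh one. The nodes are parked on ref->preallocated_barriers until
 * i915_active_acquire_barrier() splices them into the tree and onto
 * engine->barrier_tasks.
 */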
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier().
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
#ifdef __linux__
			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
#else
			node = pool_get(&slab_cache, PR_WAITOK);
#endif
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

#ifdef __linux__
		kmem_cache_free(slab_cache, node);
#else
		pool_put(&slab_cache, node);
#endif
	}
	return -ENOMEM;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 2);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Gets and returns a reference to the previous fence
 * (if not already completed), which the caller must put after making sure
 * that it is executed before the new fence. To ensure that the order of
 * fences within the timeline of the i915_active_fence is understood, it
 * should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	/*
	 * In case of fences embedded in i915_requests, their memory is
	 * SLAB_TYPESAFE_BY_RCU, so it can be reused right after release
	 * by new requests. Then, there is a risk of passing back a pointer
	 * to a new, completely unrelated fence that reuses the same memory
	 * while tracked under a different active tracker. Combined with i915
	 * perf open/close operations that build await dependencies between
	 * engine kernel context requests and user requests from different
	 * timelines, this can lead to dependency loops and infinite waits.
	 *
	 * As a countermeasure, we try to get a reference to the active->fence
	 * first, so if we succeed and pass it back to our user then it is not
	 * released and potentially reused by an unrelated request before the
	 * user has a chance to set up an await dependency on it.
	 */
	prev = i915_active_fence_get(active);
	if (fence == prev)
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * Both A and B have got a reference to C or NULL, depending on the
	 * timing of the interrupt handler. Let's assume that if A has got C
	 * then it has locked C first (before B).
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	if (prev)
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);

	/*
	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
	 * something else, depending on the timing of other threads and/or
	 * the interrupt handler. If not the same as before then A unlocks C
	 * if applicable and retries, starting from an attempt to get a new
	 * active->fence. Meanwhile, B follows the same path as A.
	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
	 * active->fence, locks it as soon as A completes, and possibly
	 * succeeds with cmpxchg.
	 */
	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
		if (prev) {
			spin_unlock(prev->lock);
			dma_fence_put(prev);
		}
		spin_unlock_irqrestore(fence->lock, flags);

		prev = i915_active_fence_get(active);
		GEM_BUG_ON(prev == fence);

		spin_lock_irqsave(fence->lock, flags);
		if (prev)
			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
	}

	/*
	 * If prev is NULL then the previous fence must have been signaled
	 * and we know that we are first on the timeline. If it is still
	 * present then, having the lock on that fence already acquired, we
	 * serialise with the interrupt handler, in the process of removing it
	 * from any future interrupt callback. A will then wait on C before
	 * executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 */
	if (prev) {
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

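/*
 * auto_active wraps an i915_active with a kref so that standalone trackers
 * built by i915_active_create() stay alive while either a user reference
 * (i915_active_get) or an active reference is held, and free themselves
 * once both are dropped.
 */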
struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

static void auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire, 0);

	return &aa->base;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

void i915_active_module_exit(void)
{
#ifdef __linux__
	kmem_cache_destroy(slab_cache);
#else
	pool_destroy(&slab_cache);
#endif
}

int __init i915_active_module_init(void)
{
#ifdef __linux__
	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;
#else
	pool_init(&slab_cache, sizeof(struct active_node),
	    CACHELINESIZE, IPL_TTY, 0, "drmsc", NULL);
#endif

	return 0;
}