xref: /linux/drivers/gpu/drm/i915/gt/intel_timeline.c (revision 24f90d66)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2018 Intel Corporation
 */

#include "i915_drv.h"

#include "i915_active.h"
#include "i915_syncmap.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_timeline.h"

#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))

#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS

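/*
 * A shared, per-gt page of HWSP (hardware status page) storage. The page
 * is carved into BITS_PER_TYPE(u64) slots of CACHELINE_BYTES each, one
 * seqno slot per timeline, with free_bitmap tracking which slots are still
 * available. Pages with at least one free slot sit on
 * gt->timelines.hwsp_free_list, protected by gt->timelines.hwsp_lock.
 */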
struct intel_timeline_hwsp {
	struct intel_gt *gt;
	struct intel_gt_timelines *gt_timelines;
	struct list_head free_link;
	struct i915_vma *vma;
	u64 free_bitmap;
};

static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

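/*
 * Grab one free seqno cacheline from a shared HWSP page, allocating and
 * publishing a fresh page on the free list if every existing page is full.
 * The returned vma is shared between timelines; the caller's slot within
 * it is reported through *cacheline.
 */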
static struct i915_vma *
hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
{
	struct intel_gt_timelines *gt = &timeline->gt->timelines;
	struct intel_timeline_hwsp *hwsp;

	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);

	spin_lock_irq(&gt->hwsp_lock);

	/* hwsp_free_list only contains HWSPs that have available cachelines */
	hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
					typeof(*hwsp), free_link);
	if (!hwsp) {
		struct i915_vma *vma;

		spin_unlock_irq(&gt->hwsp_lock);

		hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
		if (!hwsp)
			return ERR_PTR(-ENOMEM);

		vma = __hwsp_alloc(timeline->gt);
		if (IS_ERR(vma)) {
			kfree(hwsp);
			return vma;
		}

		GT_TRACE(timeline->gt, "new HWSP allocated\n");

		vma->private = hwsp;
		hwsp->gt = timeline->gt;
		hwsp->vma = vma;
		hwsp->free_bitmap = ~0ull;
		hwsp->gt_timelines = gt;

		spin_lock_irq(&gt->hwsp_lock);
		list_add(&hwsp->free_link, &gt->hwsp_free_list);
	}

	GEM_BUG_ON(!hwsp->free_bitmap);
	*cacheline = __ffs64(hwsp->free_bitmap);
	hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
	if (!hwsp->free_bitmap)
		list_del(&hwsp->free_link);

	spin_unlock_irq(&gt->hwsp_lock);

	GEM_BUG_ON(hwsp->vma->private != hwsp);
	return hwsp->vma;
}

static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
{
	struct intel_gt_timelines *gt = hwsp->gt_timelines;
	unsigned long flags;

	spin_lock_irqsave(&gt->hwsp_lock, flags);

	/* As a cacheline becomes available, publish the HWSP on the freelist */
	if (!hwsp->free_bitmap)
		list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);

	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
	hwsp->free_bitmap |= BIT_ULL(cacheline);

	/* And if no one is left using it, give the page back to the system */
	if (hwsp->free_bitmap == ~0ull) {
		i915_vma_put(hwsp->vma);
		list_del(&hwsp->free_link);
		kfree(hwsp);
	}

	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}

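/*
 * Cacheline teardown is deferred through RCU so that lockless readers
 * (see the rcu_read_lock() section in intel_timeline_read_hwsp()) can
 * still safely dereference the cacheline of a just-retired request
 * before we unpin the mapping and return the slot to the HWSP page.
 */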
static void __rcu_cacheline_free(struct rcu_head *rcu)
{
	struct intel_timeline_cacheline *cl =
		container_of(rcu, typeof(*cl), rcu);

	/* Must wait until after all *rq->hwsp are complete before removing */
	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));

	i915_active_fini(&cl->active);
	kfree(cl);
}

static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
	GEM_BUG_ON(!i915_active_is_idle(&cl->active));
	call_rcu(&cl->rcu, __rcu_cacheline_free);
}

__i915_active_call
static void __cacheline_retire(struct i915_active *active)
{
	struct intel_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	i915_vma_unpin(cl->hwsp->vma);
	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
		__idle_cacheline_free(cl);
}

static int __cacheline_active(struct i915_active *active)
{
	struct intel_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	__i915_vma_pin(cl->hwsp->vma);
	return 0;
}

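/*
 * A cacheline keeps a kernel WB mapping of its parent HWSP page for the
 * whole of its lifetime, with the cacheline index packed into the low bits
 * of cl->vaddr. The embedded i915_active tracks every request still
 * reading the seqno slot (e.g. remote semaphore waiters), pinning the
 * backing vma while busy and dropping that pin again on retirement.
 */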
static struct intel_timeline_cacheline *
cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
{
	struct intel_timeline_cacheline *cl;
	void *vaddr;

	GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		kfree(cl);
		return ERR_CAST(vaddr);
	}

	cl->hwsp = hwsp;
	cl->vaddr = page_pack_bits(vaddr, cacheline);

	i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);

	return cl;
}

static void cacheline_acquire(struct intel_timeline_cacheline *cl,
			      u32 ggtt_offset)
{
	if (!cl)
		return;

	cl->ggtt_offset = ggtt_offset;
	i915_active_acquire(&cl->active);
}

static void cacheline_release(struct intel_timeline_cacheline *cl)
{
	if (cl)
		i915_active_release(&cl->active);
}

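/*
 * Drop the timeline's reference to its cacheline. If requests are still
 * tracking the cacheline, only mark it with CACHELINE_FREE and let
 * __cacheline_retire() perform the real release once the last user
 * signals; otherwise free it immediately.
 */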
static void cacheline_free(struct intel_timeline_cacheline *cl)
{
	if (!i915_active_acquire_if_busy(&cl->active)) {
		__idle_cacheline_free(cl);
		return;
	}

	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

	i915_active_release(&cl->active);
}

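/*
 * A timeline's breadcrumbs live either in a caller-provided global HWSP
 * (such as an engine status page, at a fixed @offset), or, when @hwsp is
 * NULL, in a private cacheline carved out of a shared per-gt HWSP page.
 * Only the latter emits an initial breadcrumb and can have its cacheline
 * replaced on seqno wraparound (see __intel_timeline_get_seqno()).
 */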
static int intel_timeline_init(struct intel_timeline *timeline,
			       struct intel_gt *gt,
			       struct i915_vma *hwsp,
			       unsigned int offset)
{
	void *vaddr;

	kref_init(&timeline->kref);
	atomic_set(&timeline->pin_count, 0);

	timeline->gt = gt;

	timeline->has_initial_breadcrumb = !hwsp;
	timeline->hwsp_cacheline = NULL;

	if (!hwsp) {
		struct intel_timeline_cacheline *cl;
		unsigned int cacheline;

		hwsp = hwsp_alloc(timeline, &cacheline);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);

		cl = cacheline_alloc(hwsp->private, cacheline);
		if (IS_ERR(cl)) {
			__idle_hwsp_free(hwsp->private, cacheline);
			return PTR_ERR(cl);
		}

		timeline->hwsp_cacheline = cl;
		timeline->hwsp_offset = cacheline * CACHELINE_BYTES;

		vaddr = page_mask_bits(cl->vaddr);
	} else {
		timeline->hwsp_offset = offset;
		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);
	}

	timeline->hwsp_seqno =
		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);

	timeline->hwsp_ggtt = i915_vma_get(hwsp);
	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	mutex_init(&timeline->mutex);

	INIT_ACTIVE_FENCE(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);

	return 0;
}

void intel_gt_init_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	spin_lock_init(&timelines->lock);
	INIT_LIST_HEAD(&timelines->active_list);

	spin_lock_init(&timelines->hwsp_lock);
	INIT_LIST_HEAD(&timelines->hwsp_free_list);
}

static void intel_timeline_fini(struct intel_timeline *timeline)
{
	GEM_BUG_ON(atomic_read(&timeline->pin_count));
	GEM_BUG_ON(!list_empty(&timeline->requests));
	GEM_BUG_ON(timeline->retire);

	if (timeline->hwsp_cacheline)
		cacheline_free(timeline->hwsp_cacheline);
	else
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
}

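/*
 * Usage sketch, illustrative only: error handling is elided, and
 * intel_timeline_put() is assumed to be the usual kref_put() wrapper
 * around __intel_timeline_free().
 *
 *	tl = __intel_timeline_create(gt, NULL, 0);
 *	intel_timeline_pin(tl, NULL);
 *	intel_timeline_get_seqno(tl, rq, &seqno);
 *	...
 *	intel_timeline_unpin(tl);
 *	intel_timeline_put(tl);
 *
 * A NULL @global_hwsp requests a private cacheline; pinning then maps the
 * slot into the GGTT and each request consumes one seqno from the timeline.
 */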
struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
			struct i915_vma *global_hwsp,
			unsigned int offset)
{
	struct intel_timeline *timeline;
	int err;

	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
	if (!timeline)
		return ERR_PTR(-ENOMEM);

	err = intel_timeline_init(timeline, gt, global_hwsp, offset);
	if (err) {
		kfree(timeline);
		return ERR_PTR(err);
	}

	return timeline;
}

struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
				  unsigned int offset)
{
	struct i915_vma *hwsp = engine->status_page.vma;
	struct intel_timeline *tl;

	tl = __intel_timeline_create(engine->gt, hwsp, offset);
	if (IS_ERR(tl))
		return tl;

	/* Borrow a nearby lock; we only create these timelines during init */
	mutex_lock(&hwsp->vm->mutex);
	list_add_tail(&tl->engine_link, &engine->status_page.timelines);
	mutex_unlock(&hwsp->vm->mutex);

	return tl;
}

void __intel_timeline_pin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	atomic_inc(&tl->pin_count);
}

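/*
 * Pinning is lockless: the fast path only succeeds if the timeline is
 * already pinned. Otherwise we pin the HWSP into the GGTT ourselves and,
 * should another thread have raced us to become the first pinner, we
 * immediately drop our duplicate cacheline reference and vma pin so that
 * only the winner's are kept.
 */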
int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
{
	int err;

	if (atomic_add_unless(&tl->pin_count, 1, 0))
		return 0;

	err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
	if (err)
		return err;

	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
	if (atomic_fetch_inc(&tl->pin_count)) {
		cacheline_release(tl->hwsp_cacheline);
		__i915_vma_unpin(tl->hwsp_ggtt);
	}

	return 0;
}

void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
	/* Must be pinned to be writable, and no requests in flight. */
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
}

void intel_timeline_enter(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/*
	 * Pretend we are serialised by the timeline->mutex.
	 *
	 * While generally true, there are a few exceptions to the rule
	 * for the engine->kernel_context being used to manage power
	 * transitions. As the engine_park may be called from under any
	 * timeline, it uses the power mutex as a global serialisation
	 * lock to prevent any other request entering its timeline.
	 *
	 * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
	 *
	 * However, intel_gt_retire_requests() does not know which engine
	 * it is retiring along and so cannot partake in the engine-pm
	 * barrier, and there we use the tl->active_count as a means to
	 * pin the timeline in the active_list while the locks are dropped.
	 * Ergo, as that is outside of the engine-pm barrier, we need to
	 * use atomic to manipulate tl->active_count.
	 */
	lockdep_assert_held(&tl->mutex);

	if (atomic_add_unless(&tl->active_count, 1, 0))
		return;

	spin_lock(&timelines->lock);
	if (!atomic_fetch_inc(&tl->active_count)) {
		/*
		 * The HWSP is volatile, and may have been lost while inactive,
		 * e.g. across suspend/resume. Be paranoid, and ensure that
		 * the HWSP value matches our seqno so we don't proclaim
		 * the next request as already complete.
		 */
		intel_timeline_reset_seqno(tl);
		list_add_tail(&tl->link, &timelines->active_list);
	}
	spin_unlock(&timelines->lock);
}

void intel_timeline_exit(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/* See intel_timeline_enter() */
	lockdep_assert_held(&tl->mutex);

	GEM_BUG_ON(!atomic_read(&tl->active_count));
	if (atomic_add_unless(&tl->active_count, -1, 1))
		return;

	spin_lock(&timelines->lock);
	if (atomic_dec_and_test(&tl->active_count))
		list_del(&tl->link);
	spin_unlock(&timelines->lock);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);
}

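/*
 * Each request consumes one seqno, plus one more when the timeline emits
 * an initial breadcrumb, so such timelines only hand out even seqnos and
 * the odd value in between is left for the breadcrumb written at the start
 * of the request. The GEM_BUG_ON below asserts that parity is preserved.
 */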
static u32 timeline_advance(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

static void timeline_rollback(struct intel_timeline *tl)
{
	tl->seqno -= 1 + tl->has_initial_breadcrumb;
}

static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
			   struct i915_request *rq,
			   u32 *seqno)
{
	struct intel_timeline_cacheline *cl;
	unsigned int cacheline;
	struct i915_vma *vma;
	void *vaddr;
	int err;

	might_lock(&tl->gt->ggtt->vm.mutex);
	GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);

	/*
	 * If there is an outstanding GPU reference to this cacheline,
	 * such as it being sampled by a HW semaphore on another timeline,
	 * we cannot wrap around our seqno value (the HW semaphore does
	 * a strict greater-than-or-equals compare, not i915_seqno_passed).
	 * So if the cacheline is still busy, we must detach ourselves
	 * from it and leave it inflight alongside its users.
	 *
	 * However, if nobody is watching and we can guarantee that nobody
	 * will, we could simply reuse the same cacheline.
	 *
	 * if (i915_active_request_is_signaled(&tl->last_request) &&
	 *     i915_active_is_signaled(&tl->hwsp_cacheline->active))
	 *	return 0;
	 *
	 * That seems unlikely for a busy timeline that needed to wrap in
	 * the first place, so just replace the cacheline.
	 */

	vma = hwsp_alloc(tl, &cacheline);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_rollback;
	}

	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (err) {
		__idle_hwsp_free(vma->private, cacheline);
		goto err_rollback;
	}

	cl = cacheline_alloc(vma->private, cacheline);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		__idle_hwsp_free(vma->private, cacheline);
		goto err_unpin;
	}
	GEM_BUG_ON(cl->hwsp->vma != vma);

	/*
	 * Attach the old cacheline to the current request, so that we only
	 * free it after the current request is retired, which ensures that
	 * all writes into the cacheline from previous requests are complete.
	 */
	err = i915_active_ref(&tl->hwsp_cacheline->active,
			      tl->fence_context,
			      &rq->fence);
	if (err)
		goto err_cacheline;

	cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
	cacheline_free(tl->hwsp_cacheline);

	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
	i915_vma_put(tl->hwsp_ggtt);

	tl->hwsp_ggtt = i915_vma_get(vma);

	vaddr = page_mask_bits(cl->vaddr);
	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
	tl->hwsp_seqno =
		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

	tl->hwsp_offset += i915_ggtt_offset(vma);
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	cacheline_acquire(cl, tl->hwsp_offset);
	tl->hwsp_cacheline = cl;

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;

err_cacheline:
	cacheline_free(cl);
err_unpin:
	i915_vma_unpin(vma);
err_rollback:
	timeline_rollback(tl);
	return err;
}

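/*
 * The HW compares breadcrumb values with a simple greater-or-equal test,
 * so once the 32-bit seqno wraps to zero we cannot keep signalling through
 * the same cacheline while other contexts may still be waiting on the old,
 * large values; take the slow path above and swap in a fresh cacheline.
 */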
int intel_timeline_get_seqno(struct intel_timeline *tl,
			     struct i915_request *rq,
			     u32 *seqno)
{
	*seqno = timeline_advance(tl);

	/* Replace the HWSP on wraparound for HW semaphores */
	if (unlikely(!*seqno && tl->hwsp_cacheline))
		return __intel_timeline_get_seqno(tl, rq, seqno);

	return 0;
}

static int cacheline_ref(struct intel_timeline_cacheline *cl,
			 struct i915_request *rq)
{
	return i915_active_add_request(&cl->active, rq);
}

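/*
 * Look up the GGTT address of @from's breadcrumb slot so that @to may wait
 * on it with a HW semaphore, keeping the cacheline alive on behalf of @to
 * via its i915_active. Returns 0 with *hwsp set on success, a negative
 * error code on failure, or 1 if @from has already completed (or its
 * cacheline has since been recycled) and no wait is required.
 */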
int intel_timeline_read_hwsp(struct i915_request *from,
			     struct i915_request *to,
			     u32 *hwsp)
{
	struct intel_timeline_cacheline *cl;
	int err;

	GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));

	rcu_read_lock();
	cl = rcu_dereference(from->hwsp_cacheline);
	if (i915_request_signaled(from)) /* confirm cacheline is valid */
		goto unlock;
	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
		goto unlock; /* seqno wrapped and completed! */
	if (unlikely(__i915_request_is_complete(from)))
		goto release;
	rcu_read_unlock();

	err = cacheline_ref(cl, to);
	if (err)
		goto out;

	*hwsp = cl->ggtt_offset;
out:
	i915_active_release(&cl->active);
	return err;

release:
	i915_active_release(&cl->active);
unlock:
	rcu_read_unlock();
	return 1;
}

void intel_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	if (!atomic_dec_and_test(&tl->pin_count))
		return;

	cacheline_release(tl->hwsp_cacheline);

	__i915_vma_unpin(tl->hwsp_ggtt);
}

void __intel_timeline_free(struct kref *kref)
{
	struct intel_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	intel_timeline_fini(timeline);
	kfree_rcu(timeline, rcu);
}

void intel_gt_fini_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	GEM_BUG_ON(!list_empty(&timelines->active_list));
	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}

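/*
 * Debug dump of all active timelines (e.g. from debugfs). timelines->lock
 * is dropped while printing, so each element is first pinned on the
 * active_list with an extra active_count and an extra kref; the final
 * reference is released from a local list only after the spinlock has
 * been dropped for good.
 */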
void intel_gt_show_timelines(struct intel_gt *gt,
			     struct drm_printer *m,
			     void (*show_request)(struct drm_printer *m,
						  const struct i915_request *rq,
						  const char *prefix,
						  int indent))
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	LIST_HEAD(free);

	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		unsigned long count, ready, inflight;
		struct i915_request *rq, *rn;
		struct dma_fence *fence;

		if (!mutex_trylock(&tl->mutex)) {
			drm_printf(m, "Timeline %llx: busy; skipping\n",
				   tl->fence_context);
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock(&timelines->lock);

		count = 0;
		ready = 0;
		inflight = 0;
		list_for_each_entry_safe(rq, rn, &tl->requests, link) {
			if (i915_request_completed(rq))
				continue;

			count++;
			if (i915_request_is_ready(rq))
				ready++;
			if (i915_request_is_active(rq))
				inflight++;
		}

		drm_printf(m, "Timeline %llx: { ", tl->fence_context);
		drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
			   count, ready, inflight);
		drm_printf(m, ", seqno: { current: %d, last: %d }",
			   *tl->hwsp_seqno, tl->seqno);
		fence = i915_active_fence_get(&tl->last_request);
		if (fence) {
			drm_printf(m, ", engine: %s",
				   to_request(fence)->engine->name);
			dma_fence_put(fence);
		}
		drm_printf(m, " }\n");

		if (show_request) {
			list_for_each_entry_safe(rq, rn, &tl->requests, link)
				show_request(m, rq, "", 2);
		}

		mutex_unlock(&tl->mutex);
		spin_lock(&timelines->lock);

		/* Resume list iteration after reacquiring spinlock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock(&timelines->lock);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
#endif