/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_TIMELINE_TYPES_H__
#define __I915_TIMELINE_TYPES_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#include "i915_active_types.h"

struct i915_vma;
struct i915_syncmap;
struct intel_gt;

struct intel_timeline {
	u64 fence_context;
	u32 seqno;

	struct rwlock mutex; /* protects the flow of requests */

	/*
	 * pin_count and active_count track essentially the same thing:
	 * How many requests are in flight or may be under construction.
	 *
	 * We need two distinct counters so that we can assign different
	 * lifetimes to the events for different use-cases. For example,
	 * we want to permanently keep the timeline pinned for the kernel
	 * context so that we can issue requests at any time without having
	 * to acquire space in the GGTT. However, we want to keep tracking
	 * the activity (to be able to detect when we become idle) along that
	 * permanently pinned timeline and so end up requiring two counters.
	 *
	 * Note that the active_count is protected by the intel_timeline.mutex,
	 * but the pin_count is protected by a combination of serialisation
	 * from the intel_context caller plus internal atomicity.
	 */
	atomic_t pin_count;
	atomic_t active_count;
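
	/*
	 * Illustrative sketch only (not part of this header): requests are
	 * constructed against a pinned timeline, with each pin balanced by
	 * an unpin once the caller is done. The exact signature of the pin
	 * helper varies between versions (e.g. whether a ww context is
	 * passed), so the calls below are schematic:
	 *
	 *	if (!intel_timeline_pin(tl, ...)) {
	 *		... build and emit requests on tl ...
	 *		intel_timeline_unpin(tl);
	 *	}
	 */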

	void *hwsp_map;
	const u32 *hwsp_seqno;
	struct i915_vma *hwsp_ggtt;
	u32 hwsp_offset;

	bool has_initial_breadcrumb;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head requests;

	/*
	 * Contains an RCU-guarded pointer to the last request. No reference is
	 * held to the request; users must carefully acquire a reference to
	 * the request using i915_active_fence_get(), or manage the RCU
	 * protection themselves (cf. the i915_active_fence API).
	 */
	struct i915_active_fence last_request;
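
	/*
	 * Illustrative sketch only: the usual way to sample last_request is
	 * via i915_active_fence_get(), which takes the RCU read lock itself
	 * and returns a referenced fence (or NULL) that the caller later
	 * drops with dma_fence_put():
	 *
	 *	struct dma_fence *fence;
	 *
	 *	fence = i915_active_fence_get(&tl->last_request);
	 *	if (fence) {
	 *		... wait on or inspect the fence ...
	 *		dma_fence_put(fence);
	 *	}
	 */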

	struct i915_active active;

	/** A chain of completed timelines ready for early retirement. */
	struct intel_timeline *retire;

	/**
	 * We track the most recent seqno that we wait on in every context so
	 * that we only have to emit a new await and dependency on a more
	 * recent sync point. As the contexts may be executed out-of-order, we
	 * have to track each individually and cannot rely on an absolute
	 * global_seqno. When we know that all tracked fences are completed
	 * (i.e. when the driver is idle), we know that the syncmap is
	 * redundant and we can discard it without loss of generality.
	 */
	struct i915_syncmap *sync;
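
	/*
	 * Illustrative sketch only (helpers declared in i915_syncmap.h):
	 * before emitting a new await on a foreign context, check whether
	 * this timeline has already waited on a later sync point, and
	 * record the new one otherwise:
	 *
	 *	if (!i915_syncmap_is_later(&tl->sync, context, seqno))
	 *		err = i915_syncmap_set(&tl->sync, context, seqno);
	 */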

	struct list_head link;
	struct intel_gt *gt;

	struct list_head engine_link;

	struct kref kref;
	struct rcu_head rcu;
};

#endif /* __I915_TIMELINE_TYPES_H__ */