15ca02815Sjsg /* SPDX-License-Identifier: MIT */
2c349dbc7Sjsg /*
3c349dbc7Sjsg  * Copyright © 2019 Intel Corporation
4c349dbc7Sjsg  */
5c349dbc7Sjsg 
6c349dbc7Sjsg #ifndef __INTEL_CONTEXT_TYPES__
7c349dbc7Sjsg #define __INTEL_CONTEXT_TYPES__
8c349dbc7Sjsg 
9c349dbc7Sjsg #include <linux/average.h>
10c349dbc7Sjsg #include <linux/kref.h>
11c349dbc7Sjsg #include <linux/list.h>
12c349dbc7Sjsg #include <linux/mutex.h>
13c349dbc7Sjsg #include <linux/types.h>
14c349dbc7Sjsg 
15c349dbc7Sjsg #include "i915_active_types.h"
165ca02815Sjsg #include "i915_sw_fence.h"
17c349dbc7Sjsg #include "i915_utils.h"
18c349dbc7Sjsg #include "intel_engine_types.h"
19c349dbc7Sjsg #include "intel_sseu.h"
20c349dbc7Sjsg 
215ca02815Sjsg #include "uc/intel_guc_fwif.h"
22c349dbc7Sjsg 
/*
 * CONTEXT_REDZONE: poison pattern (POISON_INUSE) — NOTE(review): presumably
 * written into a guard region of the context state to detect overruns;
 * confirm at the use sites, which are not visible in this header.
 */
235ca02815Sjsg #define CONTEXT_REDZONE POISON_INUSE
/* Declares struct ewma_runtime, used for intel_context.stats.runtime.avg. */
24c349dbc7Sjsg DECLARE_EWMA(runtime, 3, 8);
25c349dbc7Sjsg 
26c349dbc7Sjsg struct i915_gem_context;
27ad8b1aafSjsg struct i915_gem_ww_ctx;
28c349dbc7Sjsg struct i915_vma;
29ad8b1aafSjsg struct intel_breadcrumbs;
30c349dbc7Sjsg struct intel_context;
31c349dbc7Sjsg struct intel_ring;
32c349dbc7Sjsg 
/*
 * struct intel_context_ops - per-backend virtual function table for an
 * intel_context.  Each submission backend supplies its own implementation;
 * the hook semantics below that cannot be proven from this header alone are
 * marked as reviewer notes to be confirmed against the backends.
 */
33c349dbc7Sjsg struct intel_context_ops {
	/* Backend capability bits, tested via the COPS_* masks below. */
345ca02815Sjsg 	unsigned long flags;
355ca02815Sjsg #define COPS_HAS_INFLIGHT_BIT 0
365ca02815Sjsg #define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
375ca02815Sjsg 
381bb76ff1Sjsg #define COPS_RUNTIME_CYCLES_BIT 1
391bb76ff1Sjsg #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)
401bb76ff1Sjsg 
	/* One-shot allocation of backend state for @ce; returns 0 or -errno. */
41c349dbc7Sjsg 	int (*alloc)(struct intel_context *ce);
42c349dbc7Sjsg 
	/*
	 * Revoke @rq from the hardware with the given preemption timeout.
	 * NOTE(review): exact banning/cancellation semantics live in the
	 * backend implementations — confirm there.
	 */
431bb76ff1Sjsg 	void (*revoke)(struct intel_context *ce, struct i915_request *rq,
441bb76ff1Sjsg 		       unsigned int preempt_timeout_ms);
455ca02815Sjsg 
46*f005ef32Sjsg 	void (*close)(struct intel_context *ce);
47*f005ef32Sjsg 
	/*
	 * Pinning pipeline: pre_pin (may take @ww locks, yields a CPU
	 * @vaddr) -> pin, and the mirrored unpin -> post_unpin teardown.
	 */
48ad8b1aafSjsg 	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
49ad8b1aafSjsg 	int (*pin)(struct intel_context *ce, void *vaddr);
50c349dbc7Sjsg 	void (*unpin)(struct intel_context *ce);
51ad8b1aafSjsg 	void (*post_unpin)(struct intel_context *ce);
52c349dbc7Sjsg 
	/* Cancel an individual request already submitted on this context. */
535ca02815Sjsg 	void (*cancel_request)(struct intel_context *ce,
545ca02815Sjsg 			       struct i915_request *rq);
555ca02815Sjsg 
	/* Runtime activity transitions (context becomes busy/idle). */
56c349dbc7Sjsg 	void (*enter)(struct intel_context *ce);
57c349dbc7Sjsg 	void (*exit)(struct intel_context *ce);
58c349dbc7Sjsg 
	/* NOTE(review): GuC-style scheduling disable hook — confirm callers. */
595ca02815Sjsg 	void (*sched_disable)(struct intel_context *ce);
605ca02815Sjsg 
	/* Refresh the busyness accounting in ce->stats. */
61*f005ef32Sjsg 	void (*update_stats)(struct intel_context *ce);
62*f005ef32Sjsg 
63c349dbc7Sjsg 	void (*reset)(struct intel_context *ce);
	/* Final release; kref_put() callback, so it receives the embedded kref. */
64c349dbc7Sjsg 	void (*destroy)(struct kref *kref);
655ca02815Sjsg 
661bb76ff1Sjsg 	/* virtual/parallel engine/context interface */
675ca02815Sjsg 	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
681bb76ff1Sjsg 						unsigned int count,
691bb76ff1Sjsg 						unsigned long flags);
701bb76ff1Sjsg 	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
711bb76ff1Sjsg 						 unsigned int num_siblings,
721bb76ff1Sjsg 						 unsigned int width);
735ca02815Sjsg 	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
745ca02815Sjsg 					       unsigned int sibling);
75c349dbc7Sjsg };
76c349dbc7Sjsg 
/*
 * struct intel_context - per-(GEM context, engine) hardware context state.
 * Holds the pinned context image, ring and timeline, plus the bookkeeping
 * used by the execlists and GuC submission backends.
 */
77c349dbc7Sjsg struct intel_context {
78ad8b1aafSjsg 	/*
79ad8b1aafSjsg 	 * Note: Some fields may be accessed under RCU.
80ad8b1aafSjsg 	 *
81ad8b1aafSjsg 	 * Unless otherwise noted a field can safely be assumed to be protected
82ad8b1aafSjsg 	 * by strong reference counting.
83ad8b1aafSjsg 	 */
	/* Refcount overlaps the RCU head: once @ref hits zero the memory is
	 * only reused after a grace period, hence no kref_get_unless_zero(). */
84ad8b1aafSjsg 	union {
85ad8b1aafSjsg 		struct kref ref; /* no kref_get_unless_zero()! */
86ad8b1aafSjsg 		struct rcu_head rcu;
87ad8b1aafSjsg 	};
88c349dbc7Sjsg 
89c349dbc7Sjsg 	struct intel_engine_cs *engine;
	/* @inflight: engine currently executing this context; the low bits of
	 * the pointer encode a count, extracted by the helpers below. */
90c349dbc7Sjsg 	struct intel_engine_cs *inflight;
915ca02815Sjsg #define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
925ca02815Sjsg #define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
935ca02815Sjsg #define intel_context_inflight(ce) \
945ca02815Sjsg 	__intel_context_inflight(READ_ONCE((ce)->inflight))
955ca02815Sjsg #define intel_context_inflight_count(ce) \
965ca02815Sjsg 	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
97c349dbc7Sjsg 
	/* Address space for this context; @gem_context is RCU-protected. */
98c349dbc7Sjsg 	struct i915_address_space *vm;
99c349dbc7Sjsg 	struct i915_gem_context __rcu *gem_context;
100c349dbc7Sjsg 
101ad8b1aafSjsg 	/*
102ad8b1aafSjsg 	 * @signal_lock protects the list of requests that need signaling,
103ad8b1aafSjsg 	 * @signals. While there are any requests that need signaling,
104ad8b1aafSjsg 	 * we add the context to the breadcrumbs worker, and remove it
105ad8b1aafSjsg 	 * upon completion/cancellation of the last request.
106ad8b1aafSjsg 	 */
107ad8b1aafSjsg 	struct list_head signal_link; /* Accessed under RCU */
108ad8b1aafSjsg 	struct list_head signals; /* Guarded by signal_lock */
109ad8b1aafSjsg 	spinlock_t signal_lock; /* protects signals, the list of requests */
110c349dbc7Sjsg 
	/* Backing vma for the HW context state, plus ring and timeline used
	 * for building and tracking requests on this context. */
111c349dbc7Sjsg 	struct i915_vma *state;
1125ca02815Sjsg 	u32 ring_size;
113c349dbc7Sjsg 	struct intel_ring *ring;
114c349dbc7Sjsg 	struct intel_timeline *timeline;
115c349dbc7Sjsg 
	/* Bit indices for @flags (atomic bitops). */
116c349dbc7Sjsg 	unsigned long flags;
117c349dbc7Sjsg #define CONTEXT_BARRIER_BIT		0
118c349dbc7Sjsg #define CONTEXT_ALLOC_BIT		1
1195ca02815Sjsg #define CONTEXT_INIT_BIT		2
1205ca02815Sjsg #define CONTEXT_VALID_BIT		3
1215ca02815Sjsg #define CONTEXT_CLOSED_BIT		4
1225ca02815Sjsg #define CONTEXT_USE_SEMAPHORES		5
1235ca02815Sjsg #define CONTEXT_BANNED			6
1245ca02815Sjsg #define CONTEXT_FORCE_SINGLE_SUBMISSION	7
1255ca02815Sjsg #define CONTEXT_NOPREEMPT		8
1265ca02815Sjsg #define CONTEXT_LRCA_DIRTY		9
1271bb76ff1Sjsg #define CONTEXT_GUC_INIT		10
1281bb76ff1Sjsg #define CONTEXT_PERMA_PIN		11
1291bb76ff1Sjsg #define CONTEXT_IS_PARKING		12
1301bb76ff1Sjsg #define CONTEXT_EXITING			13
1315ca02815Sjsg 
	/* Per-context request watchdog timeout, in microseconds.
	 * NOTE(review): 0 presumably means disabled — confirm at consumers. */
1325ca02815Sjsg 	struct {
1335ca02815Sjsg 		u64 timeout_us;
1345ca02815Sjsg 	} watchdog;
135c349dbc7Sjsg 
	/* CPU pointer into the context image's register state.
	 * NOTE(review): presumably only valid while pinned — confirm. */
136c349dbc7Sjsg 	u32 *lrc_reg_state;
	/* HW context descriptor: viewable as the split lrca/ccid pair or as
	 * the combined 64-bit descriptor. */
137c349dbc7Sjsg 	union {
138c349dbc7Sjsg 		struct {
139c349dbc7Sjsg 			u32 lrca;
140c349dbc7Sjsg 			u32 ccid;
141c349dbc7Sjsg 		};
142c349dbc7Sjsg 		u64 desc;
143c349dbc7Sjsg 	} lrc;
144c349dbc7Sjsg 	u32 tag; /* cookie passed to HW to track this context on submission */
145c349dbc7Sjsg 
1461bb76ff1Sjsg 	/** stats: Context GPU engine busyness tracking. */
1471bb76ff1Sjsg 	struct intel_context_stats {
1481bb76ff1Sjsg 		u64 active;
1491bb76ff1Sjsg 
150c349dbc7Sjsg 		/* Time on GPU as tracked by the hw. */
151c349dbc7Sjsg 		struct {
152c349dbc7Sjsg 			struct ewma_runtime avg;
153c349dbc7Sjsg 			u64 total;
154c349dbc7Sjsg 			u32 last;
155c349dbc7Sjsg 			I915_SELFTEST_DECLARE(u32 num_underflow);
156c349dbc7Sjsg 			I915_SELFTEST_DECLARE(u32 max_underflow);
157c349dbc7Sjsg 		} runtime;
1581bb76ff1Sjsg 	} stats;
159c349dbc7Sjsg 
160c349dbc7Sjsg 	unsigned int active_count; /* protected by timeline->mutex */
161c349dbc7Sjsg 
	/* Pin bookkeeping; pin_mutex is an OpenBSD rwlock standing in for the
	 * Linux struct mutex of the upstream driver. */
162c349dbc7Sjsg 	atomic_t pin_count;
163c349dbc7Sjsg 	struct rwlock pin_mutex; /* guards pinning and associated on-gpuing */
164c349dbc7Sjsg 
165c349dbc7Sjsg 	/**
166c349dbc7Sjsg 	 * active: Active tracker for the rq activity (inc. external) on this
167c349dbc7Sjsg 	 * intel_context object.
168c349dbc7Sjsg 	 */
169c349dbc7Sjsg 	struct i915_active active;
170c349dbc7Sjsg 
171c349dbc7Sjsg 	const struct intel_context_ops *ops;
172c349dbc7Sjsg 
173c349dbc7Sjsg 	/** sseu: Control eu/slice partitioning */
174c349dbc7Sjsg 	struct intel_sseu sseu;
175ad8b1aafSjsg 
1763f069f93Sjsg 	/**
1773f069f93Sjsg 	 * pinned_contexts_link: List link for the engine's pinned contexts.
1783f069f93Sjsg 	 * This is only used if this is a perma-pinned kernel context and
1793f069f93Sjsg 	 * the list is assumed to only be manipulated during driver load
1803f069f93Sjsg 	 * or unload time so no mutex protection currently.
1813f069f93Sjsg 	 */
1823f069f93Sjsg 	struct list_head pinned_contexts_link;
1833f069f93Sjsg 
184ad8b1aafSjsg 	u8 wa_bb_page; /* if set, page num reserved for context workarounds */
1855ca02815Sjsg 
	/* GuC submission state; all members below guarded by @lock. */
1865ca02815Sjsg 	struct {
1871bb76ff1Sjsg 		/** @lock: protects everything in guc_state */
1885ca02815Sjsg 		spinlock_t lock;
1895ca02815Sjsg 		/**
1901bb76ff1Sjsg 		 * @sched_state: scheduling state of this context using GuC
1915ca02815Sjsg 		 * submission
1925ca02815Sjsg 		 */
1931bb76ff1Sjsg 		u32 sched_state;
1945ca02815Sjsg 		/*
1951bb76ff1Sjsg 		 * @fences: maintains a list of requests that are currently
1961bb76ff1Sjsg 		 * being fenced until a GuC operation completes
1975ca02815Sjsg 		 */
1985ca02815Sjsg 		struct list_head fences;
1991bb76ff1Sjsg 		/**
2001bb76ff1Sjsg 		 * @blocked: fence used to signal when the blocking of a
2011bb76ff1Sjsg 		 * context's submissions is complete.
2021bb76ff1Sjsg 		 */
2031bb76ff1Sjsg 		struct i915_sw_fence blocked;
2041bb76ff1Sjsg 		/** @requests: list of active requests on this context */
2051bb76ff1Sjsg 		struct list_head requests;
2061bb76ff1Sjsg 		/** @prio: the context's current guc priority */
2071bb76ff1Sjsg 		u8 prio;
2081bb76ff1Sjsg 		/**
2091bb76ff1Sjsg 		 * @prio_count: a counter of the number requests in flight in
2101bb76ff1Sjsg 		 * each priority bucket
2111bb76ff1Sjsg 		 */
2121bb76ff1Sjsg 		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
213*f005ef32Sjsg 		/**
214*f005ef32Sjsg 		 * @sched_disable_delay_work: worker to disable scheduling on this
215*f005ef32Sjsg 		 * context
216*f005ef32Sjsg 		 */
217*f005ef32Sjsg 		struct delayed_work sched_disable_delay_work;
2185ca02815Sjsg 	} guc_state;
2195ca02815Sjsg 
	/* GuC context id; members guarded by guc->submission_state.lock. */
2205ca02815Sjsg 	struct {
2211bb76ff1Sjsg 		/**
2221bb76ff1Sjsg 		 * @id: handle which is used to uniquely identify this context
2231bb76ff1Sjsg 		 * with the GuC, protected by guc->submission_state.lock
2245ca02815Sjsg 		 */
2251bb76ff1Sjsg 		u16 id;
2261bb76ff1Sjsg 		/**
2271bb76ff1Sjsg 		 * @ref: the number of references to the guc_id, when
2281bb76ff1Sjsg 		 * transitioning in and out of zero protected by
2291bb76ff1Sjsg 		 * guc->submission_state.lock
2305ca02815Sjsg 		 */
2311bb76ff1Sjsg 		atomic_t ref;
2321bb76ff1Sjsg 		/**
2331bb76ff1Sjsg 		 * @link: in guc->guc_id_list when the guc_id has no refs but is
2341bb76ff1Sjsg 		 * still valid, protected by guc->submission_state.lock
2351bb76ff1Sjsg 		 */
2361bb76ff1Sjsg 		struct list_head link;
2371bb76ff1Sjsg 	} guc_id;
2381bb76ff1Sjsg 
2391bb76ff1Sjsg 	/**
2401bb76ff1Sjsg 	 * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
2411bb76ff1Sjsg 	 * list when context is pending to be destroyed (deregistered with the
2421bb76ff1Sjsg 	 * GuC), protected by guc->submission_state.lock
2431bb76ff1Sjsg 	 */
2441bb76ff1Sjsg 	struct list_head destroyed_link;
2451bb76ff1Sjsg 
2461bb76ff1Sjsg 	/** @parallel: sub-structure for parallel submission members */
2471bb76ff1Sjsg 	struct {
2481bb76ff1Sjsg 		union {
2491bb76ff1Sjsg 			/**
2501bb76ff1Sjsg 			 * @child_list: parent's list of children
2511bb76ff1Sjsg 			 * contexts, no protection as immutable after context
2521bb76ff1Sjsg 			 * creation
2531bb76ff1Sjsg 			 */
2541bb76ff1Sjsg 			struct list_head child_list;
2551bb76ff1Sjsg 			/**
2561bb76ff1Sjsg 			 * @child_link: child's link into parent's list of
2571bb76ff1Sjsg 			 * children
2581bb76ff1Sjsg 			 */
2591bb76ff1Sjsg 			struct list_head child_link;
2601bb76ff1Sjsg 		};
2611bb76ff1Sjsg 		/** @parent: pointer to parent if child */
2621bb76ff1Sjsg 		struct intel_context *parent;
2631bb76ff1Sjsg 		/**
2641bb76ff1Sjsg 		 * @last_rq: last request submitted on a parallel context, used
2651bb76ff1Sjsg 		 * to insert submit fences between requests in the parallel
2661bb76ff1Sjsg 		 * context
2671bb76ff1Sjsg 		 */
2681bb76ff1Sjsg 		struct i915_request *last_rq;
2691bb76ff1Sjsg 		/**
2701bb76ff1Sjsg 		 * @fence_context: fence context composite fence when doing
2711bb76ff1Sjsg 		 * parallel submission
2721bb76ff1Sjsg 		 */
2731bb76ff1Sjsg 		u64 fence_context;
2741bb76ff1Sjsg 		/**
2751bb76ff1Sjsg 		 * @seqno: seqno for composite fence when doing parallel
2761bb76ff1Sjsg 		 * submission
2771bb76ff1Sjsg 		 */
2781bb76ff1Sjsg 		u32 seqno;
2791bb76ff1Sjsg 		/** @number_children: number of children if parent */
2801bb76ff1Sjsg 		u8 number_children;
2811bb76ff1Sjsg 		/** @child_index: index into child_list if child */
2821bb76ff1Sjsg 		u8 child_index;
2831bb76ff1Sjsg 		/** @guc: GuC specific members for parallel submission */
2841bb76ff1Sjsg 		struct {
2851bb76ff1Sjsg 			/** @wqi_head: cached head pointer in work queue */
2861bb76ff1Sjsg 			u16 wqi_head;
2871bb76ff1Sjsg 			/** @wqi_tail: cached tail pointer in work queue */
2881bb76ff1Sjsg 			u16 wqi_tail;
2891bb76ff1Sjsg 			/** @wq_head: pointer to the actual head in work queue */
2901bb76ff1Sjsg 			u32 *wq_head;
			/* NOTE(review): comment below says "head" but the field is
			 * the tail pointer — likely a copy/paste slip upstream. */
2911bb76ff1Sjsg 			/** @wq_tail: pointer to the actual head in work queue */
2921bb76ff1Sjsg 			u32 *wq_tail;
2931bb76ff1Sjsg 			/** @wq_status: pointer to the status in work queue */
2941bb76ff1Sjsg 			u32 *wq_status;
2951bb76ff1Sjsg 
2961bb76ff1Sjsg 			/**
2971bb76ff1Sjsg 			 * @parent_page: page in context state (ce->state) used
2981bb76ff1Sjsg 			 * by parent for work queue, process descriptor
2991bb76ff1Sjsg 			 */
3001bb76ff1Sjsg 			u8 parent_page;
3011bb76ff1Sjsg 		} guc;
3021bb76ff1Sjsg 	} parallel;
3031bb76ff1Sjsg 
3041bb76ff1Sjsg #ifdef CONFIG_DRM_I915_SELFTEST
3051bb76ff1Sjsg 	/**
3061bb76ff1Sjsg 	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
3071bb76ff1Sjsg 	 */
3081bb76ff1Sjsg 	bool drop_schedule_enable;
3091bb76ff1Sjsg 
3101bb76ff1Sjsg 	/**
3111bb76ff1Sjsg 	 * @drop_schedule_disable: Force drop of schedule disable G2H for
3121bb76ff1Sjsg 	 * selftest
3131bb76ff1Sjsg 	 */
3141bb76ff1Sjsg 	bool drop_schedule_disable;
3151bb76ff1Sjsg 
3161bb76ff1Sjsg 	/**
3171bb76ff1Sjsg 	 * @drop_deregister: Force drop of deregister G2H for selftest
3181bb76ff1Sjsg 	 */
3191bb76ff1Sjsg 	bool drop_deregister;
3201bb76ff1Sjsg #endif
321c349dbc7Sjsg };
322c349dbc7Sjsg 
323c349dbc7Sjsg #endif /* __INTEL_CONTEXT_TYPES__ */
324