xref: /dragonfly/sys/dev/drm/i915/i915_gem_context.h (revision 5ca0a96d)
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #ifndef __I915_GEM_CONTEXT_H__
26 #define __I915_GEM_CONTEXT_H__
27 
28 #include <linux/bitops.h>
29 #include <linux/list.h>
30 #include <linux/radix-tree.h>
31 
32 struct pid;
33 
34 struct drm_device;
35 struct drm_file;
36 
37 struct drm_i915_private;
38 struct drm_i915_file_private;
39 struct i915_hw_ppgtt;
40 struct i915_vma;
41 struct intel_ring;
42 
43 #define DEFAULT_CONTEXT_HANDLE 0
44 
45 /**
46  * struct i915_gem_context - client state
47  *
48  * The struct i915_gem_context represents the combined view of the driver and
49  * logical hardware state for a particular client.
50  */
/**
 * struct i915_gem_context - client state
 *
 * The struct i915_gem_context represents the combined view of the driver and
 * logical hardware state for a particular client.
 */
struct i915_gem_context {
	/** i915: i915 device backpointer */
	struct drm_i915_private *i915;

	/** file_priv: owning file descriptor; NULL for kernel-owned contexts */
	struct drm_i915_file_private *file_priv;

	/**
	 * @ppgtt: unique address space (GTT)
	 *
	 * In full-ppgtt mode, each context has its own address space ensuring
	 * complete separation of one client from all others.
	 *
	 * In other modes, this is a NULL pointer with the expectation that
	 * the caller uses the shared global GTT.
	 */
	struct i915_hw_ppgtt *ppgtt;

	/**
	 * @pid: process id of creator
	 *
	 * Note that who created the context may not be the principal user,
	 * as the context may be shared across a local socket. However,
	 * that should only affect the default context, all contexts created
	 * explicitly by the client are expected to be isolated.
	 */
	pid_t pid;

	/**
	 * @name: arbitrary name
	 *
	 * A name is constructed for the context from the creator's process
	 * name, pid and user handle in order to uniquely identify the
	 * context in messages.
	 */
	const char *name;

	/** link: placed on &drm_i915_private.context_list */
	struct list_head link;
	/* free_link: llist node; presumably for deferred freeing (see @rcu)
	 * — confirm against i915_gem_context.c */
	struct llist_node free_link;

	/**
	 * @ref: reference count
	 *
	 * A reference to a context is held by both the client who created it
	 * and on each request submitted to the hardware using the request
	 * (to ensure the hardware has access to the state until it has
	 * finished all pending writes). See i915_gem_context_get() and
	 * i915_gem_context_put() for access.
	 */
	struct kref ref;

	/**
	 * @rcu: rcu_head for deferred freeing.
	 */
	struct rcu_head rcu;

	/**
	 * @flags: small set of booleans
	 *
	 * NOTE(review): CONTEXT_NO_ZEROMAP is a bit *mask* (BIT(0)),
	 * whereas the remaining values are bit *numbers* passed to
	 * test_bit()/__set_bit() by the helpers below.  Do not mix the
	 * two conventions when adding new flags.
	 */
	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	1
#define CONTEXT_CLOSED			2
#define CONTEXT_BANNABLE		3
#define CONTEXT_BANNED			4
#define CONTEXT_FORCE_SINGLE_SUBMISSION	5

	/**
	 * @hw_id: unique identifier for the context
	 *
	 * The hardware needs to uniquely identify the context for a few
	 * functions like fault reporting, PASID, scheduling. The
	 * &drm_i915_private.context_hw_ida is used to assign a unique
	 * id for the lifetime of the context.
	 */
	unsigned int hw_id;

	/**
	 * @user_handle: userspace identifier
	 *
	 * A unique per-file identifier is generated from
	 * &drm_i915_file_private.contexts.  The default context is always
	 * DEFAULT_CONTEXT_HANDLE (0).
	 */
	u32 user_handle;

	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;

	/** ggtt_offset_bias: placement restriction for context objects */
	u32 ggtt_offset_bias;

	/** engine: per-engine logical HW state */
	struct intel_context {
		/* state: backing vma for the HW context image */
		struct i915_vma *state;
		/* ring: command ring used by this engine for this context */
		struct intel_ring *ring;
		/* lrc_*: logical ring context (execlists) register state and
		 * descriptor — see the execlists code for the exact layout */
		u32 *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
		bool initialised;
	} engine[I915_NUM_ENGINES];

	/** ring_size: size for allocating the per-engine ring buffer */
	u32 ring_size;
	/** desc_template: invariant fields for the HW context descriptor */
	u32 desc_template;

	/** guilty_count: How many times this context has caused a GPU hang. */
	atomic_t guilty_count;
	/**
	 * @active_count: How many times this context was active during a GPU
	 * hang, but did not cause it.
	 */
	atomic_t active_count;

#define CONTEXT_SCORE_GUILTY		10
#define CONTEXT_SCORE_BAN_THRESHOLD	40
	/** ban_score: Accumulated score of all hangs caused by this context. */
	atomic_t ban_score;

	/** remap_slice: Bitmask of cache lines that need remapping */
	u8 remap_slice;

	/** handles_vma: rbtree to look up our context specific obj/vma for
	 * the user handle. (user handles are per fd, but the binding is
	 * per vm, which may be one per context or shared with the global GTT)
	 */
	struct radix_tree_root handles_vma;

	/** handles_list: reverse list of all the rbtree entries in use for
	 * this context, which allows us to free all the allocations on
	 * context close.
	 */
	struct list_head handles_list;
};
196 
197 static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
198 {
199 	return test_bit(CONTEXT_CLOSED, &ctx->flags);
200 }
201 
202 static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
203 {
204 	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
205 	__set_bit(CONTEXT_CLOSED, &ctx->flags);
206 }
207 
208 static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
209 {
210 	return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
211 }
212 
213 static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
214 {
215 	__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
216 }
217 
218 static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
219 {
220 	__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
221 }
222 
223 static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
224 {
225 	return test_bit(CONTEXT_BANNABLE, &ctx->flags);
226 }
227 
228 static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
229 {
230 	__set_bit(CONTEXT_BANNABLE, &ctx->flags);
231 }
232 
233 static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
234 {
235 	__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
236 }
237 
238 static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
239 {
240 	return test_bit(CONTEXT_BANNED, &ctx->flags);
241 }
242 
243 static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
244 {
245 	__set_bit(CONTEXT_BANNED, &ctx->flags);
246 }
247 
248 static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
249 {
250 	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
251 }
252 
253 static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
254 {
255 	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
256 }
257 
258 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
259 {
260 	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
261 }
262 
263 static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
264 {
265 	return !ctx->file_priv;
266 }
267 
/* i915_gem_context.c */

/* Driver-wide context bookkeeping, called from device init/teardown. */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

/* Per-client (per drm_file) context setup and teardown. */
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

/* Context switching on request submission. */
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);

/* Final release: kref callback used by i915_gem_context_put(). */
void i915_gem_context_release(struct kref *ctx_ref);
/* GVT (graphics virtualization) context creation. */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

/* ioctl entry points; semantics live in i915_gem_context.c. */
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);
294 
295 static inline struct i915_gem_context *
296 i915_gem_context_get(struct i915_gem_context *ctx)
297 {
298 	kref_get(&ctx->ref);
299 	return ctx;
300 }
301 
302 static inline void i915_gem_context_put(struct i915_gem_context *ctx)
303 {
304 	kref_put(&ctx->ref, i915_gem_context_release);
305 }
306 
307 #endif /* !__I915_GEM_CONTEXT_H__ */
308