xref: /openbsd/sys/dev/pci/drm/i915/gt/intel_engine_user.c (revision 88ee63d7)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/list.h>
7 #include <linux/list_sort.h>
8 #include <linux/llist.h>
9 
10 #include "i915_drv.h"
11 #include "intel_engine.h"
12 #include "intel_engine_user.h"
13 #include "intel_gt.h"
14 #include "uc/intel_guc_submission.h"
15 
16 struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private * i915,u8 class,u8 instance)17 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
18 {
19 	struct rb_node *p = i915->uabi_engines.rb_node;
20 
21 	while (p) {
22 		struct intel_engine_cs *it =
23 			rb_entry(p, typeof(*it), uabi_node);
24 
25 		if (class < it->uabi_class)
26 			p = p->rb_left;
27 		else if (class > it->uabi_class ||
28 			 instance > it->uabi_instance)
29 			p = p->rb_right;
30 		else if (instance < it->uabi_instance)
31 			p = p->rb_left;
32 		else
33 			return it;
34 	}
35 
36 	return NULL;
37 }
38 
intel_engine_add_user(struct intel_engine_cs * engine)39 void intel_engine_add_user(struct intel_engine_cs *engine)
40 {
41 	llist_add((struct llist_node *)&engine->uabi_node,
42 		  (struct llist_head *)&engine->i915->uabi_engines);
43 }
44 
/* Sentinel marking engine classes that are not exposed through the uapi. */
#define I915_NO_UABI_CLASS ((u16)(-1))

/*
 * Translation table from the driver-internal engine class (array index)
 * to the engine class value advertised to userspace.
 */
static const u16 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
	[OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
55 
engine_cmp(void * priv,const struct list_head * A,const struct list_head * B)56 static int engine_cmp(void *priv, const struct list_head *A,
57 		      const struct list_head *B)
58 {
59 	const struct intel_engine_cs *a =
60 		container_of((struct rb_node *)A, typeof(*a), uabi_node);
61 	const struct intel_engine_cs *b =
62 		container_of((struct rb_node *)B, typeof(*b), uabi_node);
63 
64 	if (uabi_classes[a->class] < uabi_classes[b->class])
65 		return -1;
66 	if (uabi_classes[a->class] > uabi_classes[b->class])
67 		return 1;
68 
69 	if (a->instance < b->instance)
70 		return -1;
71 	if (a->instance > b->instance)
72 		return 1;
73 
74 	return 0;
75 }
76 
get_engines(struct drm_i915_private * i915)77 static struct llist_node *get_engines(struct drm_i915_private *i915)
78 {
79 	return llist_del_all((struct llist_head *)&i915->uabi_engines);
80 }
81 
sort_engines(struct drm_i915_private * i915,struct list_head * engines)82 static void sort_engines(struct drm_i915_private *i915,
83 			 struct list_head *engines)
84 {
85 	struct llist_node *pos, *next;
86 
87 	llist_for_each_safe(pos, next, get_engines(i915)) {
88 		struct intel_engine_cs *engine =
89 			container_of((struct rb_node *)pos, typeof(*engine),
90 				     uabi_node);
91 		list_add((struct list_head *)&engine->uabi_node, engines);
92 	}
93 	list_sort(NULL, engines, engine_cmp);
94 }
95 
96 #ifdef __linux__
set_scheduler_caps(struct drm_i915_private * i915)97 static void set_scheduler_caps(struct drm_i915_private *i915)
98 {
99 	static const struct {
100 		u8 engine;
101 		u8 sched;
102 	} map[] = {
103 #define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
104 		MAP(HAS_PREEMPTION, PREEMPTION),
105 		MAP(HAS_SEMAPHORES, SEMAPHORES),
106 		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
107 #undef MAP
108 	};
109 	struct intel_engine_cs *engine;
110 	u32 enabled, disabled;
111 
112 	enabled = 0;
113 	disabled = 0;
114 	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
115 		int i;
116 
117 		if (engine->sched_engine->schedule)
118 			enabled |= (I915_SCHEDULER_CAP_ENABLED |
119 				    I915_SCHEDULER_CAP_PRIORITY);
120 		else
121 			disabled |= (I915_SCHEDULER_CAP_ENABLED |
122 				     I915_SCHEDULER_CAP_PRIORITY);
123 
124 		if (intel_uc_uses_guc_submission(&engine->gt->uc))
125 			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
126 
127 		for (i = 0; i < ARRAY_SIZE(map); i++) {
128 			if (engine->flags & BIT(map[i].engine))
129 				enabled |= BIT(map[i].sched);
130 			else
131 				disabled |= BIT(map[i].sched);
132 		}
133 	}
134 
135 	i915->caps.scheduler = enabled & ~disabled;
136 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
137 		i915->caps.scheduler = 0;
138 }
139 #else
140 /* without the pointless ilog2 -> BIT() */
/*
 * OpenBSD variant of set_scheduler_caps(): same logic as the Linux
 * version above, but map[] stores the flag/cap values directly instead
 * of their ilog2() bit indices.
 *
 * NOTE(review): storing the flag values in u8 fields assumes every
 * I915_ENGINE_* flag and I915_SCHEDULER_CAP_* value used here fits in
 * 8 bits — confirm against the definitions before adding new entries.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { I915_ENGINE_##x, I915_SCHEDULER_CAP_##y }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&engine->gt->uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & map[i].engine)
				enabled |= map[i].sched;
			else
				disabled |= map[i].sched;
		}
	}

	/* Only advertise capabilities that every engine supports. */
	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
183 #endif
184 
/*
 * Return the short mnemonic ("rcs", "bcs", ...) used to build the
 * user-visible engine name for @class, or "xxx" for unknown classes.
 */
const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
		[OTHER_CLASS] = "other",
		[COMPUTE_CLASS] = "ccs",
	};
	const char *repr = NULL;

	if (class < ARRAY_SIZE(uabi_names))
		repr = uabi_names[class];

	return repr ? repr : "xxx";
}
201 
/*
 * Cursor used while walking the sorted engine list to assign legacy
 * ring indices: tracks the current (gt, class) run and how many
 * instances of that class have been numbered so far.
 */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};
207 
legacy_ring_idx(const struct legacy_ring * ring)208 static int legacy_ring_idx(const struct legacy_ring *ring)
209 {
210 	static const struct {
211 		u8 base, max;
212 	} map[] = {
213 		[RENDER_CLASS] = { RCS0, 1 },
214 		[COPY_ENGINE_CLASS] = { BCS0, 1 },
215 		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
216 		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
217 		[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
218 	};
219 
220 	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
221 		return INVALID_ENGINE;
222 
223 	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
224 		return INVALID_ENGINE;
225 
226 	return map[ring->class].base + ring->instance;
227 }
228 
add_legacy_ring(struct legacy_ring * ring,struct intel_engine_cs * engine)229 static void add_legacy_ring(struct legacy_ring *ring,
230 			    struct intel_engine_cs *engine)
231 {
232 	if (engine->gt != ring->gt || engine->class != ring->class) {
233 		ring->gt = engine->gt;
234 		ring->class = engine->class;
235 		ring->instance = 0;
236 	}
237 
238 	engine->legacy_idx = legacy_ring_idx(ring);
239 	if (engine->legacy_idx != INVALID_ENGINE)
240 		ring->instance++;
241 }
242 
/* Rebuild engine->name as "<name><instance>", logging the old name. */
static void engine_rename(struct intel_engine_cs *engine, const char *name, u16 instance)
{
	char prev[sizeof(engine->name)];

	/* Keep a copy of the internal name purely for the debug message. */
	memcpy(prev, engine->name, sizeof(engine->name));

	scnprintf(engine->name, sizeof(engine->name), "%s%u", name, instance);
	drm_dbg(&engine->i915->drm, "renamed %s to %s\n", prev, engine->name);
}
251 
/*
 * Publish all staged engines to userspace: assign uabi class/instance
 * numbers, give each engine its final user-facing name, build the
 * uabi_engines rb-tree, and (under debug configs) self-check the result
 * before computing the scheduler capability mask.
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	u16 name_instance, other_instance = 0;
	struct legacy_ring ring = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	DRM_LIST_HEAD(engines);

	/* Drain the staging llist into a (class, instance) sorted list. */
	sort_engines(i915, &engines);

	/*
	 * The input is already sorted, so the rb-tree is built by always
	 * linking each new node to the right of the previous insertion.
	 */
	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		/* Instances count per uabi class; OTHER_CLASS separately. */
		if (engine->uabi_class == I915_NO_UABI_CLASS) {
			name_instance = other_instance++;
		} else {
			GEM_BUG_ON(engine->uabi_class >=
				   ARRAY_SIZE(i915->engine_uabi_class_count));
			name_instance =
				i915->engine_uabi_class_count[engine->uabi_class]++;
		}
		engine->uabi_instance = name_instance;

		/*
		 * Replace the internal name with the final user and log facing
		 * name.
		 */
		engine_rename(engine,
			      intel_engine_class_repr(engine->class),
			      name_instance);

		/* Engines without a uabi class are renamed but not exposed. */
		if (engine->uabi_class == I915_NO_UABI_CLASS)
			continue;

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		/* The node just inserted must resolve back to itself. */
		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Verify every counted (class, instance) pair is findable. */
		for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
			for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* On any inconsistency, hide all engines from userspace. */
		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}
363 
intel_engines_has_context_isolation(struct drm_i915_private * i915)364 unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
365 {
366 	struct intel_engine_cs *engine;
367 	unsigned int which;
368 
369 	which = 0;
370 	for_each_uabi_engine(engine, i915)
371 		if (engine->default_state)
372 			which |= BIT(engine->uabi_class);
373 
374 	return which;
375 }
376