xref: /openbsd/sys/dev/pci/drm/i915/gt/sysfs_engines.c (revision 5ca02815)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

#ifdef __linux__

struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
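
/*
 * A sketch of the sysfs layout these attributes produce (the card index
 * and engine names below are illustrative; they depend on the system):
 *
 *   /sys/class/drm/card0/engine/rcs0/name
 *   /sys/class/drm/card0/engine/rcs0/class
 *   /sys/class/drm/card0/engine/rcs0/instance
 *   /sys/class/drm/card0/engine/rcs0/mmio_base
 *   ...
 */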

static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    unsigned long caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_LONG);

	len = 0;
	for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += snprintf(buf + len, PAGE_SIZE - len,
						"[%x] ", n);
		} else {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
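
/*
 * Sketch of the two capability views (output is illustrative and
 * depends on the engine class and platform):
 *
 *   $ cat /sys/class/drm/card0/engine/vcs0/capabilities
 *   hevc sfc
 *   $ cat /sys/class/drm/card0/engine/vcs0/known_capabilities
 *   hevc sfc
 *
 * caps_show() prints the bits set on this instance, warning about any
 * bit it cannot name ("[%x]"), while all_caps_show() passes ~0ul with
 * show_unknown=false so only the names known for the class are listed.
 */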

static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if it is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep by first spinning
	 * on the request -- if it completes in less time than it would
	 * take to go to sleep, process the interrupt and return back to
	 * the client, then we have saved the client some latency, albeit
	 * at the cost of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is
	 * unlikely to complete, deciding how long it is worth spinning
	 * for is an arbitrary decision: trading off power vs latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_nsecs(2))
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
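
/*
 * Illustrative usage (a sketch; the path and the default value depend
 * on the system and the Kconfig profile):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *   8000
 *   # echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *
 * Writing 0 disables the pre-sleep busywait entirely; anything beyond
 * two jiffies' worth of nanoseconds is rejected with -EINVAL above.
 */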

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only those of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
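
/*
 * Illustrative usage (a sketch; the default comes from the Kconfig
 * profile):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 *   1
 *   # echo 5 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 *
 * If the engine is mid-timeslice, timeslice_store() also rearms the
 * execlists timer so the new quantum takes effect immediately.
 */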

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
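
/*
 * Illustrative usage (a sketch; the default comes from the Kconfig
 * profile):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 *   100
 *   # echo 0 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 *
 * Writing 0 removes the grace period, so the engine is reset as soon
 * as submission is disabled.
 */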

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
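
/*
 * Illustrative usage (a sketch; the default comes from the Kconfig
 * profile):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 *   640
 *   # echo 100 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 *
 * If a preemption is already in flight (execlists.pending[0] is set),
 * preempt_timeout_store() rearms the preempt timer with the new value.
 */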

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection.  However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
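
/*
 * Illustrative usage (a sketch; the default comes from the Kconfig
 * profile):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 *   2500
 *   # echo 0 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 *
 * Writing 0 asks intel_engine_set_heartbeat() to stop the heartbeat,
 * disabling hangcheck for this engine; the helper may veto that with
 * an error if the engine cannot operate safely without it.
 */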

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute *files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}
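
/*
 * add_defaults() mirrors the tunables in a read-only ".defaults"
 * subdirectory, so userspace can always restore an engine to its
 * original configuration, e.g. (paths are a sketch):
 *
 *   # cat /sys/class/drm/card0/engine/rcs0/.defaults/stop_timeout_ms \
 *         > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 */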

#endif /* __linux__ */

void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
#ifdef __linux__
	static const struct attribute *files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
#endif /* __linux__ */
}