// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

#ifdef __linux__

struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
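
/*
 * The four attributes above expose a read-only identity for each engine
 * under /sys/class/drm/cardN/engine/<name>/. A minimal userspace sketch
 * for reading one of them follows; the card0/rcs0 path and the "0x2000"
 * value are illustrative assumptions, not guarantees made by this file:
 *
 *	char buf[32];
 *	int fd = open("/sys/class/drm/card0/engine/rcs0/mmio_base", O_RDONLY);
 *
 *	if (fd >= 0) {
 *		ssize_t n = read(fd, buf, sizeof(buf) - 1); // e.g. "0x2000\n"
 *		if (n > 0)
 *			buf[n] = '\0';
 *		close(fd);
 *	}
 */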

static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
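	/* e.g. "hevc sfc " becomes "hevc sfc\n" */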
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    u32 caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));

	len = 0;
	for_each_set_bit(n,
			 (unsigned long *)&caps,
			 show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += snprintf(buf + len, PAGE_SIZE - len,
						"[%x] ", n);
		} else {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
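
/*
 * By way of example (values are illustrative, not guaranteed for any
 * particular platform): a video decode engine with both capability bits
 * set reports "hevc sfc" through `capabilities`, and `known_capabilities`
 * lists every name the driver knows for that class. A set bit without a
 * name shows up in `capabilities` as its hex index, e.g. "[5]", and trips
 * a GEM_WARN_ON so it is caught during development.
 */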

static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if one is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep by first spinning
	 * on the request -- if it completes in less time than it would
	 * take to go to sleep, process the interrupt and return to the
	 * client, then we have saved the client some latency, albeit at
	 * the cost of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is
	 * unlikely to complete, deciding how long it is worth spinning
	 * for is an arbitrary decision: a trade-off between power and
	 * latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_nsecs(2))
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
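
/*
 * A hedged usage sketch: writing "0" disables the busywait entirely,
 * trading a little extra wakeup latency for less time burnt on a CPU
 * core (the card0/rcs0 path is again only an illustrative assumption):
 *
 *	int fd = open("/sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns",
 *		      O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "0", 1);
 *		close(fd);
 *	}
 */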

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only those of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
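
/*
 * Note that a store takes effect immediately: if the engine is actively
 * timeslicing, the execlists timer is re-armed with the new duration
 * rather than waiting for the next context switch.
 */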

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
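
/*
 * Only the upper bound is policed (MAX_SCHEDULE_TIMEOUT in jiffies,
 * converted to milliseconds); writing 0 is accepted and, by the logic
 * above, asks for the reset to proceed without any grace period.
 */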

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
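
/*
 * Like the timeslice store above, this takes effect immediately: if a
 * preemption request is already in flight (execlists.pending[0] is set),
 * the preempt timer is re-armed with the new timeout.
 */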

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection.  However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
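
/*
 * Unlike the raw WRITE_ONCE() stores above, the heartbeat interval is
 * routed through intel_engine_set_heartbeat() so the heartbeat worker
 * can be rescheduled with the new interval (and, presumably, parked for
 * a delay of 0); that call can fail, and its error is propagated back
 * to the writer.
 */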

static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

#endif /* __linux__ */

void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
#ifdef __linux__
	static const struct attribute *files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		if (0) {
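			/*
			 * Error-unwind block: never entered directly
			 * (if (0)), only reached via the goto labels,
			 * keeping the shared failure path in one place
			 * inside the loop.
			 */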
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
#endif /* __linux__ */
}