xref: /openbsd/sys/dev/pci/drm/i915/gt/sysfs_engines.c (revision ad8b1aaf)
1c349dbc7Sjsg // SPDX-License-Identifier: MIT
2c349dbc7Sjsg /*
3c349dbc7Sjsg  * Copyright © 2019 Intel Corporation
4c349dbc7Sjsg  */
5c349dbc7Sjsg 
6c349dbc7Sjsg #include <linux/kobject.h>
7c349dbc7Sjsg #include <linux/sysfs.h>
8c349dbc7Sjsg 
9c349dbc7Sjsg #include "i915_drv.h"
10c349dbc7Sjsg #include "intel_engine.h"
11c349dbc7Sjsg #include "intel_engine_heartbeat.h"
12c349dbc7Sjsg #include "sysfs_engines.h"
13c349dbc7Sjsg 
14c349dbc7Sjsg #ifdef __linux__
15c349dbc7Sjsg 
16c349dbc7Sjsg struct kobj_engine {
17c349dbc7Sjsg 	struct kobject base;
18c349dbc7Sjsg 	struct intel_engine_cs *engine;
19c349dbc7Sjsg };
20c349dbc7Sjsg 
21c349dbc7Sjsg static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
22c349dbc7Sjsg {
23c349dbc7Sjsg 	return container_of(kobj, struct kobj_engine, base)->engine;
24c349dbc7Sjsg }
25c349dbc7Sjsg 
26c349dbc7Sjsg static ssize_t
27c349dbc7Sjsg name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
28c349dbc7Sjsg {
29c349dbc7Sjsg 	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
30c349dbc7Sjsg }
31c349dbc7Sjsg 
32c349dbc7Sjsg static struct kobj_attribute name_attr =
33c349dbc7Sjsg __ATTR(name, 0444, name_show, NULL);
34c349dbc7Sjsg 
35c349dbc7Sjsg static ssize_t
36c349dbc7Sjsg class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
37c349dbc7Sjsg {
38c349dbc7Sjsg 	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
39c349dbc7Sjsg }
40c349dbc7Sjsg 
41c349dbc7Sjsg static struct kobj_attribute class_attr =
42c349dbc7Sjsg __ATTR(class, 0444, class_show, NULL);
43c349dbc7Sjsg 
44c349dbc7Sjsg static ssize_t
45c349dbc7Sjsg inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
46c349dbc7Sjsg {
47c349dbc7Sjsg 	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
48c349dbc7Sjsg }
49c349dbc7Sjsg 
50c349dbc7Sjsg static struct kobj_attribute inst_attr =
51c349dbc7Sjsg __ATTR(instance, 0444, inst_show, NULL);
52c349dbc7Sjsg 
53c349dbc7Sjsg static ssize_t
54c349dbc7Sjsg mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
55c349dbc7Sjsg {
56c349dbc7Sjsg 	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
57c349dbc7Sjsg }
58c349dbc7Sjsg 
59c349dbc7Sjsg static struct kobj_attribute mmio_attr =
60c349dbc7Sjsg __ATTR(mmio_base, 0444, mmio_show, NULL);
61c349dbc7Sjsg 
/*
 * Human-readable names for the uabi capability bits, indexed by bit
 * position (hence the ilog2 of each single-bit capability flag).
 * One table per engine class that advertises capabilities.
 */
static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};
70c349dbc7Sjsg 
71c349dbc7Sjsg static ssize_t repr_trim(char *buf, ssize_t len)
72c349dbc7Sjsg {
73c349dbc7Sjsg 	/* Trim off the trailing space and replace with a newline */
74c349dbc7Sjsg 	if (len > PAGE_SIZE)
75c349dbc7Sjsg 		len = PAGE_SIZE;
76c349dbc7Sjsg 	if (len > 0)
77c349dbc7Sjsg 		buf[len - 1] = '\n';
78c349dbc7Sjsg 
79c349dbc7Sjsg 	return len;
80c349dbc7Sjsg }
81c349dbc7Sjsg 
/*
 * Format the set bits of @caps as a space-separated list of capability
 * names into @buf (one PAGE_SIZE sysfs page).
 *
 * @show_unknown selects the iteration range and the handling of bits with
 * no name: when true, every bit of @caps is walked and unnamed bits are
 * printed as "[%x]" (with a one-time warning); when false, only the bits
 * covered by the class's name table are walked and unnamed bits are
 * skipped silently.
 */
static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    u32 caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	/* @caps must stay the same type as engine->uabi_capabilities */
	BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));

	/* Pick the name table for this engine class, if it has one */
	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));

	len = 0;
	/*
	 * NOTE(review): the cast treats the on-stack u32 as an unsigned
	 * long bitmap; the bit limit (<= 32) keeps the walk within the
	 * first word.
	 */
	for_each_set_bit(n,
			 (unsigned long *)&caps,
			 show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
		if (n >= count || !repr[n]) {
			/* Unnamed bit: print its index only in "unknown" mode */
			if (GEM_WARN_ON(show_unknown))
				len += snprintf(buf + len, PAGE_SIZE - len,
						"[%x] ", n);
		} else {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"%s ", repr[n]);
		}
		/* Stop before a later snprintf sees a negative remainder */
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}
127c349dbc7Sjsg 
128c349dbc7Sjsg static ssize_t
129c349dbc7Sjsg caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
130c349dbc7Sjsg {
131c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
132c349dbc7Sjsg 
133c349dbc7Sjsg 	return __caps_show(engine, engine->uabi_capabilities, buf, true);
134c349dbc7Sjsg }
135c349dbc7Sjsg 
136c349dbc7Sjsg static struct kobj_attribute caps_attr =
137c349dbc7Sjsg __ATTR(capabilities, 0444, caps_show, NULL);
138c349dbc7Sjsg 
139c349dbc7Sjsg static ssize_t
140c349dbc7Sjsg all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
141c349dbc7Sjsg {
142c349dbc7Sjsg 	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
143c349dbc7Sjsg }
144c349dbc7Sjsg 
145c349dbc7Sjsg static struct kobj_attribute all_caps_attr =
146c349dbc7Sjsg __ATTR(known_capabilities, 0444, all_caps_show, NULL);
147c349dbc7Sjsg 
148c349dbc7Sjsg static ssize_t
149c349dbc7Sjsg max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
150c349dbc7Sjsg 	       const char *buf, size_t count)
151c349dbc7Sjsg {
152c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
153c349dbc7Sjsg 	unsigned long long duration;
154c349dbc7Sjsg 	int err;
155c349dbc7Sjsg 
156c349dbc7Sjsg 	/*
157c349dbc7Sjsg 	 * When waiting for a request, if is it currently being executed
158c349dbc7Sjsg 	 * on the GPU, we busywait for a short while before sleeping. The
159c349dbc7Sjsg 	 * premise is that most requests are short, and if it is already
160c349dbc7Sjsg 	 * executing then there is a good chance that it will complete
161c349dbc7Sjsg 	 * before we can setup the interrupt handler and go to sleep.
162c349dbc7Sjsg 	 * We try to offset the cost of going to sleep, by first spinning
163c349dbc7Sjsg 	 * on the request -- if it completed in less time than it would take
164c349dbc7Sjsg 	 * to go sleep, process the interrupt and return back to the client,
165c349dbc7Sjsg 	 * then we have saved the client some latency, albeit at the cost
166c349dbc7Sjsg 	 * of spinning on an expensive CPU core.
167c349dbc7Sjsg 	 *
168c349dbc7Sjsg 	 * While we try to avoid waiting at all for a request that is unlikely
169c349dbc7Sjsg 	 * to complete, deciding how long it is worth spinning is for is an
170c349dbc7Sjsg 	 * arbitrary decision: trading off power vs latency.
171c349dbc7Sjsg 	 */
172c349dbc7Sjsg 
173c349dbc7Sjsg 	err = kstrtoull(buf, 0, &duration);
174c349dbc7Sjsg 	if (err)
175c349dbc7Sjsg 		return err;
176c349dbc7Sjsg 
177c349dbc7Sjsg 	if (duration > jiffies_to_nsecs(2))
178c349dbc7Sjsg 		return -EINVAL;
179c349dbc7Sjsg 
180c349dbc7Sjsg 	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
181c349dbc7Sjsg 
182c349dbc7Sjsg 	return count;
183c349dbc7Sjsg }
184c349dbc7Sjsg 
185c349dbc7Sjsg static ssize_t
186c349dbc7Sjsg max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
187c349dbc7Sjsg {
188c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
189c349dbc7Sjsg 
190c349dbc7Sjsg 	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
191c349dbc7Sjsg }
192c349dbc7Sjsg 
193c349dbc7Sjsg static struct kobj_attribute max_spin_attr =
194c349dbc7Sjsg __ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
195c349dbc7Sjsg 
196c349dbc7Sjsg static ssize_t
197*ad8b1aafSjsg max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
198*ad8b1aafSjsg {
199*ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
200*ad8b1aafSjsg 
201*ad8b1aafSjsg 	return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
202*ad8b1aafSjsg }
203*ad8b1aafSjsg 
204*ad8b1aafSjsg static struct kobj_attribute max_spin_def =
205*ad8b1aafSjsg __ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
206*ad8b1aafSjsg 
207*ad8b1aafSjsg static ssize_t
208c349dbc7Sjsg timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
209c349dbc7Sjsg 		const char *buf, size_t count)
210c349dbc7Sjsg {
211c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
212c349dbc7Sjsg 	unsigned long long duration;
213c349dbc7Sjsg 	int err;
214c349dbc7Sjsg 
215c349dbc7Sjsg 	/*
216c349dbc7Sjsg 	 * Execlists uses a scheduling quantum (a timeslice) to alternate
217c349dbc7Sjsg 	 * execution between ready-to-run contexts of equal priority. This
218c349dbc7Sjsg 	 * ensures that all users (though only if they of equal importance)
219c349dbc7Sjsg 	 * have the opportunity to run and prevents livelocks where contexts
220c349dbc7Sjsg 	 * may have implicit ordering due to userspace semaphores.
221c349dbc7Sjsg 	 */
222c349dbc7Sjsg 
223c349dbc7Sjsg 	err = kstrtoull(buf, 0, &duration);
224c349dbc7Sjsg 	if (err)
225c349dbc7Sjsg 		return err;
226c349dbc7Sjsg 
227c349dbc7Sjsg 	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
228c349dbc7Sjsg 		return -EINVAL;
229c349dbc7Sjsg 
230c349dbc7Sjsg 	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
231c349dbc7Sjsg 
232c349dbc7Sjsg 	if (execlists_active(&engine->execlists))
233c349dbc7Sjsg 		set_timer_ms(&engine->execlists.timer, duration);
234c349dbc7Sjsg 
235c349dbc7Sjsg 	return count;
236c349dbc7Sjsg }
237c349dbc7Sjsg 
238c349dbc7Sjsg static ssize_t
239c349dbc7Sjsg timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
240c349dbc7Sjsg {
241c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
242c349dbc7Sjsg 
243c349dbc7Sjsg 	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
244c349dbc7Sjsg }
245c349dbc7Sjsg 
246c349dbc7Sjsg static struct kobj_attribute timeslice_duration_attr =
247c349dbc7Sjsg __ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
248c349dbc7Sjsg 
249c349dbc7Sjsg static ssize_t
250*ad8b1aafSjsg timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
251*ad8b1aafSjsg {
252*ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
253*ad8b1aafSjsg 
254*ad8b1aafSjsg 	return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
255*ad8b1aafSjsg }
256*ad8b1aafSjsg 
257*ad8b1aafSjsg static struct kobj_attribute timeslice_duration_def =
258*ad8b1aafSjsg __ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
259*ad8b1aafSjsg 
260*ad8b1aafSjsg static ssize_t
261c349dbc7Sjsg stop_store(struct kobject *kobj, struct kobj_attribute *attr,
262c349dbc7Sjsg 	   const char *buf, size_t count)
263c349dbc7Sjsg {
264c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
265c349dbc7Sjsg 	unsigned long long duration;
266c349dbc7Sjsg 	int err;
267c349dbc7Sjsg 
268c349dbc7Sjsg 	/*
269c349dbc7Sjsg 	 * When we allow ourselves to sleep before a GPU reset after disabling
270c349dbc7Sjsg 	 * submission, even for a few milliseconds, gives an innocent context
271c349dbc7Sjsg 	 * the opportunity to clear the GPU before the reset occurs. However,
272c349dbc7Sjsg 	 * how long to sleep depends on the typical non-preemptible duration
273c349dbc7Sjsg 	 * (a similar problem to determining the ideal preempt-reset timeout
274c349dbc7Sjsg 	 * or even the heartbeat interval).
275c349dbc7Sjsg 	 */
276c349dbc7Sjsg 
277c349dbc7Sjsg 	err = kstrtoull(buf, 0, &duration);
278c349dbc7Sjsg 	if (err)
279c349dbc7Sjsg 		return err;
280c349dbc7Sjsg 
281c349dbc7Sjsg 	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
282c349dbc7Sjsg 		return -EINVAL;
283c349dbc7Sjsg 
284c349dbc7Sjsg 	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
285c349dbc7Sjsg 	return count;
286c349dbc7Sjsg }
287c349dbc7Sjsg 
288c349dbc7Sjsg static ssize_t
289c349dbc7Sjsg stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
290c349dbc7Sjsg {
291c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
292c349dbc7Sjsg 
293c349dbc7Sjsg 	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
294c349dbc7Sjsg }
295c349dbc7Sjsg 
296c349dbc7Sjsg static struct kobj_attribute stop_timeout_attr =
297c349dbc7Sjsg __ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
298c349dbc7Sjsg 
299c349dbc7Sjsg static ssize_t
300*ad8b1aafSjsg stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
301*ad8b1aafSjsg {
302*ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
303*ad8b1aafSjsg 
304*ad8b1aafSjsg 	return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
305*ad8b1aafSjsg }
306*ad8b1aafSjsg 
307*ad8b1aafSjsg static struct kobj_attribute stop_timeout_def =
308*ad8b1aafSjsg __ATTR(stop_timeout_ms, 0444, stop_default, NULL);
309*ad8b1aafSjsg 
310*ad8b1aafSjsg static ssize_t
311c349dbc7Sjsg preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
312c349dbc7Sjsg 		      const char *buf, size_t count)
313c349dbc7Sjsg {
314c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
315c349dbc7Sjsg 	unsigned long long timeout;
316c349dbc7Sjsg 	int err;
317c349dbc7Sjsg 
318c349dbc7Sjsg 	/*
319c349dbc7Sjsg 	 * After initialising a preemption request, we give the current
320c349dbc7Sjsg 	 * resident a small amount of time to vacate the GPU. The preemption
321c349dbc7Sjsg 	 * request is for a higher priority context and should be immediate to
322c349dbc7Sjsg 	 * maintain high quality of service (and avoid priority inversion).
323c349dbc7Sjsg 	 * However, the preemption granularity of the GPU can be quite coarse
324c349dbc7Sjsg 	 * and so we need a compromise.
325c349dbc7Sjsg 	 */
326c349dbc7Sjsg 
327c349dbc7Sjsg 	err = kstrtoull(buf, 0, &timeout);
328c349dbc7Sjsg 	if (err)
329c349dbc7Sjsg 		return err;
330c349dbc7Sjsg 
331c349dbc7Sjsg 	if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
332c349dbc7Sjsg 		return -EINVAL;
333c349dbc7Sjsg 
334c349dbc7Sjsg 	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
335c349dbc7Sjsg 
336c349dbc7Sjsg 	if (READ_ONCE(engine->execlists.pending[0]))
337c349dbc7Sjsg 		set_timer_ms(&engine->execlists.preempt, timeout);
338c349dbc7Sjsg 
339c349dbc7Sjsg 	return count;
340c349dbc7Sjsg }
341c349dbc7Sjsg 
342c349dbc7Sjsg static ssize_t
343c349dbc7Sjsg preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
344c349dbc7Sjsg 		     char *buf)
345c349dbc7Sjsg {
346c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
347c349dbc7Sjsg 
348c349dbc7Sjsg 	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
349c349dbc7Sjsg }
350c349dbc7Sjsg 
351c349dbc7Sjsg static struct kobj_attribute preempt_timeout_attr =
352c349dbc7Sjsg __ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
353c349dbc7Sjsg 
354c349dbc7Sjsg static ssize_t
355*ad8b1aafSjsg preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
356*ad8b1aafSjsg 			char *buf)
357*ad8b1aafSjsg {
358*ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
359*ad8b1aafSjsg 
360*ad8b1aafSjsg 	return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
361*ad8b1aafSjsg }
362*ad8b1aafSjsg 
363*ad8b1aafSjsg static struct kobj_attribute preempt_timeout_def =
364*ad8b1aafSjsg __ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
365*ad8b1aafSjsg 
366*ad8b1aafSjsg static ssize_t
367c349dbc7Sjsg heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
368c349dbc7Sjsg 		const char *buf, size_t count)
369c349dbc7Sjsg {
370c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
371c349dbc7Sjsg 	unsigned long long delay;
372c349dbc7Sjsg 	int err;
373c349dbc7Sjsg 
374c349dbc7Sjsg 	/*
375c349dbc7Sjsg 	 * We monitor the health of the system via periodic heartbeat pulses.
376c349dbc7Sjsg 	 * The pulses also provide the opportunity to perform garbage
377c349dbc7Sjsg 	 * collection.  However, we interpret an incomplete pulse (a missed
378c349dbc7Sjsg 	 * heartbeat) as an indication that the system is no longer responsive,
379c349dbc7Sjsg 	 * i.e. hung, and perform an engine or full GPU reset. Given that the
380c349dbc7Sjsg 	 * preemption granularity can be very coarse on a system, the optimal
381c349dbc7Sjsg 	 * value for any workload is unknowable!
382c349dbc7Sjsg 	 */
383c349dbc7Sjsg 
384c349dbc7Sjsg 	err = kstrtoull(buf, 0, &delay);
385c349dbc7Sjsg 	if (err)
386c349dbc7Sjsg 		return err;
387c349dbc7Sjsg 
388c349dbc7Sjsg 	if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
389c349dbc7Sjsg 		return -EINVAL;
390c349dbc7Sjsg 
391c349dbc7Sjsg 	err = intel_engine_set_heartbeat(engine, delay);
392c349dbc7Sjsg 	if (err)
393c349dbc7Sjsg 		return err;
394c349dbc7Sjsg 
395c349dbc7Sjsg 	return count;
396c349dbc7Sjsg }
397c349dbc7Sjsg 
398c349dbc7Sjsg static ssize_t
399c349dbc7Sjsg heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
400c349dbc7Sjsg {
401c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
402c349dbc7Sjsg 
403c349dbc7Sjsg 	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
404c349dbc7Sjsg }
405c349dbc7Sjsg 
406c349dbc7Sjsg static struct kobj_attribute heartbeat_interval_attr =
407c349dbc7Sjsg __ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
408c349dbc7Sjsg 
409*ad8b1aafSjsg static ssize_t
410*ad8b1aafSjsg heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
411*ad8b1aafSjsg {
412*ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
413*ad8b1aafSjsg 
414*ad8b1aafSjsg 	return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
415*ad8b1aafSjsg }
416*ad8b1aafSjsg 
417*ad8b1aafSjsg static struct kobj_attribute heartbeat_interval_def =
418*ad8b1aafSjsg __ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
419*ad8b1aafSjsg 
420c349dbc7Sjsg static void kobj_engine_release(struct kobject *kobj)
421c349dbc7Sjsg {
422c349dbc7Sjsg 	kfree(kobj);
423c349dbc7Sjsg }
424c349dbc7Sjsg 
425c349dbc7Sjsg static struct kobj_type kobj_engine_type = {
426c349dbc7Sjsg 	.release = kobj_engine_release,
427c349dbc7Sjsg 	.sysfs_ops = &kobj_sysfs_ops
428c349dbc7Sjsg };
429c349dbc7Sjsg 
430c349dbc7Sjsg static struct kobject *
431c349dbc7Sjsg kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
432c349dbc7Sjsg {
433c349dbc7Sjsg 	struct kobj_engine *ke;
434c349dbc7Sjsg 
435c349dbc7Sjsg 	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
436c349dbc7Sjsg 	if (!ke)
437c349dbc7Sjsg 		return NULL;
438c349dbc7Sjsg 
439c349dbc7Sjsg 	kobject_init(&ke->base, &kobj_engine_type);
440c349dbc7Sjsg 	ke->engine = engine;
441c349dbc7Sjsg 
442c349dbc7Sjsg 	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
443c349dbc7Sjsg 		kobject_put(&ke->base);
444c349dbc7Sjsg 		return NULL;
445c349dbc7Sjsg 	}
446c349dbc7Sjsg 
447c349dbc7Sjsg 	/* xfer ownership to sysfs tree */
448c349dbc7Sjsg 	return &ke->base;
449c349dbc7Sjsg }
450c349dbc7Sjsg 
/*
 * Populate a read-only ".defaults" subdirectory beneath the engine's
 * sysfs directory, exposing the compile-time default values of the
 * tunables so userspace can restore them after modification. Failures
 * are silently tolerated: the defaults directory is best-effort.
 */
static void add_defaults(struct kobj_engine *parent)
{
	/* Defaults exposed for every engine */
	static const struct attribute *files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	/* On add failure, the put drops the init ref and frees ke */
	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	/* Timeslice default only on engines that support timeslicing */
	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	/* Likewise the preempt-to-reset default */
	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}
486*ad8b1aafSjsg 
487c349dbc7Sjsg #endif /* __linux__ */
488c349dbc7Sjsg 
/*
 * intel_engines_add_sysfs - expose each uabi engine as a sysfs directory
 * (engine/<name>/) with its identity attributes and runtime tunables,
 * plus a ".defaults" subdirectory of compile-time defaults.
 *
 * Failures are logged and abort the loop, but are not fatal to driver
 * load. Compiled out entirely on non-Linux builds.
 */
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
#ifdef __linux__
	/* Attributes created for every engine */
	static const struct attribute *files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		/* Conditional tunables, gated on engine features */
		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		/*
		 * Error funnel: unreachable in normal flow; the gotos above
		 * jump in. err_object additionally drops the kobject ref
		 * before falling through to the shared error report.
		 */
		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
#endif /* __linux__ */
}
546