xref: /openbsd/sys/dev/pci/drm/i915/gt/sysfs_engines.c (revision f005ef32)
1c349dbc7Sjsg // SPDX-License-Identifier: MIT
2c349dbc7Sjsg /*
3c349dbc7Sjsg  * Copyright © 2019 Intel Corporation
4c349dbc7Sjsg  */
5c349dbc7Sjsg 
6c349dbc7Sjsg #include <linux/kobject.h>
7c349dbc7Sjsg #include <linux/sysfs.h>
8c349dbc7Sjsg 
9c349dbc7Sjsg #include "i915_drv.h"
10c349dbc7Sjsg #include "intel_engine.h"
11c349dbc7Sjsg #include "intel_engine_heartbeat.h"
12c349dbc7Sjsg #include "sysfs_engines.h"
13c349dbc7Sjsg 
14c349dbc7Sjsg #ifdef __linux__
15c349dbc7Sjsg 
16c349dbc7Sjsg struct kobj_engine {
17c349dbc7Sjsg 	struct kobject base;
18c349dbc7Sjsg 	struct intel_engine_cs *engine;
19c349dbc7Sjsg };
20c349dbc7Sjsg 
kobj_to_engine(struct kobject * kobj)21c349dbc7Sjsg static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
22c349dbc7Sjsg {
23c349dbc7Sjsg 	return container_of(kobj, struct kobj_engine, base)->engine;
24c349dbc7Sjsg }
25c349dbc7Sjsg 
26c349dbc7Sjsg static ssize_t
name_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)27c349dbc7Sjsg name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
28c349dbc7Sjsg {
29*f005ef32Sjsg 	return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
30c349dbc7Sjsg }
31c349dbc7Sjsg 
32*f005ef32Sjsg static const struct kobj_attribute name_attr =
33c349dbc7Sjsg __ATTR(name, 0444, name_show, NULL);
34c349dbc7Sjsg 
35c349dbc7Sjsg static ssize_t
class_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)36c349dbc7Sjsg class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
37c349dbc7Sjsg {
38*f005ef32Sjsg 	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
39c349dbc7Sjsg }
40c349dbc7Sjsg 
41*f005ef32Sjsg static const struct kobj_attribute class_attr =
42c349dbc7Sjsg __ATTR(class, 0444, class_show, NULL);
43c349dbc7Sjsg 
44c349dbc7Sjsg static ssize_t
inst_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)45c349dbc7Sjsg inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
46c349dbc7Sjsg {
47*f005ef32Sjsg 	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
48c349dbc7Sjsg }
49c349dbc7Sjsg 
50*f005ef32Sjsg static const struct kobj_attribute inst_attr =
51c349dbc7Sjsg __ATTR(instance, 0444, inst_show, NULL);
52c349dbc7Sjsg 
53c349dbc7Sjsg static ssize_t
mmio_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)54c349dbc7Sjsg mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
55c349dbc7Sjsg {
56*f005ef32Sjsg 	return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
57c349dbc7Sjsg }
58c349dbc7Sjsg 
59*f005ef32Sjsg static const struct kobj_attribute mmio_attr =
60c349dbc7Sjsg __ATTR(mmio_base, 0444, mmio_show, NULL);
61c349dbc7Sjsg 
/*
 * Printable names for uabi capability bits, indexed by bit position
 * (hence the ilog2() of each capability flag). One table per engine
 * class that has named capabilities.
 */
static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};
70c349dbc7Sjsg 
/*
 * Each capability name is emitted with a trailing space; clamp the
 * result to one page and turn the final separator into a newline.
 */
static ssize_t repr_trim(char *buf, ssize_t len)
{
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len <= 0)
		return len;

	buf[len - 1] = '\n';
	return len;
}
81c349dbc7Sjsg 
/*
 * Emit the names of the capability bits set in @caps into @buf.
 *
 * The names come from the per-class table (vcs_caps / vecs_caps);
 * classes without a table have no named capabilities. When
 * @show_unknown is set, bits without a name are printed as "[%x]" and
 * flagged with GEM_WARN_ON; otherwise only the first @count (named)
 * bits are scanned.
 *
 * Returns the number of characters written, newline terminated.
 */
static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    unsigned long caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	/* Select the capability-name table for this engine class. */
	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_LONG);

	len = 0;
	for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
		if (n >= count || !repr[n]) {
			/* Unnamed bit: only reported when dumping all bits. */
			if (GEM_WARN_ON(show_unknown))
				len += sysfs_emit_at(buf, len, "[%x] ", n);
		} else {
			len += sysfs_emit_at(buf, len, "%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}
121c349dbc7Sjsg 
122c349dbc7Sjsg static ssize_t
caps_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)123c349dbc7Sjsg caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
124c349dbc7Sjsg {
125c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
126c349dbc7Sjsg 
127c349dbc7Sjsg 	return __caps_show(engine, engine->uabi_capabilities, buf, true);
128c349dbc7Sjsg }
129c349dbc7Sjsg 
130*f005ef32Sjsg static const struct kobj_attribute caps_attr =
131c349dbc7Sjsg __ATTR(capabilities, 0444, caps_show, NULL);
132c349dbc7Sjsg 
133c349dbc7Sjsg static ssize_t
all_caps_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)134c349dbc7Sjsg all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
135c349dbc7Sjsg {
136c349dbc7Sjsg 	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
137c349dbc7Sjsg }
138c349dbc7Sjsg 
139*f005ef32Sjsg static const struct kobj_attribute all_caps_attr =
140c349dbc7Sjsg __ATTR(known_capabilities, 0444, all_caps_show, NULL);
141c349dbc7Sjsg 
142c349dbc7Sjsg static ssize_t
max_spin_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)143c349dbc7Sjsg max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
144c349dbc7Sjsg 	       const char *buf, size_t count)
145c349dbc7Sjsg {
146c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
1471bb76ff1Sjsg 	unsigned long long duration, clamped;
148c349dbc7Sjsg 	int err;
149c349dbc7Sjsg 
150c349dbc7Sjsg 	/*
151c349dbc7Sjsg 	 * When waiting for a request, if is it currently being executed
152c349dbc7Sjsg 	 * on the GPU, we busywait for a short while before sleeping. The
153c349dbc7Sjsg 	 * premise is that most requests are short, and if it is already
154c349dbc7Sjsg 	 * executing then there is a good chance that it will complete
155c349dbc7Sjsg 	 * before we can setup the interrupt handler and go to sleep.
156c349dbc7Sjsg 	 * We try to offset the cost of going to sleep, by first spinning
157c349dbc7Sjsg 	 * on the request -- if it completed in less time than it would take
158c349dbc7Sjsg 	 * to go sleep, process the interrupt and return back to the client,
159c349dbc7Sjsg 	 * then we have saved the client some latency, albeit at the cost
160c349dbc7Sjsg 	 * of spinning on an expensive CPU core.
161c349dbc7Sjsg 	 *
162c349dbc7Sjsg 	 * While we try to avoid waiting at all for a request that is unlikely
163c349dbc7Sjsg 	 * to complete, deciding how long it is worth spinning is for is an
164c349dbc7Sjsg 	 * arbitrary decision: trading off power vs latency.
165c349dbc7Sjsg 	 */
166c349dbc7Sjsg 
167c349dbc7Sjsg 	err = kstrtoull(buf, 0, &duration);
168c349dbc7Sjsg 	if (err)
169c349dbc7Sjsg 		return err;
170c349dbc7Sjsg 
1711bb76ff1Sjsg 	clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
1721bb76ff1Sjsg 	if (duration != clamped)
173c349dbc7Sjsg 		return -EINVAL;
174c349dbc7Sjsg 
175c349dbc7Sjsg 	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
176c349dbc7Sjsg 
177c349dbc7Sjsg 	return count;
178c349dbc7Sjsg }
179c349dbc7Sjsg 
180c349dbc7Sjsg static ssize_t
max_spin_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)181c349dbc7Sjsg max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
182c349dbc7Sjsg {
183c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
184c349dbc7Sjsg 
185*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
186c349dbc7Sjsg }
187c349dbc7Sjsg 
188*f005ef32Sjsg static const struct kobj_attribute max_spin_attr =
189c349dbc7Sjsg __ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
190c349dbc7Sjsg 
191c349dbc7Sjsg static ssize_t
max_spin_default(struct kobject * kobj,struct kobj_attribute * attr,char * buf)192ad8b1aafSjsg max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
193ad8b1aafSjsg {
194ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
195ad8b1aafSjsg 
196*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
197ad8b1aafSjsg }
198ad8b1aafSjsg 
199*f005ef32Sjsg static const struct kobj_attribute max_spin_def =
200ad8b1aafSjsg __ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
201ad8b1aafSjsg 
202ad8b1aafSjsg static ssize_t
timeslice_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)203c349dbc7Sjsg timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
204c349dbc7Sjsg 		const char *buf, size_t count)
205c349dbc7Sjsg {
206c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
2071bb76ff1Sjsg 	unsigned long long duration, clamped;
208c349dbc7Sjsg 	int err;
209c349dbc7Sjsg 
210c349dbc7Sjsg 	/*
211c349dbc7Sjsg 	 * Execlists uses a scheduling quantum (a timeslice) to alternate
212c349dbc7Sjsg 	 * execution between ready-to-run contexts of equal priority. This
213c349dbc7Sjsg 	 * ensures that all users (though only if they of equal importance)
214c349dbc7Sjsg 	 * have the opportunity to run and prevents livelocks where contexts
215c349dbc7Sjsg 	 * may have implicit ordering due to userspace semaphores.
216c349dbc7Sjsg 	 */
217c349dbc7Sjsg 
218c349dbc7Sjsg 	err = kstrtoull(buf, 0, &duration);
219c349dbc7Sjsg 	if (err)
220c349dbc7Sjsg 		return err;
221c349dbc7Sjsg 
2221bb76ff1Sjsg 	clamped = intel_clamp_timeslice_duration_ms(engine, duration);
2231bb76ff1Sjsg 	if (duration != clamped)
224c349dbc7Sjsg 		return -EINVAL;
225c349dbc7Sjsg 
226c349dbc7Sjsg 	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
227c349dbc7Sjsg 
228c349dbc7Sjsg 	if (execlists_active(&engine->execlists))
229c349dbc7Sjsg 		set_timer_ms(&engine->execlists.timer, duration);
230c349dbc7Sjsg 
231c349dbc7Sjsg 	return count;
232c349dbc7Sjsg }
233c349dbc7Sjsg 
234c349dbc7Sjsg static ssize_t
timeslice_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)235c349dbc7Sjsg timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
236c349dbc7Sjsg {
237c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
238c349dbc7Sjsg 
239*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
240c349dbc7Sjsg }
241c349dbc7Sjsg 
242*f005ef32Sjsg static const struct kobj_attribute timeslice_duration_attr =
243c349dbc7Sjsg __ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
244c349dbc7Sjsg 
245c349dbc7Sjsg static ssize_t
timeslice_default(struct kobject * kobj,struct kobj_attribute * attr,char * buf)246ad8b1aafSjsg timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
247ad8b1aafSjsg {
248ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
249ad8b1aafSjsg 
250*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
251ad8b1aafSjsg }
252ad8b1aafSjsg 
253*f005ef32Sjsg static const struct kobj_attribute timeslice_duration_def =
254ad8b1aafSjsg __ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
255ad8b1aafSjsg 
256ad8b1aafSjsg static ssize_t
stop_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)257c349dbc7Sjsg stop_store(struct kobject *kobj, struct kobj_attribute *attr,
258c349dbc7Sjsg 	   const char *buf, size_t count)
259c349dbc7Sjsg {
260c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
2611bb76ff1Sjsg 	unsigned long long duration, clamped;
262c349dbc7Sjsg 	int err;
263c349dbc7Sjsg 
264c349dbc7Sjsg 	/*
265c349dbc7Sjsg 	 * When we allow ourselves to sleep before a GPU reset after disabling
266c349dbc7Sjsg 	 * submission, even for a few milliseconds, gives an innocent context
267c349dbc7Sjsg 	 * the opportunity to clear the GPU before the reset occurs. However,
268c349dbc7Sjsg 	 * how long to sleep depends on the typical non-preemptible duration
269c349dbc7Sjsg 	 * (a similar problem to determining the ideal preempt-reset timeout
270c349dbc7Sjsg 	 * or even the heartbeat interval).
271c349dbc7Sjsg 	 */
272c349dbc7Sjsg 
273c349dbc7Sjsg 	err = kstrtoull(buf, 0, &duration);
274c349dbc7Sjsg 	if (err)
275c349dbc7Sjsg 		return err;
276c349dbc7Sjsg 
2771bb76ff1Sjsg 	clamped = intel_clamp_stop_timeout_ms(engine, duration);
2781bb76ff1Sjsg 	if (duration != clamped)
279c349dbc7Sjsg 		return -EINVAL;
280c349dbc7Sjsg 
281c349dbc7Sjsg 	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
282c349dbc7Sjsg 	return count;
283c349dbc7Sjsg }
284c349dbc7Sjsg 
285c349dbc7Sjsg static ssize_t
stop_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)286c349dbc7Sjsg stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
287c349dbc7Sjsg {
288c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
289c349dbc7Sjsg 
290*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
291c349dbc7Sjsg }
292c349dbc7Sjsg 
293*f005ef32Sjsg static const struct kobj_attribute stop_timeout_attr =
294c349dbc7Sjsg __ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
295c349dbc7Sjsg 
296c349dbc7Sjsg static ssize_t
stop_default(struct kobject * kobj,struct kobj_attribute * attr,char * buf)297ad8b1aafSjsg stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
298ad8b1aafSjsg {
299ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
300ad8b1aafSjsg 
301*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
302ad8b1aafSjsg }
303ad8b1aafSjsg 
304*f005ef32Sjsg static const struct kobj_attribute stop_timeout_def =
305ad8b1aafSjsg __ATTR(stop_timeout_ms, 0444, stop_default, NULL);
306ad8b1aafSjsg 
307ad8b1aafSjsg static ssize_t
preempt_timeout_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)308c349dbc7Sjsg preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
309c349dbc7Sjsg 		      const char *buf, size_t count)
310c349dbc7Sjsg {
311c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
3121bb76ff1Sjsg 	unsigned long long timeout, clamped;
313c349dbc7Sjsg 	int err;
314c349dbc7Sjsg 
315c349dbc7Sjsg 	/*
316c349dbc7Sjsg 	 * After initialising a preemption request, we give the current
317c349dbc7Sjsg 	 * resident a small amount of time to vacate the GPU. The preemption
318c349dbc7Sjsg 	 * request is for a higher priority context and should be immediate to
319c349dbc7Sjsg 	 * maintain high quality of service (and avoid priority inversion).
320c349dbc7Sjsg 	 * However, the preemption granularity of the GPU can be quite coarse
321c349dbc7Sjsg 	 * and so we need a compromise.
322c349dbc7Sjsg 	 */
323c349dbc7Sjsg 
324c349dbc7Sjsg 	err = kstrtoull(buf, 0, &timeout);
325c349dbc7Sjsg 	if (err)
326c349dbc7Sjsg 		return err;
327c349dbc7Sjsg 
3281bb76ff1Sjsg 	clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
3291bb76ff1Sjsg 	if (timeout != clamped)
330c349dbc7Sjsg 		return -EINVAL;
331c349dbc7Sjsg 
332c349dbc7Sjsg 	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
333c349dbc7Sjsg 
334c349dbc7Sjsg 	if (READ_ONCE(engine->execlists.pending[0]))
335c349dbc7Sjsg 		set_timer_ms(&engine->execlists.preempt, timeout);
336c349dbc7Sjsg 
337c349dbc7Sjsg 	return count;
338c349dbc7Sjsg }
339c349dbc7Sjsg 
340c349dbc7Sjsg static ssize_t
preempt_timeout_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)341c349dbc7Sjsg preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
342c349dbc7Sjsg 		     char *buf)
343c349dbc7Sjsg {
344c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
345c349dbc7Sjsg 
346*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
347c349dbc7Sjsg }
348c349dbc7Sjsg 
349*f005ef32Sjsg static const struct kobj_attribute preempt_timeout_attr =
350c349dbc7Sjsg __ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
351c349dbc7Sjsg 
352c349dbc7Sjsg static ssize_t
preempt_timeout_default(struct kobject * kobj,struct kobj_attribute * attr,char * buf)353ad8b1aafSjsg preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
354ad8b1aafSjsg 			char *buf)
355ad8b1aafSjsg {
356ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
357ad8b1aafSjsg 
358*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
359ad8b1aafSjsg }
360ad8b1aafSjsg 
361*f005ef32Sjsg static const struct kobj_attribute preempt_timeout_def =
362ad8b1aafSjsg __ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
363ad8b1aafSjsg 
364ad8b1aafSjsg static ssize_t
heartbeat_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)365c349dbc7Sjsg heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
366c349dbc7Sjsg 		const char *buf, size_t count)
367c349dbc7Sjsg {
368c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
3691bb76ff1Sjsg 	unsigned long long delay, clamped;
370c349dbc7Sjsg 	int err;
371c349dbc7Sjsg 
372c349dbc7Sjsg 	/*
373c349dbc7Sjsg 	 * We monitor the health of the system via periodic heartbeat pulses.
374c349dbc7Sjsg 	 * The pulses also provide the opportunity to perform garbage
375c349dbc7Sjsg 	 * collection.  However, we interpret an incomplete pulse (a missed
376c349dbc7Sjsg 	 * heartbeat) as an indication that the system is no longer responsive,
377c349dbc7Sjsg 	 * i.e. hung, and perform an engine or full GPU reset. Given that the
378c349dbc7Sjsg 	 * preemption granularity can be very coarse on a system, the optimal
379c349dbc7Sjsg 	 * value for any workload is unknowable!
380c349dbc7Sjsg 	 */
381c349dbc7Sjsg 
382c349dbc7Sjsg 	err = kstrtoull(buf, 0, &delay);
383c349dbc7Sjsg 	if (err)
384c349dbc7Sjsg 		return err;
385c349dbc7Sjsg 
3861bb76ff1Sjsg 	clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
3871bb76ff1Sjsg 	if (delay != clamped)
388c349dbc7Sjsg 		return -EINVAL;
389c349dbc7Sjsg 
390c349dbc7Sjsg 	err = intel_engine_set_heartbeat(engine, delay);
391c349dbc7Sjsg 	if (err)
392c349dbc7Sjsg 		return err;
393c349dbc7Sjsg 
394c349dbc7Sjsg 	return count;
395c349dbc7Sjsg }
396c349dbc7Sjsg 
397c349dbc7Sjsg static ssize_t
heartbeat_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)398c349dbc7Sjsg heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
399c349dbc7Sjsg {
400c349dbc7Sjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
401c349dbc7Sjsg 
402*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
403c349dbc7Sjsg }
404c349dbc7Sjsg 
405*f005ef32Sjsg static const struct kobj_attribute heartbeat_interval_attr =
406c349dbc7Sjsg __ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
407c349dbc7Sjsg 
408ad8b1aafSjsg static ssize_t
heartbeat_default(struct kobject * kobj,struct kobj_attribute * attr,char * buf)409ad8b1aafSjsg heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
410ad8b1aafSjsg {
411ad8b1aafSjsg 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
412ad8b1aafSjsg 
413*f005ef32Sjsg 	return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
414ad8b1aafSjsg }
415ad8b1aafSjsg 
416*f005ef32Sjsg static const struct kobj_attribute heartbeat_interval_def =
417ad8b1aafSjsg __ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
418ad8b1aafSjsg 
kobj_engine_release(struct kobject * kobj)419c349dbc7Sjsg static void kobj_engine_release(struct kobject *kobj)
420c349dbc7Sjsg {
421c349dbc7Sjsg 	kfree(kobj);
422c349dbc7Sjsg }
423c349dbc7Sjsg 
424*f005ef32Sjsg static const struct kobj_type kobj_engine_type = {
425c349dbc7Sjsg 	.release = kobj_engine_release,
426c349dbc7Sjsg 	.sysfs_ops = &kobj_sysfs_ops
427c349dbc7Sjsg };
428c349dbc7Sjsg 
429c349dbc7Sjsg static struct kobject *
kobj_engine(struct kobject * dir,struct intel_engine_cs * engine)430c349dbc7Sjsg kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
431c349dbc7Sjsg {
432c349dbc7Sjsg 	struct kobj_engine *ke;
433c349dbc7Sjsg 
434c349dbc7Sjsg 	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
435c349dbc7Sjsg 	if (!ke)
436c349dbc7Sjsg 		return NULL;
437c349dbc7Sjsg 
438c349dbc7Sjsg 	kobject_init(&ke->base, &kobj_engine_type);
439c349dbc7Sjsg 	ke->engine = engine;
440c349dbc7Sjsg 
441c349dbc7Sjsg 	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
442c349dbc7Sjsg 		kobject_put(&ke->base);
443c349dbc7Sjsg 		return NULL;
444c349dbc7Sjsg 	}
445c349dbc7Sjsg 
446c349dbc7Sjsg 	/* xfer ownership to sysfs tree */
447c349dbc7Sjsg 	return &ke->base;
448c349dbc7Sjsg }
449c349dbc7Sjsg 
/*
 * Populate the engine's ".defaults" subdirectory with read-only copies
 * of the scheduling tunables, letting userspace restore the original
 * values after experimenting.
 *
 * All failures are silently ignored: the defaults directory is
 * best-effort and the parent engine directory remains usable without
 * it.
 */
static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute * const files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	/* From here on the sysfs tree holds the kobject reference. */
	if (sysfs_create_files(&ke->base, files))
		return;

	/* Conditional tunables, only where the engine supports them. */
	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}
485ad8b1aafSjsg 
486c349dbc7Sjsg #endif /* __linux__ */
487c349dbc7Sjsg 
/*
 * Expose each uabi engine under <card>/engine/<name>/ with its static
 * properties (name, class, instance, mmio base, capabilities) and its
 * writable scheduling controls, plus a read-only ".defaults"
 * subdirectory recording the original values.
 *
 * Compiled out on !__linux__ (no sysfs on OpenBSD).
 */
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
#ifdef __linux__
	static const struct attribute * const files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		/* Conditional tunables, only where the engine supports them. */
		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		/*
		 * Error landing pad; the if (0) keeps the labels out of the
		 * normal control flow. NOTE(review): err_object drops the
		 * kobject reference but the later err_engine jumps do not --
		 * presumably the sysfs tree owns it at that point; confirm
		 * against kobject lifetime rules.
		 */
		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
#endif /* __linux__ */
}
545