xref: /openbsd/sys/dev/pci/drm/i915/i915_sysfs.c (revision 73471bf0)
1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Ben Widawsky <ben@bwidawsk.net>
25  *
26  */
27 
28 #include <linux/device.h>
29 #include <linux/module.h>
30 #include <linux/stat.h>
31 #include <linux/sysfs.h>
32 
33 #include "gt/intel_rc6.h"
34 #include "gt/intel_rps.h"
35 #include "gt/sysfs_engines.h"
36 
37 #include "i915_drv.h"
38 #include "i915_sysfs.h"
39 #include "intel_pm.h"
40 #include "intel_sideband.h"
41 
42 #ifdef __linux__
43 
44 static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
45 {
46 	struct drm_minor *minor = dev_get_drvdata(kdev);
47 	return to_i915(minor->dev);
48 }
49 
50 #ifdef CONFIG_PM
51 static u32 calc_residency(struct drm_i915_private *dev_priv,
52 			  i915_reg_t reg)
53 {
54 	intel_wakeref_t wakeref;
55 	u64 res = 0;
56 
57 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
58 		res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
59 
60 	return DIV_ROUND_CLOSEST_ULL(res, 1000);
61 }
62 
63 static ssize_t
64 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
65 {
66 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
67 	unsigned int mask;
68 
69 	mask = 0;
70 	if (HAS_RC6(dev_priv))
71 		mask |= BIT(0);
72 	if (HAS_RC6p(dev_priv))
73 		mask |= BIT(1);
74 	if (HAS_RC6pp(dev_priv))
75 		mask |= BIT(2);
76 
77 	return snprintf(buf, PAGE_SIZE, "%x\n", mask);
78 }
79 
80 static ssize_t
81 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
82 {
83 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
84 	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
85 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
86 }
87 
88 static ssize_t
89 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
90 {
91 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
92 	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
93 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
94 }
95 
96 static ssize_t
97 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
98 {
99 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
100 	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
101 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
102 }
103 
104 static ssize_t
105 show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
106 {
107 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
108 	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
109 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
110 }
111 
/* Read-only attributes exposing RC6 state and residency counters. */
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

/* Base RC6 attributes, merged into the device "power" group at setup. */
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs =  rc6_attrs
};

/* Deep RC6 (RC6p/RC6pp) residency, merged only when HAS_RC6p(). */
static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs =  rc6p_attrs
};

/* Media-well RC6 residency, merged on Valleyview/Cherryview only. */
static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs =  media_rc6_attrs
};
149 #endif
150 
151 static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
152 {
153 	if (!HAS_L3_DPF(i915))
154 		return -EPERM;
155 
156 	if (!IS_ALIGNED(offset, sizeof(u32)))
157 		return -EINVAL;
158 
159 	if (offset >= GEN7_L3LOG_SIZE)
160 		return -ENXIO;
161 
162 	return 0;
163 }
164 
/*
 * sysfs read handler for the per-slice "l3_parity" bin attribute.
 *
 * Copies out the cached L3 remap log for the slice encoded in
 * attr->private; regions never written read back as zeroes.  Returns
 * the number of bytes copied or a negative errno.
 */
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	/* Whole u32s only, clamped to the end of the log. */
	count = round_down(count, sizeof(u32));
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
	memset(buf, 0, count);

	/* remap_info[] is installed/updated under the contexts lock. */
	spin_lock(&i915->gem.contexts.lock);
	if (i915->l3_parity.remap_info[slice])
		memcpy(buf,
		       i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
		       count);
	spin_unlock(&i915->gem.contexts.lock);

	return count;
}
192 
/*
 * sysfs write handler for the per-slice "l3_parity" bin attribute.
 *
 * Stores user-supplied L3 remap data for the slice encoded in
 * attr->private and flags every context so the new mapping is applied
 * at its next context switch.  Returns bytes consumed or a negative
 * errno.
 */
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	u32 *remap_info, *freeme = NULL;
	struct i915_gem_context *ctx;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	if (count < sizeof(u32))
		return -EINVAL;

	/* Allocate outside the lock; we may race with another writer. */
	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
	if (!remap_info)
		return -ENOMEM;

	spin_lock(&i915->gem.contexts.lock);

	if (i915->l3_parity.remap_info[slice]) {
		/* Lost the race: reuse the existing table, free ours later. */
		freeme = remap_info;
		remap_info = i915->l3_parity.remap_info[slice];
	} else {
		i915->l3_parity.remap_info[slice] = remap_info;
	}

	count = round_down(count, sizeof(u32));
	memcpy(remap_info + offset / sizeof(u32), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
		ctx->remap_slice |= BIT(slice);

	spin_unlock(&i915->gem.contexts.lock);
	kfree(freeme);

	/*
	 * TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	*/

	return count;
}
243 
/*
 * Binary sysfs files exposing the L3 parity remap log, one per L3
 * slice.  ->private carries the slice index for the shared handlers.
 */
static const struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static const struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
261 
262 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
263 				    struct device_attribute *attr, char *buf)
264 {
265 	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
266 	struct intel_rps *rps = &i915->gt.rps;
267 
268 	return snprintf(buf, PAGE_SIZE, "%d\n",
269 			intel_rps_read_actual_frequency(rps));
270 }
271 
272 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
273 				    struct device_attribute *attr, char *buf)
274 {
275 	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
276 	struct intel_rps *rps = &i915->gt.rps;
277 
278 	return snprintf(buf, PAGE_SIZE, "%d\n",
279 			intel_gpu_freq(rps, rps->cur_freq));
280 }
281 
282 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
283 {
284 	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
285 	struct intel_rps *rps = &i915->gt.rps;
286 
287 	return snprintf(buf, PAGE_SIZE, "%d\n",
288 			intel_gpu_freq(rps, rps->boost_freq));
289 }
290 
/*
 * sysfs store for gt_boost_freq_mhz: set the boost frequency.  The
 * value (MHz) is converted to hardware opcode units and validated
 * against the static hardware limits; if clients are already waiting,
 * the RPS worker is kicked so the new boost takes effect immediately.
 */
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	bool boost = false;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		/* Only reschedule the worker if someone is waiting now. */
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		schedule_work(&rps->work);

	return count;
}
321 
322 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
323 				     struct device_attribute *attr, char *buf)
324 {
325 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
326 	struct intel_rps *rps = &dev_priv->gt.rps;
327 
328 	return snprintf(buf, PAGE_SIZE, "%d\n",
329 			intel_gpu_freq(rps, rps->efficient_freq));
330 }
331 
332 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
333 {
334 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
335 	struct intel_rps *rps = &dev_priv->gt.rps;
336 
337 	return snprintf(buf, PAGE_SIZE, "%d\n",
338 			intel_gpu_freq(rps, rps->max_freq_softlimit));
339 }
340 
/*
 * sysfs store for gt_max_freq_mhz: raise/lower the user softlimit
 * ceiling.  The value (MHz) is converted to hardware opcode units and
 * must lie within the hardware range and not below the current minimum
 * softlimit.  The current request is re-clamped into the new window.
 */
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Values above RP0 are overclocking; allowed but worth a note. */
	if (val > rps->rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}
386 
387 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
388 {
389 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
390 	struct intel_rps *rps = &dev_priv->gt.rps;
391 
392 	return snprintf(buf, PAGE_SIZE, "%d\n",
393 			intel_gpu_freq(rps, rps->min_freq_softlimit));
394 }
395 
/*
 * sysfs store for gt_min_freq_mhz: raise/lower the user softlimit
 * floor.  The value (MHz) is converted to hardware opcode units and
 * must lie within the hardware range and not above the current maximum
 * softlimit.  The current request is re-clamped into the new window.
 */
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}
437 
/* Frequency-control attributes registered on GEN6+ devices. */
static DEVICE_ATTR_RO(gt_act_freq_mhz);
static DEVICE_ATTR_RO(gt_cur_freq_mhz);
static DEVICE_ATTR_RW(gt_boost_freq_mhz);
static DEVICE_ATTR_RW(gt_max_freq_mhz);
static DEVICE_ATTR_RW(gt_min_freq_mhz);

/* Efficient (RPe) frequency, only registered on Valleyview/Cherryview. */
static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);

/* One handler serves all three fixed RP-state attributes below. */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
450 
451 /* For now we have a static number of RP states */
452 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
453 {
454 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
455 	struct intel_rps *rps = &dev_priv->gt.rps;
456 	u32 val;
457 
458 	if (attr == &dev_attr_gt_RP0_freq_mhz)
459 		val = intel_gpu_freq(rps, rps->rp0_freq);
460 	else if (attr == &dev_attr_gt_RP1_freq_mhz)
461 		val = intel_gpu_freq(rps, rps->rp1_freq);
462 	else if (attr == &dev_attr_gt_RPn_freq_mhz)
463 		val = intel_gpu_freq(rps, rps->min_freq);
464 	else
465 		BUG();
466 
467 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
468 }
469 
/* Attribute set registered for GEN6+ devices. */
static const struct attribute * const gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

/* Valleyview/Cherryview additionally expose the efficient frequency. */
static const struct attribute * const vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
494 
495 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
496 
497 static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
498 				struct bin_attribute *attr, char *buf,
499 				loff_t off, size_t count)
500 {
501 
502 	struct device *kdev = kobj_to_dev(kobj);
503 	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
504 	struct i915_gpu_coredump *gpu;
505 	ssize_t ret;
506 
507 	gpu = i915_first_error_state(i915);
508 	if (IS_ERR(gpu)) {
509 		ret = PTR_ERR(gpu);
510 	} else if (gpu) {
511 		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
512 		i915_gpu_coredump_put(gpu);
513 	} else {
514 		const char *str = "No error state collected\n";
515 		size_t len = strlen(str);
516 
517 		ret = min_t(size_t, count, len - off);
518 		memcpy(buf, str + off, ret);
519 	}
520 
521 	return ret;
522 }
523 
524 static ssize_t error_state_write(struct file *file, struct kobject *kobj,
525 				 struct bin_attribute *attr, char *buf,
526 				 loff_t off, size_t count)
527 {
528 	struct device *kdev = kobj_to_dev(kobj);
529 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
530 
531 	drm_dbg(&dev_priv->drm, "Resetting error state\n");
532 	i915_reset_error_state(dev_priv);
533 
534 	return count;
535 }
536 
/*
 * "error" bin attribute; size 0 means the file length is unbounded,
 * so the read handler must bound offsets itself.
 */
static const struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
544 
545 static void i915_setup_error_capture(struct device *kdev)
546 {
547 	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
548 		DRM_ERROR("error_state sysfs setup failed\n");
549 }
550 
/* Remove the "error" sysfs bin file created at setup time. */
static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
555 #else
556 static void i915_setup_error_capture(struct device *kdev) {}
557 static void i915_teardown_error_capture(struct device *kdev) {}
558 #endif
559 
560 #endif /* __linux__ */
561 
/*
 * Register the i915 sysfs interface on the primary DRM minor: RC6
 * residency groups (merged into the device "power" group), L3 parity
 * bin files, RPS frequency attributes, error-state capture and
 * per-engine nodes.  Failures are logged but not fatal.  Compiled out
 * on non-Linux builds.
 */
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
#ifdef __linux__
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			drm_err(&dev_priv->drm,
				"l3 parity sysfs setup failed\n");

		/* A second L3 slice gets its own bin file. */
		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				drm_err(&dev_priv->drm,
					"l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (INTEL_GEN(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);

	intel_engines_add_sysfs(dev_priv);
#endif /* __linux__ */
}
619 
620 void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
621 {
622 #ifdef __linux__
623 	struct device *kdev = dev_priv->drm.primary->kdev;
624 
625 	i915_teardown_error_capture(kdev);
626 
627 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
628 		sysfs_remove_files(&kdev->kobj, vlv_attrs);
629 	else
630 		sysfs_remove_files(&kdev->kobj, gen6_attrs);
631 	device_remove_bin_file(kdev,  &dpf_attrs_1);
632 	device_remove_bin_file(kdev,  &dpf_attrs);
633 #ifdef CONFIG_PM
634 	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
635 	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
636 #endif
637 #endif /* __linux__ */
638 }
639