/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_pm.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <sys/power.h>
#include <drm/drmP.h>
#include <sys/sensors.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

#ifdef DUMBBELL_WIP
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
#endif /* DUMBBELL_WIP */
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

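/* Return the index of the instance-th power state whose type matches
 * ps_type, or the default power state index when there is no match.
 */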
int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

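/* Called on ACPI power events.  For dpm, refresh the cached AC/DC status
 * (the performance power profile is used as a proxy for AC power) and BAPM
 * on ARUBA; for the AUTO profile, re-evaluate the profile and clocks.
 */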
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
		if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->family == CHIP_ARUBA) {
			if (rdev->asic->dpm.enable_bapm)
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		lockmgr(&rdev->pm.mutex, LK_RELEASE);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			lockmgr(&rdev->pm.mutex, LK_RELEASE);
		}
	}
}

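/* Map the selected profile (and, for AUTO, the AC/DC status) to a profile
 * index, then derive the requested power state and clock mode from the
 * active crtc count.
 */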
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

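/* Drop the CPU mappings of all buffer objects placed in VRAM so nothing
 * accesses VRAM through a stale mapping while the clocks are changed.
 */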
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}

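/* Wait (bounded by RADEON_WAIT_VBLANK_TIMEOUT) for the vblank interrupt
 * handler to signal vblank_sync; currently a no-op while the wait code is
 * compiled out.
 */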
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
#ifdef DUMBBELL_WIP
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif /* DUMBBELL_WIP */
	}
}

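/* Program the requested power state: clamp sclk/mclk to the defaults,
 * order the voltage/pcie-lane change around the reclock, and switch the
 * engine and memory clocks once the GUI is idle.
 */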
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* Starting with BTC, there is one state that is used for both
		 * MH and SH.  The difference is that we always use the high
		 * clock index for mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

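/* Take the pm locks, drain the rings and drop VRAM mappings, then apply
 * the requested power state and refresh the display watermarks.
 */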
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	DRM_LOCK(rdev->ddev);
	lockmgr(&rdev->pm.mclk_lock, LK_EXCLUSIVE); // down_write
	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset; don't reset here */
			lockmgr(&rdev->ring_lock, LK_RELEASE);
			lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write
			DRM_UNLOCK(rdev->ddev);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	lockmgr(&rdev->ring_lock, LK_RELEASE);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write
	DRM_UNLOCK(rdev->ddev);
}

static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
			radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}

#ifdef DUMBBELL_WIP
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return ksnprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_MID) ? "mid" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set profile when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	lockmgr(&rdev->pm.mutex, LK_RELEASE);

	return count;
}

static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return ksnprintf(buf, PAGE_SIZE, "%s\n",
			(pm == PM_METHOD_DYNPM) ? "dynpm" :
			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
}

static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		lockmgr(&rdev->pm.mutex, LK_RELEASE);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		lockmgr(&rdev->pm.mutex, LK_RELEASE);
#ifdef DUMBBELL_WIP
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}

static ssize_t radeon_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		lockmgr(&rdev->pm.mutex, LK_RELEASE);
		count = -EINVAL;
		goto fail;
	}
	lockmgr(&rdev->pm.mutex, LK_RELEASE);

	/* Can't set dpm state when the card is off */
	if (!(rdev->flags & RADEON_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		radeon_pm_compute_clocks(rdev);

fail:
	return count;
}

static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}

static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	spin_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	spin_unlock(&rdev->pm.mutex);

	return count;
}

static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   radeon_get_dpm_forced_performance_level,
		   radeon_set_dpm_forced_performance_level);

static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	struct drm_device *ddev = rdev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (rdev->asic->pm.get_temperature)
		temp = radeon_get_temperature(rdev);
	else
		temp = 0;

	return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = rdev->pm.dpm.thermal.min_temp;
	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct radeon_device *rdev = dev_get_drvdata(dev);

	/* Skip limit attributes if DPM is not enabled */
	if (rdev->pm.pm_method != PM_METHOD_DPM &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
#endif /* DUMBBELL_WIP */

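/* Periodic sensors(9) refresh task: read the GPU temperature and grade it
 * against the dpm thermal thresholds.
 */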
static void
radeon_hwmon_refresh(void *arg)
{
	struct radeon_device *rdev = (struct radeon_device *)arg;
	struct drm_device *ddev = rdev->ddev;
	struct ksensor *s = rdev->pm.int_sensor;
	int temp;
	enum sensor_status stat;

	/* Can't get temperature when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		sensor_set_unknown(s);
		s->status = SENSOR_S_OK;
		return;
	}

	if (rdev->asic->pm.get_temperature == NULL) {
		sensor_set_invalid(s);
		return;
	}

	temp = radeon_get_temperature(rdev);
	if (temp >= rdev->pm.dpm.thermal.max_temp)
		stat = SENSOR_S_CRIT;
	else if (temp >= rdev->pm.dpm.thermal.min_temp)
		stat = SENSOR_S_WARN;
	else
		stat = SENSOR_S_OK;
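
	/* temp is in millidegrees C; sensor_set() expects microkelvin */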
	sensor_set(s, temp * 1000 + 273150000, stat);
}

static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	rdev->pm.int_sensor = NULL;
	rdev->pm.int_sensordev = NULL;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		if (rdev->asic->pm.get_temperature == NULL)
			return err;

		rdev->pm.int_sensor = kmalloc(sizeof(*rdev->pm.int_sensor),
		    M_DRM, M_ZERO | M_WAITOK);
		rdev->pm.int_sensordev = kmalloc(
		    sizeof(*rdev->pm.int_sensordev), M_DRM,
		    M_ZERO | M_WAITOK);
		strlcpy(rdev->pm.int_sensordev->xname,
		    device_get_nameunit(rdev->dev),
		    sizeof(rdev->pm.int_sensordev->xname));
		rdev->pm.int_sensor->type = SENSOR_TEMP;
		rdev->pm.int_sensor->flags |= SENSOR_FINVALID;
		sensor_attach(rdev->pm.int_sensordev, rdev->pm.int_sensor);
		sensor_task_register(rdev, radeon_hwmon_refresh, 5);
		sensordev_install(rdev->pm.int_sensordev);
		break;
	default:
		break;
	}

	return err;
}

static void radeon_hwmon_fini(struct radeon_device *rdev)
{
	if (rdev->pm.int_sensor != NULL && rdev->pm.int_sensordev != NULL) {
		sensordev_deinstall(rdev->pm.int_sensordev);
		sensor_task_unregister(rdev);
		kfree(rdev->pm.int_sensor);
		kfree(rdev->pm.int_sensordev);
		rdev->pm.int_sensor = NULL;
		rdev->pm.int_sensordev = NULL;
	}
}

static void radeon_dpm_thermal_work_handler(void *arg, int pending)
{
	struct radeon_device *rdev = arg;
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back to the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back to the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}

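/* Select the best matching dpm power state for the requested state type,
 * honoring single-display-only states and falling back through related
 * state types when no direct match exists.
 */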
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = rdev->pm.dpm.new_active_crtc_count < 2;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && rdev->asic->dpm.vblank_too_short) {
		if (radeon_dpm_vblank_too_short(rdev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

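/* Re-evaluate and, when needed, program a new dpm power state.  Called
 * with pm.mutex held; skips the full reprogram when only the display
 * configuration changed.
 */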
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	lockmgr(&rdev->ddev->struct_mutex, LK_EXCLUSIVE);
	lockmgr(&rdev->pm.mclk_lock, LK_EXCLUSIVE); // down_write
	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	lockmgr(&rdev->ring_lock, LK_RELEASE);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write
	lockmgr(&rdev->ddev->struct_mutex, LK_RELEASE);
}

void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but paused streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}

void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
{
	if (enable) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.dpm.vce_active = true;
		/* XXX select vce level based on ring/task */
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
		mutex_unlock(&rdev->pm.mutex);
	} else {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.dpm.vce_active = false;
		mutex_unlock(&rdev->pm.mutex);
	}

	radeon_pm_compute_clocks(rdev);
}

static void radeon_pm_suspend_old(struct radeon_device *rdev)
{
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	lockmgr(&rdev->pm.mutex, LK_RELEASE);

#ifdef DUMBBELL_WIP
	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */
}

static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
{
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	/* disable dpm */
	radeon_dpm_disable(rdev);
	/* reset the power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	rdev->pm.dpm_enabled = false;
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
}

void radeon_pm_suspend(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_suspend_dpm(rdev);
	else
		radeon_pm_suspend_old(rdev);
}

static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
#ifdef DUMBBELL_WIP
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
	}
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
	radeon_pm_compute_clocks(rdev);
}

static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume_dpm(rdev);
	else
		radeon_pm_resume_old(rdev);
}

static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

#ifdef DUMBBELL_WIP
	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
#endif /* DUMBBELL_WIP */

	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */
#ifdef DUMBBELL_WIP
		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
#endif /* DUMBBELL_WIP */
		if (ret)
			DRM_ERROR("failed to create device file for power profile\n");
#ifdef DUMBBELL_WIP
		ret = device_create_file(rdev->dev, &dev_attr_power_method);
#endif /* DUMBBELL_WIP */
		if (ret)
			DRM_ERROR("failed to create device file for power method\n");

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}

static void radeon_dpm_print_power_states(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		printk("== power state %d ==\n", i);
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
	}
}

static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	TASK_INIT(&rdev->pm.dpm.thermal.work, 0, radeon_dpm_thermal_work_handler, rdev);
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

#ifdef TODO_DEVICE_FILE
	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
	if (ret)
		DRM_ERROR("failed to create device file for dpm state\n");
	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret)
		DRM_ERROR("failed to create device file for dpm state\n");
	/* XXX: these are noops for dpm but are here for backwards compat */
	ret = device_create_file(rdev->dev, &dev_attr_power_profile);
	if (ret)
		DRM_ERROR("failed to create device file for power profile\n");
	ret = device_create_file(rdev->dev, &dev_attr_power_method);
	if (ret)
		DRM_ERROR("failed to create device file for power method\n");

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}
#endif

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}

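/* Pick the pm method for this asic: dpm needs the RLC microcode (plus SMC
 * on RV770+ dGPUs) and is further gated by the radeon_dpm parameter;
 * everything else falls back to the profile method.
 */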
int radeon_pm_init(struct radeon_device *rdev)
{
	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}

int radeon_pm_late_init(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
		ret = radeon_dpm_late_enable(rdev);
		lockmgr(&rdev->pm.mutex, LK_RELEASE);
	}
	return ret;
}

static void radeon_pm_fini_old(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		DRM_UNLOCK(rdev->ddev); /* Work around LOR. */
		lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		lockmgr(&rdev->pm.mutex, LK_RELEASE);
		DRM_LOCK(rdev->ddev);

#ifdef DUMBBELL_WIP
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif /* DUMBBELL_WIP */
	}

	if (rdev->pm.power_state) {
		int i;
		for (i = 0; i < rdev->pm.num_power_states; ++i) {
			kfree(rdev->pm.power_state[i].clock_info);
		}
		kfree(rdev->pm.power_state);
		rdev->pm.power_state = NULL;
		rdev->pm.num_power_states = 0;
	}

	radeon_hwmon_fini(rdev);
}

static void radeon_pm_fini_dpm(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
		radeon_dpm_disable(rdev);
		lockmgr(&rdev->pm.mutex, LK_RELEASE);

#ifdef TODO_DEVICE_FILE
		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
		/* XXX backwards compat */
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
	}
	radeon_dpm_fini(rdev);

	kfree(rdev->pm.power_state);

	radeon_hwmon_fini(rdev);
}

void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_fini_dpm(rdev);
	else
		radeon_pm_fini_old(rdev);
}

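/* Legacy (non-dpm) path: recount the active crtcs, then re-apply the
 * current profile or drive the dynpm state machine accordingly.
 */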
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
#ifdef DUMBBELL_WIP
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

#ifdef DUMBBELL_WIP
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
#ifdef DUMBBELL_WIP
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
#ifdef DUMBBELL_WIP
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	lockmgr(&rdev->pm.mutex, LK_RELEASE);
}

static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (!rdev->pm.dpm_enabled)
		return;

	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);

	/* update active crtc counts */
	rdev->pm.dpm.new_active_crtcs = 0;
	rdev->pm.dpm.new_active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled) {
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
		rdev->pm.dpm.ac_power = true;
	else
		rdev->pm.dpm.ac_power = false;

	radeon_dpm_change_power_state_locked(rdev);

	lockmgr(&rdev->pm.mutex, LK_RELEASE);
}

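/* Common entry point to re-evaluate clocks and power state after a
 * display or load change, dispatching to the dpm or legacy path.
 */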
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_compute_clocks_dpm(rdev);
	else
		radeon_pm_compute_clocks_old(rdev);
}

static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
				in_vbl = false;
		}
	}

	return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");
	return in_vbl;
}

#ifdef DUMBBELL_WIP
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
				pm.dynpm_idle_work.work);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
#endif /* DUMBBELL_WIP */

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		spin_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		spin_unlock(&rdev->pm.mutex);
	} else {
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}