xref: /dragonfly/sys/dev/drm/radeon/radeon_pm.c (revision f2187f0a)
1 /*
2  * Permission is hereby granted, free of charge, to any person obtaining a
3  * copy of this software and associated documentation files (the "Software"),
4  * to deal in the Software without restriction, including without limitation
5  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6  * and/or sell copies of the Software, and to permit persons to whom the
7  * Software is furnished to do so, subject to the following conditions:
8  *
9  * The above copyright notice and this permission notice shall be included in
10  * all copies or substantial portions of the Software.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18  * OTHER DEALINGS IN THE SOFTWARE.
19  *
20  * Authors: Rafał Miłecki <zajec5@gmail.com>
21  *          Alex Deucher <alexdeucher@gmail.com>
22  */
23 #include <drm/drmP.h>
24 #include "radeon.h"
25 #include "avivod.h"
26 #include "atom.h"
27 #include <linux/power_supply.h>
28 #include <linux/hwmon.h>
29 
30 #include <sys/power.h>
31 #include <sys/sensors.h>
32 
33 #define RADEON_IDLE_LOOP_MS 100
34 #define RADEON_RECLOCK_DELAY_MS 200
35 #define RADEON_WAIT_VBLANK_TIMEOUT 200
36 
/* Human-readable names for the user-visible power state types; indexed
 * by enum radeon_pm_state_type in radeon_pm_print_states().  The first
 * entry is intentionally empty. */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
44 
45 #ifdef DUMBBELL_WIP
46 static void radeon_dynpm_idle_work_handler(struct work_struct *work);
47 #endif /* DUMBBELL_WIP */
48 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
49 static bool radeon_pm_in_vbl(struct radeon_device *rdev);
50 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
51 static void radeon_pm_update_profile(struct radeon_device *rdev);
52 static void radeon_pm_set_clocks(struct radeon_device *rdev);
53 
54 int radeon_pm_get_type_index(struct radeon_device *rdev,
55 			     enum radeon_pm_state_type ps_type,
56 			     int instance)
57 {
58 	int i;
59 	int found_instance = -1;
60 
61 	for (i = 0; i < rdev->pm.num_power_states; i++) {
62 		if (rdev->pm.power_state[i].type == ps_type) {
63 			found_instance++;
64 			if (found_instance == instance)
65 				return i;
66 		}
67 	}
68 	/* return default if no match */
69 	return rdev->pm.default_power_state_index;
70 }
71 
72 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
73 {
74 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
75 		mutex_lock(&rdev->pm.mutex);
76 		if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
77 			rdev->pm.dpm.ac_power = true;
78 		else
79 			rdev->pm.dpm.ac_power = false;
80 		if (rdev->family == CHIP_ARUBA) {
81 			if (rdev->asic->dpm.enable_bapm)
82 				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
83 		}
84 		mutex_unlock(&rdev->pm.mutex);
85         } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
86 		if (rdev->pm.profile == PM_PROFILE_AUTO) {
87 			mutex_lock(&rdev->pm.mutex);
88 			radeon_pm_update_profile(rdev);
89 			radeon_pm_set_clocks(rdev);
90 			mutex_unlock(&rdev->pm.mutex);
91 		}
92 	}
93 }
94 
95 static void radeon_pm_update_profile(struct radeon_device *rdev)
96 {
97 	switch (rdev->pm.profile) {
98 	case PM_PROFILE_DEFAULT:
99 		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
100 		break;
101 	case PM_PROFILE_AUTO:
102 		if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE) {
103 			if (rdev->pm.active_crtc_count > 1)
104 				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
105 			else
106 				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
107 		} else {
108 			if (rdev->pm.active_crtc_count > 1)
109 				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
110 			else
111 				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
112 		}
113 		break;
114 	case PM_PROFILE_LOW:
115 		if (rdev->pm.active_crtc_count > 1)
116 			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
117 		else
118 			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
119 		break;
120 	case PM_PROFILE_MID:
121 		if (rdev->pm.active_crtc_count > 1)
122 			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
123 		else
124 			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
125 		break;
126 	case PM_PROFILE_HIGH:
127 		if (rdev->pm.active_crtc_count > 1)
128 			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
129 		else
130 			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
131 		break;
132 	}
133 
134 	if (rdev->pm.active_crtc_count == 0) {
135 		rdev->pm.requested_power_state_index =
136 			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
137 		rdev->pm.requested_clock_mode_index =
138 			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
139 	} else {
140 		rdev->pm.requested_power_state_index =
141 			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
142 		rdev->pm.requested_clock_mode_index =
143 			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
144 	}
145 }
146 
147 static void radeon_unmap_vram_bos(struct radeon_device *rdev)
148 {
149 	struct radeon_bo *bo, *n;
150 
151 	if (list_empty(&rdev->gem.objects))
152 		return;
153 
154 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
155 		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
156 			ttm_bo_unmap_virtual(&bo->tbo);
157 	}
158 }
159 
/*
 * radeon_sync_with_vblank - wait for the next vblank before reclocking
 *
 * When any CRTC is active, clears pm.vblank_sync and (once the
 * wait-queue support is ported) blocks until the vblank IRQ sets it or
 * RADEON_WAIT_VBLANK_TIMEOUT ms elapse.  The wait itself is currently
 * compiled out behind DUMBBELL_WIP, so only the flag is reset.
 */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
#ifdef DUMBBELL_WIP
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif /* DUMBBELL_WIP */
	}
}
171 
/*
 * radeon_set_power_state - program the requested power state
 *
 * Applies pm.requested_power_state_index / pm.requested_clock_mode_index:
 * chooses engine (sclk) and memory (mclk) clocks clamped to the default
 * clocks, reprograms voltage/PCIe lanes before raising or after lowering
 * clocks, and updates the current-state bookkeeping.  Does nothing when
 * the requested state is already current or the GUI is not idle.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	/* nothing to do if the requested state is already programmed */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		/* never clock above the default engine clock */
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		/* never clock above the default memory clock */
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm only reclocks while inside the vertical blank */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
251 
/*
 * radeon_pm_set_clocks - safely switch to the requested power state
 *
 * Serializes against modesetting (struct_mutex), memory-clock users
 * (mclk_lock) and command submission (ring_lock), drains all ready
 * rings, unmaps VRAM buffers and holds a vblank reference on every
 * active CRTC while radeon_set_power_state() reprograms the hardware,
 * then refreshes the display watermarks.  Bails out early (without
 * resetting) if a fence wait fails, since that needs a GPU reset.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	mutex_lock(&rdev->ddev->struct_mutex);
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			mutex_unlock(&rdev->ddev->struct_mutex);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* hold a vblank reference on each active CRTC across the reclock */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* release the vblank references taken above */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
314 
/*
 * radeon_pm_print_states - dump the parsed power states to the debug log
 *
 * For every power state: its type name, default/PCIe-lane/single-display
 * attributes and each clock mode's engine/memory clocks and voltage.
 * Clocks are printed multiplied by 10 (presumably stored in 10 kHz
 * units as in the Linux driver — TODO confirm).  IGPs only print the
 * engine clock since they have no dedicated memory clock info.
 */
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
			radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}
348 
349 #ifdef DUMBBELL_WIP
350 static ssize_t radeon_get_pm_profile(struct device *dev,
351 				     struct device_attribute *attr,
352 				     char *buf)
353 {
354 	struct drm_device *ddev = dev_get_drvdata(dev);
355 	struct radeon_device *rdev = ddev->dev_private;
356 	int cp = rdev->pm.profile;
357 
358 	return ksnprintf(buf, PAGE_SIZE, "%s\n",
359 			(cp == PM_PROFILE_AUTO) ? "auto" :
360 			(cp == PM_PROFILE_LOW) ? "low" :
361 			(cp == PM_PROFILE_MID) ? "mid" :
362 			(cp == PM_PROFILE_HIGH) ? "high" : "default");
363 }
364 
/*
 * radeon_set_pm_profile - sysfs store callback for "power_profile"
 *
 * Parses one of "default"/"auto"/"low"/"mid"/"high" (matched by
 * keyword prefix), switches the legacy profile and reclocks.  Returns
 * -EINVAL when the card is powered off (PX), the pm method is not
 * "profile", or the input matches no keyword; otherwise returns count.
 */
static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set profile when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		/* apply the newly selected profile immediately */
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
404 
405 static ssize_t radeon_get_pm_method(struct device *dev,
406 				    struct device_attribute *attr,
407 				    char *buf)
408 {
409 	struct drm_device *ddev = dev_get_drvdata(dev);
410 	struct radeon_device *rdev = ddev->dev_private;
411 	int pm = rdev->pm.pm_method;
412 
413 	return ksnprintf(buf, PAGE_SIZE, "%s\n",
414 			(pm == PM_METHOD_DYNPM) ? "dynpm" :
415 			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
416 }
417 
/*
 * radeon_set_pm_method - sysfs store callback for "power_method"
 *
 * Switches between the "dynpm" and "profile" legacy methods and
 * recomputes the clocks.  Returns -EINVAL when the card is powered off
 * (PX), when dpm is active (legacy methods unsupported then), or on
 * unrecognized input.  NOTE(review): pm.pm_method is read before
 * pm.mutex is taken — looks racy with concurrent writers; confirm.
 */
static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
#ifdef DUMBBELL_WIP
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
463 
464 static ssize_t radeon_get_dpm_state(struct device *dev,
465 				    struct device_attribute *attr,
466 				    char *buf)
467 {
468 	struct drm_device *ddev = dev_get_drvdata(dev);
469 	struct radeon_device *rdev = ddev->dev_private;
470 	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
471 
472 	return snprintf(buf, PAGE_SIZE, "%s\n",
473 			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
474 			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
475 }
476 
/*
 * radeon_set_dpm_state - sysfs store callback for "power_dpm_state"
 *
 * Sets dpm.user_state to battery/balanced/performance under pm.mutex
 * and, unless the card is a powered-down PX device, recomputes the
 * clocks.  Returns -EINVAL on unrecognized input, otherwise count.
 */
static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&rdev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(rdev->flags & RADEON_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		radeon_pm_compute_clocks(rdev);

fail:
	return count;
}
507 
508 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
509 						       struct device_attribute *attr,
510 						       char *buf)
511 {
512 	struct drm_device *ddev = dev_get_drvdata(dev);
513 	struct radeon_device *rdev = ddev->dev_private;
514 	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
515 
516 	if  ((rdev->flags & RADEON_IS_PX) &&
517 	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
518 		return ksnprintf(buf, PAGE_SIZE, "off\n");
519 
520 	return snprintf(buf, PAGE_SIZE, "%s\n",
521 			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
522 			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
523 }
524 
/*
 * radeon_set_dpm_forced_performance_level - sysfs store callback for
 * "power_dpm_force_performance_level"
 *
 * Forces the dpm level to low/high/auto through the ASIC callback.
 * Returns -EINVAL when the card is off (PX), the input is unknown, a
 * thermal override is active, or the ASIC call fails; otherwise count.
 */
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		/* refuse while a thermal override is in effect */
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
565 
/* sysfs attributes: legacy profile/method controls plus dpm state and
 * forced-performance-level controls (all root-writable, world-readable) */
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   radeon_get_dpm_forced_performance_level,
		   radeon_set_dpm_forced_performance_level);
572 
573 static ssize_t radeon_hwmon_show_temp(struct device *dev,
574 				      struct device_attribute *attr,
575 				      char *buf)
576 {
577 	struct radeon_device *rdev = dev_get_drvdata(dev);
578 	struct drm_device *ddev = rdev->ddev;
579 	int temp;
580 
581 	/* Can't get temperature when the card is off */
582 	if  ((rdev->flags & RADEON_IS_PX) &&
583 	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
584 		return -EINVAL;
585 
586 	if (rdev->asic->pm.get_temperature)
587 		temp = radeon_get_temperature(rdev);
588 	else
589 		temp = 0;
590 
591 	return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
592 }
593 
594 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
595 					     struct device_attribute *attr,
596 					     char *buf)
597 {
598 	struct radeon_device *rdev = dev_get_drvdata(dev);
599 	int hyst = to_sensor_dev_attr(attr)->index;
600 	int temp;
601 
602 	if (hyst)
603 		temp = rdev->pm.dpm.thermal.min_temp;
604 	else
605 		temp = rdev->pm.dpm.thermal.max_temp;
606 
607 	return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
608 }
609 
/* temp1_input: current temperature; temp1_crit: dpm max_temp;
 * temp1_crit_hyst: dpm min_temp (index selects the threshold in
 * radeon_hwmon_show_temp_thresh()) */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);

/* NULL-terminated attribute list for the hwmon group below */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	NULL
};
620 
621 static umode_t hwmon_attributes_visible(struct kobject *kobj,
622 					struct attribute *attr, int index)
623 {
624 	struct device *dev = container_of(kobj, struct device, kobj);
625 	struct radeon_device *rdev = dev_get_drvdata(dev);
626 
627 	/* Skip limit attributes if DPM is not enabled */
628 	if (rdev->pm.pm_method != PM_METHOD_DPM &&
629 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
630 	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
631 		return 0;
632 
633 	return attr->mode;
634 }
635 
/* hwmon attribute group; per-attribute visibility is filtered through
 * hwmon_attributes_visible() */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

/* NULL-terminated group list for hwmon registration */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
645 #endif /* DUMBBELL_WIP */
646 
/*
 * radeon_hwmon_refresh - periodic sensor-framework poll callback
 *
 * Reads the GPU temperature and publishes it via the kernel sensor
 * framework as temp * 1000 + 273150000 — i.e. converted to the sensor
 * framework's micro-Kelvin scale (assumes radeon_get_temperature()
 * returns millidegrees Celsius as in Linux — TODO confirm).  Status is
 * CRIT at/above dpm.thermal.max_temp, WARN at/above min_temp, else OK.
 * A powered-down PX card reports "unknown"; a chip without a readable
 * sensor reports "invalid".
 */
static void
radeon_hwmon_refresh(void *arg)
{
	struct radeon_device *rdev = (struct radeon_device *)arg;
	struct drm_device *ddev = rdev->ddev;
	struct ksensor *s = rdev->pm.int_sensor;
	int temp;
	enum sensor_status stat;

	/* Can't get temperature when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		sensor_set_unknown(s);
		s->status = SENSOR_S_OK;
		return;
	}

	if (rdev->asic->pm.get_temperature == NULL) {
		sensor_set_invalid(s);
		return;
	}

	temp = radeon_get_temperature(rdev);
	if (temp >= rdev->pm.dpm.thermal.max_temp)
		stat = SENSOR_S_CRIT;
	else if (temp >= rdev->pm.dpm.thermal.min_temp)
		stat = SENSOR_S_WARN;
	else
		stat = SENSOR_S_OK;

	sensor_set(s, temp * 1000 + 273150000, stat);
}
679 
/*
 * radeon_hwmon_init - register the GPU temperature with the sensor
 * framework
 *
 * For ASIC families with a supported internal thermal sensor and a
 * get_temperature callback, allocates a ksensor/ksensordev pair named
 * after the device, schedules radeon_hwmon_refresh() every 5 seconds
 * and installs the sensor device.  Always returns 0; chips without a
 * usable sensor are silently skipped.
 */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	rdev->pm.int_sensor = NULL;
	rdev->pm.int_sensordev = NULL;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		/* no way to read the temperature on this chip */
		if (rdev->asic->pm.get_temperature == NULL)
			return err;

		rdev->pm.int_sensor = kmalloc(sizeof(*rdev->pm.int_sensor),
		    M_DRM, M_ZERO | M_WAITOK);
		rdev->pm.int_sensordev = kmalloc(
		    sizeof(*rdev->pm.int_sensordev), M_DRM,
		    M_ZERO | M_WAITOK);
		strlcpy(rdev->pm.int_sensordev->xname,
		    device_get_nameunit(rdev->dev->bsddev),
		    sizeof(rdev->pm.int_sensordev->xname));
		rdev->pm.int_sensor->type = SENSOR_TEMP;
		/* invalid until the first refresh has run */
		rdev->pm.int_sensor->flags |= SENSOR_FINVALID;
		sensor_attach(rdev->pm.int_sensordev, rdev->pm.int_sensor);
		sensor_task_register(rdev, radeon_hwmon_refresh, 5);
		sensordev_install(rdev->pm.int_sensordev);
		break;
	default:
		break;
	}

	return err;
}
719 
720 static void radeon_hwmon_fini(struct radeon_device *rdev)
721 {
722 	if (rdev->pm.int_sensor != NULL && rdev->pm.int_sensordev != NULL) {
723 		sensordev_deinstall(rdev->pm.int_sensordev);
724 		sensor_task_unregister(rdev);
725 		kfree(rdev->pm.int_sensor);
726 		kfree(rdev->pm.int_sensordev);
727 		rdev->pm.int_sensor = NULL;
728 		rdev->pm.int_sensordev = NULL;
729 	}
730 }
731 
/*
 * radeon_dpm_thermal_work_handler - thermal event bottom half
 *
 * Chooses between the internal THERMAL state and the user-selected
 * state: with a readable sensor, stay thermal until the temperature
 * drops below dpm.thermal.min_temp; without one, trust the
 * high_to_low direction flag.  Updates dpm.state/thermal_active under
 * pm.mutex, then re-evaluates the clocks.
 */
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}
764 
/*
 * radeon_dpm_pick_power_state - select the best radeon_ps for a state
 *
 * Scans rdev->pm.dpm.ps for a state matching @dpm_state, honouring
 * single-display-only restrictions (and the vblank-too-short check
 * that forces multi-display treatment).  User "performance" requests
 * first try the dedicated 3D-performance class on older ASICs, and
 * "balanced" maps to performance since no balanced states exist.  When
 * no state matches, @dpm_state is downgraded along a fallback chain
 * (UVD SD -> HD -> uvd_ps/performance, thermal -> ACPI -> battery ->
 * performance) and the table is rescanned.  Returns NULL only if even
 * the fallbacks fail.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && rdev->asic->dpm.vblank_too_short) {
		if (radeon_dpm_vblank_too_short(rdev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
898 
/*
 * radeon_dpm_change_power_state_locked - select and program a new dpm state
 *
 * Picks the power state that matches the current dpm state (after applying
 * any pending user override), and reprograms the hardware only if something
 * actually changed.  Caller must already hold rdev->pm.mutex (see
 * radeon_pm_compute_clocks_dpm()).
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		/* thermal and uvd states take priority over the user request */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		/* verbose mode: dump the state transition */
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	/* lock ordering: struct_mutex -> mclk_lock -> ring_lock */
	mutex_lock(&rdev->ddev->struct_mutex);
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
1025 
/*
 * radeon_dpm_enable_uvd - mark UVD (video decode) as active or idle
 *
 * On asics with UVD powergating this directly gates/ungates the block;
 * otherwise it records the UVD activity in the dpm state and triggers a
 * clock recalculation.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
			/* with the block below compiled out, the state always
			 * becomes POWER_STATE_TYPE_INTERNAL_UVD regardless of
			 * the sd/hd stream counts */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}
1067 
1068 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
1069 {
1070 	if (enable) {
1071 		mutex_lock(&rdev->pm.mutex);
1072 		rdev->pm.dpm.vce_active = true;
1073 		/* XXX select vce level based on ring/task */
1074 		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
1075 		mutex_unlock(&rdev->pm.mutex);
1076 	} else {
1077 		mutex_lock(&rdev->pm.mutex);
1078 		rdev->pm.dpm.vce_active = false;
1079 		mutex_unlock(&rdev->pm.mutex);
1080 	}
1081 
1082 	radeon_pm_compute_clocks(rdev);
1083 }
1084 
1085 static void radeon_pm_suspend_old(struct radeon_device *rdev)
1086 {
1087 	mutex_lock(&rdev->pm.mutex);
1088 	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1089 		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1090 			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1091 	}
1092 	mutex_unlock(&rdev->pm.mutex);
1093 
1094 #ifdef DUMBBELL_WIP
1095 	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1096 #endif /* DUMBBELL_WIP */
1097 }
1098 
1099 static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1100 {
1101 	mutex_lock(&rdev->pm.mutex);
1102 	/* disable dpm */
1103 	radeon_dpm_disable(rdev);
1104 	/* reset the power state */
1105 	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1106 	rdev->pm.dpm_enabled = false;
1107 	mutex_unlock(&rdev->pm.mutex);
1108 }
1109 
1110 void radeon_pm_suspend(struct radeon_device *rdev)
1111 {
1112 	if (rdev->pm.pm_method == PM_METHOD_DPM)
1113 		radeon_pm_suspend_dpm(rdev);
1114 	else
1115 		radeon_pm_suspend_old(rdev);
1116 }
1117 
/*
 * radeon_pm_resume_old - resume handler for the profile/dynpm methods
 *
 * Restores default clocks/voltages where possible, resets the pm
 * bookkeeping to the defaults (asic init cleared the hw state), restarts
 * dynpm if it was suspended, and recomputes clocks.
 */
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	/* only BTC..Cayman with MC firmware present get reprogrammed here */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	/* resume dynpm reclocking if radeon_pm_suspend_old() paused it */
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
#ifdef DUMBBELL_WIP
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
1156 
/*
 * radeon_pm_resume_dpm - resume handler for the dpm method
 *
 * Re-enables the dpm controller from the boot state.  On failure, falls
 * back to programming the default clocks/voltages directly (where the MC
 * firmware allows it) and leaves dpm disabled.
 */
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	/* best-effort fallback: restore default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}
1189 
1190 void radeon_pm_resume(struct radeon_device *rdev)
1191 {
1192 	if (rdev->pm.pm_method == PM_METHOD_DPM)
1193 		radeon_pm_resume_dpm(rdev);
1194 	else
1195 		radeon_pm_resume_old(rdev);
1196 }
1197 
/*
 * radeon_pm_init_old - initialize the profile/dynpm power management paths
 *
 * Parses power states from the BIOS, programs the default clocks and
 * voltages where the MC firmware allows it, sets up the internal thermal
 * sensor, and registers the pm interfaces when more than one power state
 * exists.  Returns 0 on success or a negative error from
 * radeon_hwmon_init().
 */
static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		/* power tables live in either the ATOM or combios image */
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

#ifdef DUMBBELL_WIP
	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
#endif /* DUMBBELL_WIP */

	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */
		/* NOTE(review): with DUMBBELL_WIP undefined the assignments
		 * below are compiled out, so these "if (ret)" checks only see
		 * the 0 left by radeon_hwmon_init() and never fire. */
#ifdef DUMBBELL_WIP
		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
#endif /* DUMBBELL_WIP */
		if (ret)
			DRM_ERROR("failed to create device file for power profile\n");
#ifdef DUMBBELL_WIP
		ret = device_create_file(rdev->dev, &dev_attr_power_method);
#endif /* DUMBBELL_WIP */
		if (ret)
			DRM_ERROR("failed to create device file for power method\n");

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}
1268 
1269 static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1270 {
1271 	int i;
1272 
1273 	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1274 		printk("== power state %d ==\n", i);
1275 		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1276 	}
1277 }
1278 
/*
 * radeon_pm_init_dpm - initialize the dpm power management path
 *
 * Parses power states from the ATOM BIOS (dpm requires one), sets up the
 * thermal sensor and worker, and enables the dpm controller.  On failure,
 * programs the default clocks/voltages directly where the MC firmware
 * allows it and returns the error.  Returns 0 on success, -EINVAL when no
 * ATOM BIOS is present, or a negative error code.
 */
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* dpm power tables only exist in ATOM BIOS images */
	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	/* start from the boot state until the first state change */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

#ifdef TODO_DEVICE_FILE
	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
	if (ret)
		DRM_ERROR("failed to create device file for dpm state\n");
	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret)
		DRM_ERROR("failed to create device file for dpm state\n");
	/* XXX: these are noops for dpm but are here for backwards compat */
	ret = device_create_file(rdev->dev, &dev_attr_power_profile);
	if (ret)
		DRM_ERROR("failed to create device file for power profile\n");
	ret = device_create_file(rdev->dev, &dev_attr_power_method);
	if (ret)
		DRM_ERROR("failed to create device file for power method\n");

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}
#endif

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	/* best-effort fallback: restore default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}
1359 
/*
 * radeon_pm_init - choose a pm method for this asic and initialize it
 *
 * Picks between dpm and the legacy profile method based on the chip
 * family, available firmware (RLC; SMC on RV770+ dGPUs), and the
 * radeon_dpm module parameter, then calls the matching init routine.
 * Returns 0 on success or a negative error code.
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	/* rv6xx/RS780/RS880/RV770: dpm only when explicitly requested
	 * (radeon_dpm == 1); default is the profile method */
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	/* newer families: dpm is the default unless explicitly disabled
	 * (radeon_dpm == 0) or the required firmware is missing */
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}
1433 
1434 int radeon_pm_late_init(struct radeon_device *rdev)
1435 {
1436 	int ret = 0;
1437 
1438 	if (rdev->pm.pm_method == PM_METHOD_DPM) {
1439 		mutex_lock(&rdev->pm.mutex);
1440 		ret = radeon_dpm_late_enable(rdev);
1441 		mutex_unlock(&rdev->pm.mutex);
1442 	}
1443 	return ret;
1444 }
1445 
1446 static void radeon_pm_fini_old(struct radeon_device *rdev)
1447 {
1448 	if (rdev->pm.num_power_states > 1) {
1449 		mutex_lock(&rdev->pm.mutex);
1450 		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1451 			rdev->pm.profile = PM_PROFILE_DEFAULT;
1452 			radeon_pm_update_profile(rdev);
1453 			radeon_pm_set_clocks(rdev);
1454 		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1455 			/* reset default clocks */
1456 			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1457 			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1458 			radeon_pm_set_clocks(rdev);
1459 		}
1460 		mutex_unlock(&rdev->pm.mutex);
1461 
1462 #ifdef DUMBBELL_WIP
1463 		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1464 
1465 		device_remove_file(rdev->dev, &dev_attr_power_profile);
1466 		device_remove_file(rdev->dev, &dev_attr_power_method);
1467 #endif /* DUMBBELL_WIP */
1468 	}
1469 
1470 	if (rdev->pm.power_state) {
1471 		int i;
1472 		for (i = 0; i < rdev->pm.num_power_states; ++i) {
1473 			kfree(rdev->pm.power_state[i].clock_info);
1474 		}
1475 		kfree(rdev->pm.power_state);
1476 		rdev->pm.power_state = NULL;
1477 		rdev->pm.num_power_states = 0;
1478 	}
1479 
1480 	radeon_hwmon_fini(rdev);
1481 }
1482 
1483 static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1484 {
1485 	if (rdev->pm.num_power_states > 1) {
1486 		mutex_lock(&rdev->pm.mutex);
1487 		radeon_dpm_disable(rdev);
1488 		mutex_unlock(&rdev->pm.mutex);
1489 
1490 #ifdef TODO_DEVICE_FILE
1491 		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1492 		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1493 		/* XXX backwards compat */
1494 		device_remove_file(rdev->dev, &dev_attr_power_profile);
1495 		device_remove_file(rdev->dev, &dev_attr_power_method);
1496 #endif
1497 	}
1498 	radeon_dpm_fini(rdev);
1499 
1500 	/* prevents leaking 440 bytes on OLAND */
1501 	if (rdev->pm.power_state) {
1502 		int i;
1503 		for (i = 0; i < rdev->pm.num_power_states; ++i) {
1504 			kfree(rdev->pm.power_state[i].clock_info);
1505 		}
1506 		kfree(rdev->pm.power_state);
1507 		rdev->pm.power_state = NULL;
1508 		rdev->pm.num_power_states = 0;
1509 	}
1510 
1511 	radeon_hwmon_fini(rdev);
1512 }
1513 
1514 void radeon_pm_fini(struct radeon_device *rdev)
1515 {
1516 	if (rdev->pm.pm_method == PM_METHOD_DPM)
1517 		radeon_pm_fini_dpm(rdev);
1518 	else
1519 		radeon_pm_fini_old(rdev);
1520 }
1521 
/*
 * radeon_pm_compute_clocks_old - recompute clocks for the profile/dynpm paths
 *
 * Recounts the active crtcs and then either reapplies the current profile
 * or drives the dynpm state machine: pause reclocking with multiple heads
 * active, reclock normally with one head, and drop to minimum clocks with
 * none.  No-op when fewer than two power states exist.
 */
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* recount which crtcs are scanning out */
	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				/* multiple heads: pause dynpm at default clocks
				 * (reclocking would need all heads in vblank) */
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
#ifdef DUMBBELL_WIP
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				/* single head: (re)activate dynpm reclocking */
				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

#ifdef DUMBBELL_WIP
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
#ifdef DUMBBELL_WIP
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				/* no heads: drop to minimum clocks */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
#ifdef DUMBBELL_WIP
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
1602 
1603 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1604 {
1605 	struct drm_device *ddev = rdev->ddev;
1606 	struct drm_crtc *crtc;
1607 	struct radeon_crtc *radeon_crtc;
1608 
1609 	if (!rdev->pm.dpm_enabled)
1610 		return;
1611 
1612 	mutex_lock(&rdev->pm.mutex);
1613 
1614 	/* update active crtc counts */
1615 	rdev->pm.dpm.new_active_crtcs = 0;
1616 	rdev->pm.dpm.new_active_crtc_count = 0;
1617 	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1618 		list_for_each_entry(crtc,
1619 				    &ddev->mode_config.crtc_list, head) {
1620 			radeon_crtc = to_radeon_crtc(crtc);
1621 			if (crtc->enabled) {
1622 				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1623 				rdev->pm.dpm.new_active_crtc_count++;
1624 			}
1625 		}
1626 	}
1627 
1628 	/* update battery/ac status */
1629 	if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
1630 		rdev->pm.dpm.ac_power = true;
1631 	else
1632 		rdev->pm.dpm.ac_power = false;
1633 
1634 	radeon_dpm_change_power_state_locked(rdev);
1635 
1636 	mutex_unlock(&rdev->pm.mutex);
1637 
1638 }
1639 
1640 void radeon_pm_compute_clocks(struct radeon_device *rdev)
1641 {
1642 	if (rdev->pm.pm_method == PM_METHOD_DPM)
1643 		radeon_pm_compute_clocks_dpm(rdev);
1644 	else
1645 		radeon_pm_compute_clocks_old(rdev);
1646 }
1647 
1648 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1649 {
1650 	int  crtc, vpos, hpos, vbl_status;
1651 	bool in_vbl = true;
1652 
1653 	/* Iterate over all active crtc's. All crtc's must be in vblank,
1654 	 * otherwise return in_vbl == false.
1655 	 */
1656 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1657 		if (rdev->pm.active_crtcs & (1 << crtc)) {
1658 			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0,
1659 								&vpos, &hpos, NULL, NULL,
1660 								&rdev->mode_info.crtcs[crtc]->base.hwmode);
1661 			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1662 			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1663 				in_vbl = false;
1664 		}
1665 	}
1666 
1667 	return in_vbl;
1668 }
1669 
1670 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
1671 {
1672 	u32 stat_crtc = 0;
1673 	bool in_vbl = radeon_pm_in_vbl(rdev);
1674 
1675 	if (in_vbl == false)
1676 		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
1677 			 finish ? "exit" : "entry");
1678 	return in_vbl;
1679 }
1680 
#ifdef DUMBBELL_WIP
/*
 * radeon_dynpm_idle_work_handler - periodic dynpm reclocking heuristic
 *
 * Runs every RADEON_IDLE_LOOP_MS while dynpm is active: counts pending
 * fences across the rings, plans an upclock when the GPU looks busy
 * (>= 3 outstanding fences) or a downclock when idle (0), and applies
 * the planned action once its RECLOCK_DELAY debounce timeout expires.
 * Entire handler is currently compiled out on DragonFly (DUMBBELL_WIP).
 */
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
				pm.dynpm_idle_work.work);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		/* sample GPU load: count unprocessed fences, capped at 3 */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			/* cancel a pending downclock, or schedule an upclock
			 * after the debounce delay */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			/* cancel a pending upclock, or schedule a downclock
			 * after the debounce delay */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		/* re-arm ourselves for the next sample */
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
#endif /* DUMBBELL_WIP */
1743 
1744 /*
1745  * Debugfs info
1746  */
1747 #if defined(CONFIG_DEBUG_FS)
1748 
/*
 * radeon_debugfs_pm_info - debugfs dump of the current pm state
 *
 * With dpm enabled, defers to the asic-specific performance-level
 * printer; otherwise prints the default/current clocks and voltage.
 * NOTE(review): the "%u0 kHz" formats append a literal '0' — presumably
 * the stored clock values are in 10 kHz units so the printout reads in
 * kHz; confirm against the clock-parsing code.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		/* PX (switchable graphics) asic currently powered down */
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&rdev->pm.mutex);
	} else {
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}
1784 
/* debugfs entries registered by radeon_debugfs_pm_init() below */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
1788 #endif
1789 
/*
 * radeon_debugfs_pm_init - register the pm debugfs files.
 * Returns 0 (trivially so when debugfs is compiled out).
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}
1798