/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

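/*
 * Default up (UTC) and down (DTC) temperature-compensation coefficient
 * tables, indexed 0..R600_PM_NUMBER_OF_TC-1; the r600_set_tc() helper
 * below programs one UTC/DTC pair per CG_FFCT_n register.
 */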
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
				radeon_crtc->hw_mode.clock;
			vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
				radeon_crtc->hw_mode.crtc_vdisplay +
				(radeon_crtc->v_border * 2);
			vblank_time_us = vblank_lines * line_time_us;
			break;
		}
	}

	return vblank_time_us;
}
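
/*
 * Worked example for r600_dpm_get_vblank_time(), using assumed (not
 * source-provided) 1080p timings: hw_mode.clock = 148500 (kHz),
 * crtc_htotal = 2200, crtc_vblank_end - crtc_vdisplay = 45, v_border = 0:
 *
 *   line_time_us   = (2200 * 1000) / 148500 = 14 us (integer division)
 *   vblank_lines   = 45
 *   vblank_time_us = 45 * 14 = 630 us
 *
 * With no enabled CRTC the loop falls through and the 0xffffffff sentinel
 * is returned, meaning vblank time is effectively unbounded.
 */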

void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
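
/*
 * Worked example for r600_calculate_u_and_p() with hypothetical inputs
 * i = 100, r_c = 625, p_b = 4 (values chosen for illustration only):
 *
 *   i_c = (100 * 625) / 100 = 625
 *   tmp = 625 >> 4 = 39, which takes 6 shifts to reach zero, so b_c = 6
 *   *u  = (6 + 1) / 2 = 3
 *   *p  = 625 / (1 << 6) = 9
 */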

int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}
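
/*
 * Worked example for r600_calculate_at() with hypothetical inputs
 * t = 5000, h = 5, fh = 600, fl = 300 (illustrative only):
 *
 *   k  = (100 * 600) / 300 = 200
 *   t1 = 5000 * (200 - 100) = 500000
 *   a  = (1000 * (100 * 5 + 500000)) / (10000 + 500000 / 100) = 33366
 *   a  = (33366 + 5) / 10 = 3337
 *   ah = ((3337 * 5000) + 5000) / 10000 = 1669, al = 3337 - 1669 = 1668
 *   *th = 5000 - 1669 = 3331, *tl = 5000 + 1668 = 6668
 */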

void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}
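
/*
 * Note on r600_gfx_clockgating_enable(): the disable path appears to
 * perform a request/acknowledge handshake with the RLC (write 0x2 to
 * CG_RLC_REQ_AND_RSP, poll until the response type field reads 1, then
 * clear the request) before forcing graphics power on via GRBM_PWR_CNTL;
 * the trailing RREG32(GRBM_PWR_CNTL) acts as a posting read to flush
 * the write.
 */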

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	return !!(RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN);
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}

void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}
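
/*
 * Layout notes for r600_voltage_control_program_voltages(), inferred from
 * the register math above: "ix = 3 - (3 & index)" maps the power-level
 * enum onto the reversed register ordering used by the CTXSW blocks (the
 * same mapping recurs in the r600_power_level_*() helpers below).  The
 * low 32 GPIO pins of a level go into that level's
 * CTXSW_VID_LOWER_GPIO_CNTL word, while pins 32..34 are packed as a
 * 3-bit field per level inside the shared VID_UPPER_GPIO_CNTL register
 * (mask 7 << (3 * ix)).
 */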

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}

void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}
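
/*
 * r600_start_dpm() sequencing, as read from the code above: engine and
 * memory clock control are disabled first, dynamic power management is
 * turned on, and the driver then waits out a vblank on both primary
 * CRTCs before toggling SPLL bypass twice (each toggle bracketed by a
 * wait for SPLL_CHG_STATUS) and finally re-enabling clock control.
 */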

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{
}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

int r600_set_thermal_temperature_range(struct radeon_device *rdev,
				       int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}
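
/*
 * Example usage of r600_set_thermal_temperature_range(), assuming a
 * typical 90..120 C DPM range (the values here are illustrative, not
 * taken from this file):
 *
 *   r600_set_thermal_temperature_range(rdev, 90000, 120000);
 *
 * Temperatures are passed in millidegrees C, clamped to 0..255 C, and
 * written to the interrupt/DPM threshold fields in whole degrees.
 */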

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

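/*
 * The unions below overlay every revision of the ATOM PowerPlay and fan
 * tables on the same BIOS image bytes; usTableSize (checked against
 * sizeof the v3/v4/v5 layouts in r600_parse_extended_power_table below)
 * decides which members are actually valid to read.
 */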
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};

static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) |
			(atom_table->entries[i].ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage);
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}
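
/*
 * Illustration of the ATOM clock packing handled in
 * r600_parse_clk_voltage_dep_table(): each entry splits a clock into a
 * little-endian 16-bit low word plus an 8-bit high byte.  For a
 * hypothetical usClockLow = 0x86a0 and ucClockHigh = 0x01:
 *
 *   clk = 0x86a0 | (0x01 << 16) = 0x186a0 = 100000
 *
 * i.e. 1 GHz if interpreted in the driver's usual 10 kHz clock units.
 */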

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)((uint8_t*)mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)((uint8_t*)mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return -ENOMEM;
			}

			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(psl->entries[i].usSclkLow) |
					(psl->entries[i].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(psl->entries[i].usMclkLow) |
					(psl->entries[i].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(psl->entries[i].usVoltage);
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
				return -ENOMEM;
			}
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
					le16_to_cpu(cac_table->entries[i].usVddc);
				rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
					le32_to_cpu(cac_table->entries[i].ulLeakageValue);
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ppm table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			((uint8_t*)mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
				kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	/* kfree() tolerates NULL, so each table can be freed unconditionally */
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
	kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
	kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
	kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
	kfree(rdev->pm.dpm.dyn_state.ppm_table);
}

enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}
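
/*
 * Example usage of r600_get_pcie_gen_support() (the mask value below is
 * hypothetical): with asic_gen == RADEON_PCIE_GEN_INVALID the default
 * branch is taken, so a caller asking for a GEN2 default on a
 * GEN2-capable system would get:
 *
 *   r600_get_pcie_gen_support(rdev, DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50,
 *                             RADEON_PCIE_GEN_INVALID, RADEON_PCIE_GEN2);
 *   -> RADEON_PCIE_GEN2
 */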