1 /*	$NetBSD: radeon_rv6xx_dpm.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2011 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Alex Deucher
25  */
26 
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: radeon_rv6xx_dpm.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $");
29 
30 #include "radeon.h"
31 #include "radeon_asic.h"
32 #include "rv6xxd.h"
33 #include "r600_dpm.h"
34 #include "rv6xx_dpm.h"
35 #include "atom.h"
36 #include <linux/seq_file.h>
37 
38 static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
39 					u32 unscaled_count, u32 unit);
40 
rv6xx_get_ps(struct radeon_ps * rps)41 static struct rv6xx_ps *rv6xx_get_ps(struct radeon_ps *rps)
42 {
43 	struct rv6xx_ps *ps = rps->ps_priv;
44 
45 	return ps;
46 }
47 
rv6xx_get_pi(struct radeon_device * rdev)48 static struct rv6xx_power_info *rv6xx_get_pi(struct radeon_device *rdev)
49 {
50 	struct rv6xx_power_info *pi = rdev->pm.dpm.priv;
51 
52 	return pi;
53 }
54 
rv6xx_force_pcie_gen1(struct radeon_device * rdev)55 static void rv6xx_force_pcie_gen1(struct radeon_device *rdev)
56 {
57 	u32 tmp;
58 	int i;
59 
60 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
61 	tmp &= LC_GEN2_EN;
62 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
63 
64 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
65 	tmp |= LC_INITIATE_LINK_SPEED_CHANGE;
66 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
67 
68 	for (i = 0; i < rdev->usec_timeout; i++) {
69 		if (!(RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE))
70 			break;
71 		udelay(1);
72 	}
73 
74 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
75 	tmp &= ~LC_INITIATE_LINK_SPEED_CHANGE;
76 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
77 }
78 
rv6xx_enable_pcie_gen2_support(struct radeon_device * rdev)79 static void rv6xx_enable_pcie_gen2_support(struct radeon_device *rdev)
80 {
81 	u32 tmp;
82 
83 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
84 
85 	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
86 	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
87 		tmp |= LC_GEN2_EN;
88 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
89 	}
90 }
91 
/* Toggle hardware-driven voltage control for dynamic pcie gen2 switching. */
static void rv6xx_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					       bool enable)
{
	u32 speed_cntl;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
	speed_cntl |= LC_HW_VOLTAGE_IF_CONTROL(enable ? 1 : 0);
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
}
104 
rv6xx_enable_l0s(struct radeon_device * rdev)105 static void rv6xx_enable_l0s(struct radeon_device *rdev)
106 {
107 	u32 tmp;
108 
109 	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK;
110 	tmp |= LC_L0S_INACTIVITY(3);
111 	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
112 }
113 
rv6xx_enable_l1(struct radeon_device * rdev)114 static void rv6xx_enable_l1(struct radeon_device *rdev)
115 {
116 	u32 tmp;
117 
118 	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL);
119 	tmp &= ~LC_L1_INACTIVITY_MASK;
120 	tmp |= LC_L1_INACTIVITY(4);
121 	tmp &= ~LC_PMI_TO_L1_DIS;
122 	tmp &= ~LC_ASPM_TO_L1_DIS;
123 	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
124 }
125 
rv6xx_enable_pll_sleep_in_l1(struct radeon_device * rdev)126 static void rv6xx_enable_pll_sleep_in_l1(struct radeon_device *rdev)
127 {
128 	u32 tmp;
129 
130 	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK;
131 	tmp |= LC_L1_INACTIVITY(8);
132 	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
133 
134 	/* NOTE, this is a PCIE indirect reg, not PCIE PORT */
135 	tmp = RREG32_PCIE(PCIE_P_CNTL);
136 	tmp |= P_PLL_PWRDN_IN_L1L23;
137 	tmp &= ~P_PLL_BUF_PDNB;
138 	tmp &= ~P_PLL_PDNB;
139 	tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF;
140 	WREG32_PCIE(PCIE_P_CNTL, tmp);
141 }
142 
/*
 * Translate an engine clock (in 10 kHz units) into a VCO frequency plus
 * post-divider pair via the atom divider tables.
 *
 * Returns 0 on success or the atom error code on failure, in which case
 * *step is left untouched.
 */
static int rv6xx_convert_clock_to_stepping(struct radeon_device *rdev,
					   u32 clock, struct rv6xx_sclk_stepping *step)
{
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     clock, false, &dividers);
	if (ret)
		return ret;

	/* post_div encodes two nibbles of low/high pulse length. */
	step->post_divider = dividers.enable_post_div ?
		2 + (dividers.post_div & 0xF) + (dividers.post_div >> 4) : 1;
	step->vco_frequency = clock * step->post_divider;

	return 0;
}
163 
/*
 * Program one SPLL stepping table entry (@step_index) from @step.
 *
 * Derives the feedback divider from the step's VCO frequency, encodes the
 * post divider as low/high pulse lengths, and sets the per-entry step time.
 */
static void rv6xx_output_stepping(struct radeon_device *rdev,
				  u32 step_index, struct rv6xx_sclk_stepping *step)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	u32 ref_clk = rdev->clock.spll.reference_freq;
	u32 fb_divider;
	/* Step time is scaled by the reference divider, then rounded up
	 * to whole step units. */
	u32 spll_step_count = rv6xx_scale_count_given_unit(rdev,
							   R600_SPLLSTEPTIME_DFLT *
							   pi->spll_ref_div,
							   R600_SPLLSTEPUNIT_DFLT);

	r600_engine_clock_entry_enable(rdev, step_index, true);
	r600_engine_clock_entry_enable_pulse_skipping(rdev, step_index, false);

	if (step->post_divider == 1)
		r600_engine_clock_entry_enable_post_divider(rdev, step_index, false);
	else {
		/* Split (post_divider - 2) into low/high pulse nibbles. */
		u32 lo_len = (step->post_divider - 2) / 2;
		u32 hi_len = step->post_divider - 2 - lo_len;

		r600_engine_clock_entry_enable_post_divider(rdev, step_index, true);
		r600_engine_clock_entry_set_post_divider(rdev, step_index, (hi_len << 4) | lo_len);
	}

	fb_divider = ((step->vco_frequency * pi->spll_ref_div) / ref_clk) >>
		pi->fb_div_scale;

	r600_engine_clock_entry_set_reference_divider(rdev, step_index,
						      pi->spll_ref_div - 1);
	r600_engine_clock_entry_set_feedback_divider(rdev, step_index, fb_divider);
	r600_engine_clock_entry_set_step_time(rdev, step_index, spll_step_count);

}
197 
/*
 * Compute the next VCO step from @cur, moving by @step_size percent.
 * Increasing steps round down; decreasing steps round up so the chain
 * is guaranteed to make progress.  The post divider is carried over.
 */
static struct rv6xx_sclk_stepping rv6xx_next_vco_step(struct radeon_device *rdev,
						      struct rv6xx_sclk_stepping *cur,
						      bool increasing_vco, u32 step_size)
{
	struct rv6xx_sclk_stepping next;

	next.post_divider = cur->post_divider;
	next.vco_frequency = increasing_vco ?
		(cur->vco_frequency * (100 + step_size)) / 100 :
		(cur->vco_frequency * 100 + 99 + step_size) / (100 + step_size);

	return next;
}
213 
rv6xx_can_step_post_div(struct radeon_device * rdev,struct rv6xx_sclk_stepping * cur,struct rv6xx_sclk_stepping * target)214 static bool rv6xx_can_step_post_div(struct radeon_device *rdev,
215 				    struct rv6xx_sclk_stepping *cur,
216 				    struct rv6xx_sclk_stepping *target)
217 {
218 	return (cur->post_divider > target->post_divider) &&
219 		((cur->vco_frequency * target->post_divider) <=
220 		 (target->vco_frequency * (cur->post_divider - 1)));
221 }
222 
rv6xx_next_post_div_step(struct radeon_device * rdev,struct rv6xx_sclk_stepping * cur,struct rv6xx_sclk_stepping * target)223 static struct rv6xx_sclk_stepping rv6xx_next_post_div_step(struct radeon_device *rdev,
224 							   struct rv6xx_sclk_stepping *cur,
225 							   struct rv6xx_sclk_stepping *target)
226 {
227 	struct rv6xx_sclk_stepping next = *cur;
228 
229 	while (rv6xx_can_step_post_div(rdev, &next, target))
230 		next.post_divider--;
231 
232 	return next;
233 }
234 
/* True when @cur's VCO has reached or passed @target's in the step direction. */
static bool rv6xx_reached_stepping_target(struct radeon_device *rdev,
					  struct rv6xx_sclk_stepping *cur,
					  struct rv6xx_sclk_stepping *target,
					  bool increasing_vco)
{
	if (increasing_vco)
		return cur->vco_frequency >= target->vco_frequency;

	return cur->vco_frequency <= target->vco_frequency;
}
243 
/*
 * Build a chain of SPLL stepping entries walking the engine clock from
 * @low to @high, starting at table slot @start_index.
 *
 * Each intermediate step either lowers the post divider (when that alone
 * moves toward the target) or nudges the VCO by R600_VCOSTEPPCT_DFLT
 * percent.  The index of the last entry written is stored in *end_index.
 */
static void rv6xx_generate_steps(struct radeon_device *rdev,
				 u32 low, u32 high,
				 u32 start_index, u8 *end_index)
{
	struct rv6xx_sclk_stepping cur;
	struct rv6xx_sclk_stepping target;
	bool increasing_vco;
	u32 step_index = start_index;

	/* NOTE(review): conversion errors are ignored; on failure cur/target
	 * would be used uninitialized.  Kept as-is to match callers. */
	rv6xx_convert_clock_to_stepping(rdev, low, &cur);
	rv6xx_convert_clock_to_stepping(rdev, high, &target);

	rv6xx_output_stepping(rdev, step_index++, &cur);

	increasing_vco = (target.vco_frequency >= cur.vco_frequency);

	/* Start from the larger post divider so it only ever decreases. */
	if (target.post_divider > cur.post_divider)
		cur.post_divider = target.post_divider;

	while (1) {
		struct rv6xx_sclk_stepping next;

		/* Prefer a post-divider step; otherwise move the VCO. */
		if (rv6xx_can_step_post_div(rdev, &cur, &target))
			next = rv6xx_next_post_div_step(rdev, &cur, &target);
		else
			next = rv6xx_next_vco_step(rdev, &cur, increasing_vco, R600_VCOSTEPPCT_DFLT);

		if (rv6xx_reached_stepping_target(rdev, &next, &target, increasing_vco)) {
			/* Emit a small landing step just short of the target
			 * VCO, unless we are already within it. */
			struct rv6xx_sclk_stepping tiny =
				rv6xx_next_vco_step(rdev, &target, !increasing_vco, R600_ENDINGVCOSTEPPCT_DFLT);
			tiny.post_divider = next.post_divider;

			if (!rv6xx_reached_stepping_target(rdev, &tiny, &cur, !increasing_vco))
				rv6xx_output_stepping(rdev, step_index++, &tiny);

			/* Settle the VCO at the target frequency before the
			 * final post-divider change, if both still differ. */
			if ((next.post_divider != target.post_divider) &&
			    (next.vco_frequency != target.vco_frequency)) {
				struct rv6xx_sclk_stepping final_vco;

				final_vco.vco_frequency = target.vco_frequency;
				final_vco.post_divider = next.post_divider;

				rv6xx_output_stepping(rdev, step_index++, &final_vco);
			}

			rv6xx_output_stepping(rdev, step_index++, &target);
			break;
		} else
			rv6xx_output_stepping(rdev, step_index++, &next);

		cur = next;
	}

	*end_index = (u8)step_index - 1;

}
300 
/* Program a single stepping-table entry for @clock at slot @index. */
static void rv6xx_generate_single_step(struct radeon_device *rdev,
				       u32 clock, u32 index)
{
	struct rv6xx_sclk_stepping stepping;

	rv6xx_convert_clock_to_stepping(rdev, clock, &stepping);
	rv6xx_output_stepping(rdev, index, &stepping);
}
309 
/* Disable every stepping entry strictly between start_index and end_index. */
static void rv6xx_invalidate_intermediate_steps_range(struct radeon_device *rdev,
						      u32 start_index, u32 end_index)
{
	u32 i;

	for (i = start_index + 1; i < end_index; i++)
		r600_engine_clock_entry_enable(rdev, i, false);
}
318 
/* Program the CLKS field of the engine SS register for power level @index. */
static void rv6xx_set_engine_spread_spectrum_clk_s(struct radeon_device *rdev,
						   u32 index, u32 clk_s)
{
	u32 reg = CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4);

	WREG32_P(reg, CLKS(clk_s), ~CLKS_MASK);
}
325 
/* Program the CLKV field of the engine SS register for power level @index. */
static void rv6xx_set_engine_spread_spectrum_clk_v(struct radeon_device *rdev,
						   u32 index, u32 clk_v)
{
	u32 reg = CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4);

	WREG32_P(reg, CLKV(clk_v), ~CLKV_MASK);
}
332 
/* Turn engine spread spectrum on/off for power level @index. */
static void rv6xx_enable_engine_spread_spectrum(struct radeon_device *rdev,
						u32 index, bool enable)
{
	u32 reg = CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4);

	WREG32_P(reg, enable ? SSEN : 0, ~SSEN);
}
343 
/* Program the CLKS field of the memory PLL spread spectrum register. */
static void rv6xx_set_memory_spread_spectrum_clk_s(struct radeon_device *rdev,
						   u32 clk_s)
{
	u32 field = CLKS(clk_s);

	WREG32_P(CG_MPLL_SPREAD_SPECTRUM, field, ~CLKS_MASK);
}
349 
/* Program the CLKV field of the memory PLL spread spectrum register. */
static void rv6xx_set_memory_spread_spectrum_clk_v(struct radeon_device *rdev,
						   u32 clk_v)
{
	u32 field = CLKV(clk_v);

	WREG32_P(CG_MPLL_SPREAD_SPECTRUM, field, ~CLKV_MASK);
}
355 
/* Turn memory PLL spread spectrum on/off. */
static void rv6xx_enable_memory_spread_spectrum(struct radeon_device *rdev,
						bool enable)
{
	WREG32_P(CG_MPLL_SPREAD_SPECTRUM, enable ? SSEN : 0, ~SSEN);
}
364 
/* Enable/disable dynamic spread spectrum in the power management block. */
static void rv6xx_enable_dynamic_spread_spectrum(struct radeon_device *rdev,
						 bool enable)
{
	WREG32_P(GENERAL_PWRMGT, enable ? DYN_SPREAD_SPECTRUM_EN : 0,
		 ~DYN_SPREAD_SPECTRUM_EN);
}
373 
/* Enable/disable the MPLL post divider for memory frequency level @index. */
static void rv6xx_memory_clock_entry_enable_post_divider(struct radeon_device *rdev,
							 u32 index, bool enable)
{
	u32 reg = MPLL_FREQ_LEVEL_0 + (index * 4);

	WREG32_P(reg, enable ? LEVEL0_MPLL_DIV_EN : 0, ~LEVEL0_MPLL_DIV_EN);
}
383 
/* Set the MPLL post divider field for memory frequency level @index. */
static void rv6xx_memory_clock_entry_set_post_divider(struct radeon_device *rdev,
						      u32 index, u32 divider)
{
	u32 reg = MPLL_FREQ_LEVEL_0 + (index * 4);

	WREG32_P(reg, LEVEL0_MPLL_POST_DIV(divider), ~LEVEL0_MPLL_POST_DIV_MASK);
}
390 
/* Set the MPLL feedback divider field for memory frequency level @index. */
static void rv6xx_memory_clock_entry_set_feedback_divider(struct radeon_device *rdev,
							  u32 index, u32 divider)
{
	u32 reg = MPLL_FREQ_LEVEL_0 + (index * 4);

	WREG32_P(reg, LEVEL0_MPLL_FB_DIV(divider), ~LEVEL0_MPLL_FB_DIV_MASK);
}
397 
/* Set the MPLL reference divider field for memory frequency level @index. */
static void rv6xx_memory_clock_entry_set_reference_divider(struct radeon_device *rdev,
							   u32 index, u32 divider)
{
	u32 reg = MPLL_FREQ_LEVEL_0 + (index * 4);

	WREG32_P(reg, LEVEL0_MPLL_REF_DIV(divider), ~LEVEL0_MPLL_REF_DIV_MASK);
}
404 
/* Program the backbias response time (BRT) field of VID_RT. */
static void rv6xx_vid_response_set_brt(struct radeon_device *rdev, u32 rt)
{
	u32 field = BRT(rt);

	WREG32_P(VID_RT, field, ~BRT_MASK);
}
409 
rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device * rdev)410 static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device *rdev)
411 {
412 	WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
413 }
414 
/* Number of clocks per step unit: each unit step quadruples it (4^unit). */
static u32 rv6xx_clocks_per_unit(u32 unit)
{
	return 1 << (2 * unit);
}
421 
/* Convert a raw clock count to step units, rounding up to a whole unit. */
static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
					u32 unscaled_count, u32 unit)
{
	u32 per_unit = rv6xx_clocks_per_unit(unit);

	return (unscaled_count + per_unit - 1) / per_unit;
}
429 
/*
 * Convert a delay in microseconds into scaled step units, using the SPLL
 * reference frequency (stored in 10 kHz units, hence the /100).
 */
static u32 rv6xx_compute_count_for_delay(struct radeon_device *rdev,
					 u32 delay_us, u32 unit)
{
	u32 raw_count = delay_us * (rdev->clock.spll.reference_freq / 100);

	return rv6xx_scale_count_given_unit(rdev, raw_count, unit);
}
437 
rv6xx_calculate_engine_speed_stepping_parameters(struct radeon_device * rdev,struct rv6xx_ps * state)438 static void rv6xx_calculate_engine_speed_stepping_parameters(struct radeon_device *rdev,
439 							     struct rv6xx_ps *state)
440 {
441 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
442 
443 	pi->hw.sclks[R600_POWER_LEVEL_LOW] =
444 		state->low.sclk;
445 	pi->hw.sclks[R600_POWER_LEVEL_MEDIUM] =
446 		state->medium.sclk;
447 	pi->hw.sclks[R600_POWER_LEVEL_HIGH] =
448 		state->high.sclk;
449 
450 	pi->hw.low_sclk_index = R600_POWER_LEVEL_LOW;
451 	pi->hw.medium_sclk_index = R600_POWER_LEVEL_MEDIUM;
452 	pi->hw.high_sclk_index = R600_POWER_LEVEL_HIGH;
453 }
454 
rv6xx_calculate_memory_clock_stepping_parameters(struct radeon_device * rdev,struct rv6xx_ps * state)455 static void rv6xx_calculate_memory_clock_stepping_parameters(struct radeon_device *rdev,
456 							     struct rv6xx_ps *state)
457 {
458 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
459 
460 	pi->hw.mclks[R600_POWER_LEVEL_CTXSW] =
461 		state->high.mclk;
462 	pi->hw.mclks[R600_POWER_LEVEL_HIGH] =
463 		state->high.mclk;
464 	pi->hw.mclks[R600_POWER_LEVEL_MEDIUM] =
465 		state->medium.mclk;
466 	pi->hw.mclks[R600_POWER_LEVEL_LOW] =
467 		state->low.mclk;
468 
469 	pi->hw.high_mclk_index = R600_POWER_LEVEL_HIGH;
470 
471 	if (state->high.mclk == state->medium.mclk)
472 		pi->hw.medium_mclk_index =
473 			pi->hw.high_mclk_index;
474 	else
475 		pi->hw.medium_mclk_index = R600_POWER_LEVEL_MEDIUM;
476 
477 
478 	if (state->medium.mclk == state->low.mclk)
479 		pi->hw.low_mclk_index =
480 			pi->hw.medium_mclk_index;
481 	else
482 		pi->hw.low_mclk_index = R600_POWER_LEVEL_LOW;
483 }
484 
rv6xx_calculate_voltage_stepping_parameters(struct radeon_device * rdev,struct rv6xx_ps * state)485 static void rv6xx_calculate_voltage_stepping_parameters(struct radeon_device *rdev,
486 							struct rv6xx_ps *state)
487 {
488 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
489 
490 	pi->hw.vddc[R600_POWER_LEVEL_CTXSW] = state->high.vddc;
491 	pi->hw.vddc[R600_POWER_LEVEL_HIGH] = state->high.vddc;
492 	pi->hw.vddc[R600_POWER_LEVEL_MEDIUM] = state->medium.vddc;
493 	pi->hw.vddc[R600_POWER_LEVEL_LOW] = state->low.vddc;
494 
495 	pi->hw.backbias[R600_POWER_LEVEL_CTXSW] =
496 		(state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
497 	pi->hw.backbias[R600_POWER_LEVEL_HIGH] =
498 		(state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
499 	pi->hw.backbias[R600_POWER_LEVEL_MEDIUM] =
500 		(state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
501 	pi->hw.backbias[R600_POWER_LEVEL_LOW] =
502 		(state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
503 
504 	pi->hw.pcie_gen2[R600_POWER_LEVEL_HIGH] =
505 		(state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
506 	pi->hw.pcie_gen2[R600_POWER_LEVEL_MEDIUM] =
507 		(state->medium.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
508 	pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW] =
509 		(state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
510 
511 	pi->hw.high_vddc_index = R600_POWER_LEVEL_HIGH;
512 
513 	if ((state->high.vddc == state->medium.vddc) &&
514 	    ((state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ==
515 	     (state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE)))
516 		pi->hw.medium_vddc_index =
517 			pi->hw.high_vddc_index;
518 	else
519 		pi->hw.medium_vddc_index = R600_POWER_LEVEL_MEDIUM;
520 
521 	if ((state->medium.vddc == state->low.vddc) &&
522 	    ((state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ==
523 	     (state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE)))
524 		pi->hw.low_vddc_index =
525 			pi->hw.medium_vddc_index;
526 	else
527 		pi->hw.medium_vddc_index = R600_POWER_LEVEL_LOW;
528 }
529 
/*
 * Compute the VCO frequency from a divider set: ref * fb / (ref_div + 1),
 * with the low bit of fb_div masked off and scaled by fb_divider_scale.
 */
static inline u32 rv6xx_calculate_vco_frequency(u32 ref_clock,
						struct atom_clock_dividers *dividers,
						u32 fb_divider_scale)
{
	u32 fb = (dividers->fb_div & ~1) << fb_divider_scale;

	return ref_clock * fb / (dividers->ref_div + 1);
}
537 
/* Compute the spread spectrum CLKV (modulation amount) register value. */
static inline u32 rv6xx_calculate_spread_spectrum_clk_v(u32 vco_freq, u32 ref_freq,
							u32 ss_rate, u32 ss_percent,
							u32 fb_divider_scale)
{
	u32 fb_div = vco_freq / ref_freq;
	u32 num = ss_percent * ss_rate * 4 * (fb_div * fb_div);
	u32 den = 5375 * ((vco_freq * 10) / (4096 >> fb_divider_scale));

	return num / den;
}
547 
/* Compute the spread spectrum CLKS (modulation rate) register value. */
static inline u32 rv6xx_calculate_spread_spectrum_clk_s(u32 ss_rate, u32 ref_freq)
{
	u32 half_period = (ref_freq * 10) / (ss_rate * 2);

	return (half_period - 1) / 4;
}
552 
/*
 * Program engine spread spectrum for power level @level running at @clock.
 *
 * SS is first disabled, then re-enabled only when a clock is set, the
 * driver has sclk SS configured, the atom divider lookup succeeds, and
 * the atombios tables provide SS info for the resulting VCO frequency.
 */
static void rv6xx_program_engine_spread_spectrum(struct radeon_device *rdev,
						 u32 clock, enum r600_power_level level)
{
	u32 ref_clk = rdev->clock.spll.reference_freq;
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct atom_clock_dividers dividers;
	struct radeon_atom_ss ss;
	u32 vco_freq, clk_v, clk_s;

	/* Start from a known-disabled state for this level. */
	rv6xx_enable_engine_spread_spectrum(rdev, level, false);

	if (clock && pi->sclk_ss) {
		if (radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, clock, false, &dividers) == 0) {
			vco_freq = rv6xx_calculate_vco_frequency(ref_clk, &dividers,
								 pi->fb_div_scale);

			if (radeon_atombios_get_asic_ss_info(rdev, &ss,
							     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
				/* Post-divided reference feeds the SS math. */
				clk_v = rv6xx_calculate_spread_spectrum_clk_v(vco_freq,
									      (ref_clk / (dividers.ref_div + 1)),
									      ss.rate,
									      ss.percentage,
									      pi->fb_div_scale);

				clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate,
									      (ref_clk / (dividers.ref_div + 1)));

				rv6xx_set_engine_spread_spectrum_clk_v(rdev, level, clk_v);
				rv6xx_set_engine_spread_spectrum_clk_s(rdev, level, clk_s);
				rv6xx_enable_engine_spread_spectrum(rdev, level, true);
			}
		}
	}
}
587 
rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(struct radeon_device * rdev)588 static void rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(struct radeon_device *rdev)
589 {
590 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
591 
592 	rv6xx_program_engine_spread_spectrum(rdev,
593 					     pi->hw.sclks[R600_POWER_LEVEL_HIGH],
594 					     R600_POWER_LEVEL_HIGH);
595 
596 	rv6xx_program_engine_spread_spectrum(rdev,
597 					     pi->hw.sclks[R600_POWER_LEVEL_MEDIUM],
598 					     R600_POWER_LEVEL_MEDIUM);
599 
600 }
601 
/*
 * Program one MPLL frequency level (@entry) for @clock via the atom
 * divider tables.  Returns 0 on success, -EINVAL if the lookup fails.
 */
static int rv6xx_program_mclk_stepping_entry(struct radeon_device *rdev,
					     u32 entry, u32 clock)
{
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     clock, false, &dividers);
	if (ret)
		return -EINVAL;

	rv6xx_memory_clock_entry_set_reference_divider(rdev, entry, dividers.ref_div);
	rv6xx_memory_clock_entry_set_feedback_divider(rdev, entry, dividers.fb_div);
	rv6xx_memory_clock_entry_set_post_divider(rdev, entry, dividers.post_div);
	rv6xx_memory_clock_entry_enable_post_divider(rdev, entry,
						     dividers.enable_post_div != 0);

	return 0;
}
622 
rv6xx_program_mclk_stepping_parameters_except_lowest_entry(struct radeon_device * rdev)623 static void rv6xx_program_mclk_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
624 {
625 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
626 	int i;
627 
628 	for (i = 1; i < R600_PM_NUMBER_OF_MCLKS; i++) {
629 		if (pi->hw.mclks[i])
630 			rv6xx_program_mclk_stepping_entry(rdev, i,
631 							  pi->hw.mclks[i]);
632 	}
633 }
634 
/*
 * Look up the dividers for @requested_memory_clock and, when its VCO
 * beats the current best, update *dividers and *vco_freq in place.
 */
static void rv6xx_find_memory_clock_with_highest_vco(struct radeon_device *rdev,
						     u32 requested_memory_clock,
						     u32 ref_clk,
						     struct atom_clock_dividers *dividers,
						     u32 *vco_freq)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct atom_clock_dividers req_dividers;
	u32 vco;

	if (radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					   requested_memory_clock, false,
					   &req_dividers) != 0)
		return;

	vco = rv6xx_calculate_vco_frequency(ref_clk, &req_dividers,
					    pi->fb_div_scale);
	if (vco > *vco_freq) {
		*dividers = req_dividers;
		*vco_freq = vco;
	}
}
656 
/*
 * Program memory PLL spread spectrum.
 *
 * SS is first disabled, then the highest VCO among the three active mclk
 * levels is found; if the atombios tables carry SS info for that VCO, the
 * CLKV/CLKS fields are programmed and SS is re-enabled.
 */
static void rv6xx_program_mclk_spread_spectrum_parameters(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	u32 ref_clk = rdev->clock.mpll.reference_freq;
	struct atom_clock_dividers dividers;
	struct radeon_atom_ss ss;
	u32 vco_freq = 0, clk_v, clk_s;

	rv6xx_enable_memory_spread_spectrum(rdev, false);

	if (pi->mclk_ss) {
		/* Scan all three levels; dividers/vco_freq keep the best. */
		rv6xx_find_memory_clock_with_highest_vco(rdev,
							 pi->hw.mclks[pi->hw.high_mclk_index],
							 ref_clk,
							 &dividers,
							 &vco_freq);

		rv6xx_find_memory_clock_with_highest_vco(rdev,
							 pi->hw.mclks[pi->hw.medium_mclk_index],
							 ref_clk,
							 &dividers,
							 &vco_freq);

		rv6xx_find_memory_clock_with_highest_vco(rdev,
							 pi->hw.mclks[pi->hw.low_mclk_index],
							 ref_clk,
							 &dividers,
							 &vco_freq);

		if (vco_freq) {
			if (radeon_atombios_get_asic_ss_info(rdev, &ss,
							     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
				clk_v = rv6xx_calculate_spread_spectrum_clk_v(vco_freq,
									     (ref_clk / (dividers.ref_div + 1)),
									     ss.rate,
									     ss.percentage,
									     pi->fb_div_scale);

				clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate,
									     (ref_clk / (dividers.ref_div + 1)));

				rv6xx_set_memory_spread_spectrum_clk_v(rdev, clk_v);
				rv6xx_set_memory_spread_spectrum_clk_s(rdev, clk_s);
				rv6xx_enable_memory_spread_spectrum(rdev, true);
			}
		}
	}
}
705 
/*
 * Program the GPIO pin pattern for @voltage into voltage table slot
 * @entry.  Returns 0 on success or the atom lookup error code.
 */
static int rv6xx_program_voltage_stepping_entry(struct radeon_device *rdev,
						u32 entry, u16 voltage)
{
	u32 gpio_mask, gpio_pins;
	int ret;

	ret = radeon_atom_get_voltage_gpio_settings(rdev, voltage,
						    SET_VOLTAGE_TYPE_ASIC_VDDC,
						    &gpio_pins, &gpio_mask);
	if (ret)
		return ret;

	r600_voltage_control_program_voltages(rdev, entry, gpio_pins);

	return 0;
}
722 
rv6xx_program_voltage_stepping_parameters_except_lowest_entry(struct radeon_device * rdev)723 static void rv6xx_program_voltage_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
724 {
725 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
726 	int i;
727 
728 	for (i = 1; i < R600_PM_NUMBER_OF_VOLTAGE_LEVELS; i++)
729 		rv6xx_program_voltage_stepping_entry(rdev, i,
730 						     pi->hw.vddc[i]);
731 
732 }
733 
rv6xx_program_backbias_stepping_parameters_except_lowest_entry(struct radeon_device * rdev)734 static void rv6xx_program_backbias_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
735 {
736 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
737 
738 	if (pi->hw.backbias[1])
739 		WREG32_P(VID_UPPER_GPIO_CNTL, MEDIUM_BACKBIAS_VALUE, ~MEDIUM_BACKBIAS_VALUE);
740 	else
741 		WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~MEDIUM_BACKBIAS_VALUE);
742 
743 	if (pi->hw.backbias[2])
744 		WREG32_P(VID_UPPER_GPIO_CNTL, HIGH_BACKBIAS_VALUE, ~HIGH_BACKBIAS_VALUE);
745 	else
746 		WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~HIGH_BACKBIAS_VALUE);
747 }
748 
rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(struct radeon_device * rdev)749 static void rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(struct radeon_device *rdev)
750 {
751 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
752 
753 	rv6xx_program_engine_spread_spectrum(rdev,
754 					     pi->hw.sclks[R600_POWER_LEVEL_LOW],
755 					     R600_POWER_LEVEL_LOW);
756 }
757 
rv6xx_program_mclk_stepping_parameters_lowest_entry(struct radeon_device * rdev)758 static void rv6xx_program_mclk_stepping_parameters_lowest_entry(struct radeon_device *rdev)
759 {
760 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
761 
762 	if (pi->hw.mclks[0])
763 		rv6xx_program_mclk_stepping_entry(rdev, 0,
764 						  pi->hw.mclks[0]);
765 }
766 
rv6xx_program_voltage_stepping_parameters_lowest_entry(struct radeon_device * rdev)767 static void rv6xx_program_voltage_stepping_parameters_lowest_entry(struct radeon_device *rdev)
768 {
769 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
770 
771 	rv6xx_program_voltage_stepping_entry(rdev, 0,
772 					     pi->hw.vddc[0]);
773 
774 }
775 
rv6xx_program_backbias_stepping_parameters_lowest_entry(struct radeon_device * rdev)776 static void rv6xx_program_backbias_stepping_parameters_lowest_entry(struct radeon_device *rdev)
777 {
778 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
779 
780 	if (pi->hw.backbias[0])
781 		WREG32_P(VID_UPPER_GPIO_CNTL, LOW_BACKBIAS_VALUE, ~LOW_BACKBIAS_VALUE);
782 	else
783 		WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~LOW_BACKBIAS_VALUE);
784 }
785 
/*
 * Derive the arbiter refresh-rate field for @engine_clock from the DRAM
 * row count (RAMCFG) and refresh interval (MC_SEQ_RESERVE_M).
 */
static u32 calculate_memory_refresh_rate(struct radeon_device *rdev,
					 u32 engine_clock)
{
	u32 rows_field, dram_rows, dram_refresh_rate;

	rows_field = (RREG32(RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	dram_rows = 1 << (rows_field + 10);
	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_RESERVE_M) & 0x3) + 3);

	return ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
}
798 
/*
 * Program the DRAM timing, SQM state ratios and arbiter refresh rates
 * for the three engine clock levels.
 */
static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	u32 sqm_ratio;
	u32 arb_refresh_rate;
	u32 high_clock;

	/* Clamp the clock used for DRAM timings so the low:high ratio
	 * never exceeds 0xFF/0x40. */
	if (pi->hw.sclks[R600_POWER_LEVEL_HIGH] <
	    (pi->hw.sclks[R600_POWER_LEVEL_LOW] * 0xFF / 0x40))
		high_clock = pi->hw.sclks[R600_POWER_LEVEL_HIGH];
	else
		high_clock =
			pi->hw.sclks[R600_POWER_LEVEL_LOW] * 0xFF / 0x40;

	radeon_atom_set_engine_dram_timings(rdev, high_clock, 0);

	/* Per-state ratios in 1/64ths of the (clamped) high clock. */
	sqm_ratio = (STATE0(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_LOW]) |
		     STATE1(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_MEDIUM]) |
		     STATE2(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_HIGH]) |
		     STATE3(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_HIGH]));
	WREG32(SQM_RATIO, sqm_ratio);

	arb_refresh_rate =
		(POWERMODE0(calculate_memory_refresh_rate(rdev,
							  pi->hw.sclks[R600_POWER_LEVEL_LOW])) |
		 POWERMODE1(calculate_memory_refresh_rate(rdev,
							  pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
		 POWERMODE2(calculate_memory_refresh_rate(rdev,
							  pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
		 POWERMODE3(calculate_memory_refresh_rate(rdev,
							  pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
	WREG32(ARB_RFSH_RATE, arb_refresh_rate);
}
832 
/* Program MPLL lock time (scaled by the reference divider) and reset time. */
static void rv6xx_program_mpll_timing_parameters(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	r600_set_mpll_lock_time(rdev, R600_MPLLLOCKTIME_DFLT *
				pi->mpll_ref_div);
	r600_set_mpll_reset_time(rdev, R600_MPLLRESETTIME_DFLT);
}
841 
/*
 * rv6xx_program_bsp - compute and program the behavior state period (bsp/bsu)
 * from the default activity sampling interval and the SPLL reference clock.
 */
static void rv6xx_program_bsp(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	u32 ref_clk = rdev->clock.spll.reference_freq;

	/* splits R600_ASI_DFLT into a period (bsp) and unit (bsu) pair */
	r600_calculate_u_and_p(R600_ASI_DFLT,
			       ref_clk, 16,
			       &pi->bsp,
			       &pi->bsu);

	r600_set_bsp(rdev, pi->bsu, pi->bsp);
}
854 
/*
 * rv6xx_program_at - program the activity thresholds from the computed
 * right/left transition points (percentages), scaled by the bsp period.
 */
static void rv6xx_program_at(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	/* rp[] / lp[] are percent*2 values from rv6xx_calculate_ap() */
	r600_set_at(rdev,
		    (pi->hw.rp[0] * pi->bsp) / 200,
		    (pi->hw.rp[1] * pi->bsp) / 200,
		    (pi->hw.lp[2] * pi->bsp) / 200,
		    (pi->hw.lp[1] * pi->bsp) / 200);
}
865 
/* Program the default GPU idle check sampling time. */
static void rv6xx_program_git(struct radeon_device *rdev)
{
	r600_set_git(rdev, R600_GICST_DFLT);
}
870 
rv6xx_program_tp(struct radeon_device * rdev)871 static void rv6xx_program_tp(struct radeon_device *rdev)
872 {
873 	int i;
874 
875 	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
876 		r600_set_tc(rdev, i, r600_utc[i], r600_dtc[i]);
877 
878 	r600_select_td(rdev, R600_TD_DFLT);
879 }
880 
/* Program the default voltage response control value. */
static void rv6xx_program_vc(struct radeon_device *rdev)
{
	r600_set_vrc(rdev, R600_VRC_DFLT);
}
885 
/* Clear the voltage response control value (counterpart of rv6xx_program_vc). */
static void rv6xx_clear_vc(struct radeon_device *rdev)
{
	r600_set_vrc(rdev, 0);
}
890 
/* Program default thermal protection unit and counter values. */
static void rv6xx_program_tpp(struct radeon_device *rdev)
{
	r600_set_tpu(rdev, R600_TPU_DFLT);
	r600_set_tpc(rdev, R600_TPC_DFLT);
}
896 
/* Program default sleep state timing (unit and period). */
static void rv6xx_program_sstp(struct radeon_device *rdev)
{
	r600_set_sstu(rdev, R600_SSTU_DFLT);
	r600_set_sst(rdev, R600_SST_DFLT);
}
902 
/* Program default fetch control timing (unit and period). */
static void rv6xx_program_fcp(struct radeon_device *rdev)
{
	r600_set_fctu(rdev, R600_FCTU_DFLT);
	r600_set_fct(rdev, R600_FCT_DFLT);
}
908 
/* Program the default VDDC 3D out-of-range and context clock-gating timings. */
static void rv6xx_program_vddc3d_parameters(struct radeon_device *rdev)
{
	r600_set_vddc3d_oorsu(rdev, R600_VDDC3DOORSU_DFLT);
	r600_set_vddc3d_oorphc(rdev, R600_VDDC3DOORPHC_DFLT);
	r600_set_vddc3d_oorsdc(rdev, R600_VDDC3DOORSDC_DFLT);
	r600_set_ctxcgtt3d_rphc(rdev, R600_CTXCGTT3DRPHC_DFLT);
	r600_set_ctxcgtt3d_rsdc(rdev, R600_CTXCGTT3DRSDC_DFLT);
}
917 
/*
 * rv6xx_program_voltage_timing_parameters - program the voltage response
 * time (VRT) and backbias response time (BRT) using counts derived from the
 * platform's reported response times.
 */
static void rv6xx_program_voltage_timing_parameters(struct radeon_device *rdev)
{
	u32 rt;

	r600_vid_rt_set_vru(rdev, R600_VRU_DFLT);

	r600_vid_rt_set_vrt(rdev,
			    rv6xx_compute_count_for_delay(rdev,
							  rdev->pm.dpm.voltage_response_time,
							  R600_VRU_DFLT));

	rt = rv6xx_compute_count_for_delay(rdev,
					   rdev->pm.dpm.backbias_response_time,
					   R600_VRU_DFLT);

	/* round up to the next multiple of 32 counts */
	rv6xx_vid_response_set_brt(rdev, (rt + 0x1F) >> 5);
}
935 
/* Program the SPLL step unit and enable engine feedback/reference sync. */
static void rv6xx_program_engine_speed_parameters(struct radeon_device *rdev)
{
	r600_vid_rt_set_ssu(rdev, R600_SPLLSTEPUNIT_DFLT);
	rv6xx_enable_engine_feedback_and_reference_sync(rdev);
}
941 
rv6xx_get_master_voltage_mask(struct radeon_device * rdev)942 static u64 rv6xx_get_master_voltage_mask(struct radeon_device *rdev)
943 {
944 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
945 	u64 master_mask = 0;
946 	int i;
947 
948 	for (i = 0; i < R600_PM_NUMBER_OF_VOLTAGE_LEVELS; i++) {
949 		u32 tmp_mask, tmp_set_pins;
950 		int ret;
951 
952 		ret = radeon_atom_get_voltage_gpio_settings(rdev,
953 							    pi->hw.vddc[i],
954 							    SET_VOLTAGE_TYPE_ASIC_VDDC,
955 							    &tmp_set_pins, &tmp_mask);
956 
957 		if (ret == 0)
958 			master_mask |= tmp_mask;
959 	}
960 
961 	return master_mask;
962 }
963 
/* Enable every GPIO pin used by any voltage level in the stepping table. */
static void rv6xx_program_voltage_gpio_pins(struct radeon_device *rdev)
{
	r600_voltage_control_enable_pins(rdev,
					 rv6xx_get_master_voltage_mask(rdev));
}
969 
/*
 * Enable static voltage control by forcing the new state's low VDDC via the
 * VBIOS, or disable it by releasing the static control over the voltage pins.
 */
static void rv6xx_enable_static_voltage_control(struct radeon_device *rdev,
						struct radeon_ps *new_ps,
						bool enable)
{
	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);

	if (!enable) {
		r600_voltage_control_deactivate_static_control(rdev,
							       rv6xx_get_master_voltage_mask(rdev));
		return;
	}

	radeon_atom_set_voltage(rdev, new_state->low.vddc,
				SET_VOLTAGE_TYPE_ASIC_VDDC);
}
984 
/*
 * Configure the display-gap logic (vblank/watermark gaps plus the VBI timer)
 * and toggle USE_DISPLAY_GAP in MCLK_PWRMGT_CNTL.
 */
static void rv6xx_enable_display_gap(struct radeon_device *rdev, bool enable)
{
	if (!enable) {
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~USE_DISPLAY_GAP);
		return;
	}

	WREG32(CG_DISPLAY_GAP_CNTL,
	       DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM) |
	       DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM) |
	       DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
	       DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
	       VBI_TIMER_COUNT(0x3FFF) |
	       VBI_TIMER_UNIT(7));

	WREG32_P(MCLK_PWRMGT_CNTL, USE_DISPLAY_GAP, ~USE_DISPLAY_GAP);
}
1000 
/* Make the medium power level the entry state for dynamic PM. */
static void rv6xx_program_power_level_enter_state(struct radeon_device *rdev)
{
	r600_power_level_set_enter_index(rdev, R600_POWER_LEVEL_MEDIUM);
}
1005 
/*
 * rv6xx_calculate_t - adjust a pair of left/right transition thresholds
 * (percentages) for adjacent power levels with clocks l_f < h_f.
 *
 * On a zero denominator the outputs are left untouched.
 */
static void rv6xx_calculate_t(u32 l_f, u32 h_f, int h,
			      int d_l, int d_r, u8 *l, u8 *r)
{
	int num, den, h_weight, l_weight;

	h_weight = d_l;
	l_weight = 100 - d_r;

	num = (int)h_f * d_l + (int)l_f * (h - d_r);
	den = (int)l_f * l_weight + (int)h_f * h_weight;

	if (den == 0)
		return;

	*l = d_l - h_weight * num / den;
	*r = d_r + l_weight * num / den;
}
1022 
/*
 * rv6xx_calculate_ap - compute the activity transition points (lp/rp
 * percentages) between the low/medium and medium/high power levels.
 */
static void rv6xx_calculate_ap(struct radeon_device *rdev,
			       struct rv6xx_ps *state)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	/* fixed endpoints: lowest left point 0%, highest right point 100% */
	pi->hw.lp[0] = 0;
	pi->hw.rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS - 1]
		= 100;

	/* low <-> medium transition */
	rv6xx_calculate_t(state->low.sclk,
			  state->medium.sclk,
			  R600_AH_DFLT,
			  R600_LMP_DFLT,
			  R600_RLP_DFLT,
			  &pi->hw.lp[1],
			  &pi->hw.rp[0]);

	/* medium <-> high transition */
	rv6xx_calculate_t(state->medium.sclk,
			  state->high.sclk,
			  R600_AH_DFLT,
			  R600_LHP_DFLT,
			  R600_RMP_DFLT,
			  &pi->hw.lp[2],
			  &pi->hw.rp[1]);

}
1049 
/*
 * Recompute all stepping parameters (sclk, mclk, voltage, activity points)
 * for the given power state.
 */
static void rv6xx_calculate_stepping_parameters(struct radeon_device *rdev,
						struct radeon_ps *new_ps)
{
	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);

	rv6xx_calculate_engine_speed_stepping_parameters(rdev, new_state);
	rv6xx_calculate_memory_clock_stepping_parameters(rdev, new_state);
	rv6xx_calculate_voltage_stepping_parameters(rdev, new_state);
	rv6xx_calculate_ap(rdev, new_state);
}
1060 
/*
 * Program all stepping tables except the lowest (index 0) entries, plus
 * spread spectrum and memory timing parameters.
 */
static void rv6xx_program_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	rv6xx_program_mclk_stepping_parameters_except_lowest_entry(rdev);
	/* voltage stepping only applies when GPIO voltage control is in use */
	if (pi->voltage_control)
		rv6xx_program_voltage_stepping_parameters_except_lowest_entry(rdev);
	rv6xx_program_backbias_stepping_parameters_except_lowest_entry(rdev);
	rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(rdev);
	rv6xx_program_mclk_spread_spectrum_parameters(rdev);
	rv6xx_program_memory_timing_parameters(rdev);
}
1073 
/* Program the lowest (index 0) entry of each stepping table. */
static void rv6xx_program_stepping_parameters_lowest_entry(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	rv6xx_program_mclk_stepping_parameters_lowest_entry(rdev);
	/* voltage stepping only applies when GPIO voltage control is in use */
	if (pi->voltage_control)
		rv6xx_program_voltage_stepping_parameters_lowest_entry(rdev);
	rv6xx_program_backbias_stepping_parameters_lowest_entry(rdev);
	rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(rdev);
}
1084 
/* Point the LOW power level at its stepping-table indices and watermark. */
static void rv6xx_program_power_level_low(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW,
					   pi->hw.low_vddc_index);
	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW,
					     pi->hw.low_mclk_index);
	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW,
					     pi->hw.low_sclk_index);
	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,
					  R600_DISPLAY_WATERMARK_LOW);
	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_LOW,
				       pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
}
1100 
/*
 * Point the LOW power level at the lowest (index 0) stepping entries,
 * used while restricting to the minimum state.
 */
static void rv6xx_program_power_level_low_to_lowest_state(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW, 0);
	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);

	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,
					  R600_DISPLAY_WATERMARK_LOW);

	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_LOW,
				       pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);

}
1116 
/* Point the MEDIUM power level at its stepping-table indices and watermark. */
static void rv6xx_program_power_level_medium(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM,
					  pi->hw.medium_vddc_index);
	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
					    pi->hw.medium_mclk_index);
	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
					    pi->hw.medium_sclk_index);
	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM,
					 R600_DISPLAY_WATERMARK_LOW);
	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_MEDIUM,
				      pi->hw.pcie_gen2[R600_POWER_LEVEL_MEDIUM]);
}
1132 
/*
 * Reconfigure the MEDIUM level as a safe intermediate state for a power
 * state transition: the current low mclk is staged in the CTXSW stepping
 * entry, voltage index 1 is used, and engine spread spectrum is disabled.
 */
static void rv6xx_program_power_level_medium_for_transition(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	/* stage the current low mclk in the context-switch entry */
	rv6xx_program_mclk_stepping_entry(rdev,
					  R600_POWER_LEVEL_CTXSW,
					  pi->hw.mclks[pi->hw.low_mclk_index]);

	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM, 1);

	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
					     R600_POWER_LEVEL_CTXSW);
	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
					     pi->hw.medium_sclk_index);

	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM,
					  R600_DISPLAY_WATERMARK_LOW);

	/* no engine SS while transitioning through this level */
	rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_MEDIUM, false);

	/* use the LOW level's pcie gen2 setting during the transition */
	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_MEDIUM,
				       pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
}
1156 
/* Point the HIGH power level at its stepping-table indices and watermark. */
static void rv6xx_program_power_level_high(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_HIGH,
					   pi->hw.high_vddc_index);
	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_HIGH,
					     pi->hw.high_mclk_index);
	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_HIGH,
					     pi->hw.high_sclk_index);

	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_HIGH,
					  R600_DISPLAY_WATERMARK_HIGH);

	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_HIGH,
				       pi->hw.pcie_gen2[R600_POWER_LEVEL_HIGH]);
}
1174 
/*
 * Enable or disable backbias support.  Note the masks are deliberately
 * asymmetric: disabling also clears BACKBIAS_VALUE, enabling leaves it alone.
 */
static void rv6xx_enable_backbias(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL,
			 ~(BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL));
	else
		WREG32_P(GENERAL_PWRMGT, 0,
			 ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL));
}
1184 
rv6xx_program_display_gap(struct radeon_device * rdev)1185 static void rv6xx_program_display_gap(struct radeon_device *rdev)
1186 {
1187 	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1188 
1189 	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1190 	if (rdev->pm.dpm.new_active_crtcs & 1) {
1191 		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1192 		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1193 	} else if (rdev->pm.dpm.new_active_crtcs & 2) {
1194 		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1195 		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1196 	} else {
1197 		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1198 		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1199 	}
1200 	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1201 }
1202 
rv6xx_set_sw_voltage_to_safe(struct radeon_device * rdev,struct radeon_ps * new_ps,struct radeon_ps * old_ps)1203 static void rv6xx_set_sw_voltage_to_safe(struct radeon_device *rdev,
1204 					 struct radeon_ps *new_ps,
1205 					 struct radeon_ps *old_ps)
1206 {
1207 	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1208 	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1209 	u16 safe_voltage;
1210 
1211 	safe_voltage = (new_state->low.vddc >= old_state->low.vddc) ?
1212 		new_state->low.vddc : old_state->low.vddc;
1213 
1214 	rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
1215 					     safe_voltage);
1216 
1217 	WREG32_P(GENERAL_PWRMGT, SW_GPIO_INDEX(R600_POWER_LEVEL_CTXSW),
1218 		 ~SW_GPIO_INDEX_MASK);
1219 }
1220 
/*
 * Program the CTXSW voltage entry with the old state's low VDDC and select
 * it via SW_GPIO_INDEX.
 */
static void rv6xx_set_sw_voltage_to_low(struct radeon_device *rdev,
					struct radeon_ps *old_ps)
{
	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);

	rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
					     old_state->low.vddc);

	WREG32_P(GENERAL_PWRMGT, SW_GPIO_INDEX(R600_POWER_LEVEL_CTXSW),
		~SW_GPIO_INDEX_MASK);
}
1232 
rv6xx_set_safe_backbias(struct radeon_device * rdev,struct radeon_ps * new_ps,struct radeon_ps * old_ps)1233 static void rv6xx_set_safe_backbias(struct radeon_device *rdev,
1234 				    struct radeon_ps *new_ps,
1235 				    struct radeon_ps *old_ps)
1236 {
1237 	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1238 	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1239 
1240 	if ((new_state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) &&
1241 	    (old_state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE))
1242 		WREG32_P(GENERAL_PWRMGT, BACKBIAS_VALUE, ~BACKBIAS_VALUE);
1243 	else
1244 		WREG32_P(GENERAL_PWRMGT, 0, ~BACKBIAS_VALUE);
1245 }
1246 
/*
 * If the pcie gen2 capability differs between the old and new low states,
 * drop to gen1 during the transition — the only speed safe for both.
 */
static void rv6xx_set_safe_pcie_gen2(struct radeon_device *rdev,
				     struct radeon_ps *new_ps,
				     struct radeon_ps *old_ps)
{
	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);

	if ((new_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) !=
	    (old_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2))
		rv6xx_force_pcie_gen1(rdev);
}
1258 
/* Toggle the VOLT_PWRMGT_EN bit in GENERAL_PWRMGT. */
static void rv6xx_enable_dynamic_voltage_control(struct radeon_device *rdev,
						 bool enable)
{
	WREG32_P(GENERAL_PWRMGT, enable ? VOLT_PWRMGT_EN : 0,
		 ~VOLT_PWRMGT_EN);
}
1267 
/* Toggle the BACKBIAS_DPM_CNTL bit in GENERAL_PWRMGT. */
static void rv6xx_enable_dynamic_backbias_control(struct radeon_device *rdev,
						  bool enable)
{
	WREG32_P(GENERAL_PWRMGT, enable ? BACKBIAS_DPM_CNTL : 0,
		 ~BACKBIAS_DPM_CNTL);
}
1276 
/*
 * rv6xx_step_sw_voltage - walk the CTXSW voltage entry from an initial to a
 * target voltage, one hardware voltage step at a time.
 *
 * @rdev: radeon device
 * @initial_voltage: starting voltage (mV)
 * @target_voltage: desired voltage (mV)
 *
 * Both endpoints are rounded to true hardware voltages first so the walk
 * lands exactly on the target.  Sleeps for the platform voltage response
 * time after each step.  Returns 0 on success, -EINVAL if the VBIOS lookups
 * fail or report a zero step size (which would make the loop spin forever).
 */
static int rv6xx_step_sw_voltage(struct radeon_device *rdev,
				 u16 initial_voltage,
				 u16 target_voltage)
{
	u16 current_voltage;
	u16 true_target_voltage;
	u16 voltage_step;
	int signed_voltage_step;

	if ((radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
					  &voltage_step)) ||
	    (radeon_atom_round_to_true_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
					       initial_voltage, &current_voltage)) ||
	    (radeon_atom_round_to_true_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
					       target_voltage, &true_target_voltage)))
		return -EINVAL;

	/* guard against an infinite loop below if the VBIOS reports step 0 */
	if (voltage_step == 0)
		return -EINVAL;

	if (true_target_voltage < current_voltage)
		signed_voltage_step = -(int)voltage_step;
	else
		signed_voltage_step = voltage_step;

	/* both endpoints are true voltages, so stepping terminates exactly */
	while (current_voltage != true_target_voltage) {
		current_voltage += signed_voltage_step;
		rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
						     current_voltage);
		/* voltage_response_time is in us; round up to ms for msleep */
		msleep((rdev->pm.dpm.voltage_response_time + 999) / 1000);
	}

	return 0;
}
1308 
rv6xx_step_voltage_if_increasing(struct radeon_device * rdev,struct radeon_ps * new_ps,struct radeon_ps * old_ps)1309 static int rv6xx_step_voltage_if_increasing(struct radeon_device *rdev,
1310 					    struct radeon_ps *new_ps,
1311 					    struct radeon_ps *old_ps)
1312 {
1313 	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1314 	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1315 
1316 	if (new_state->low.vddc > old_state->low.vddc)
1317 		return rv6xx_step_sw_voltage(rdev,
1318 					     old_state->low.vddc,
1319 					     new_state->low.vddc);
1320 
1321 	return 0;
1322 }
1323 
rv6xx_step_voltage_if_decreasing(struct radeon_device * rdev,struct radeon_ps * new_ps,struct radeon_ps * old_ps)1324 static int rv6xx_step_voltage_if_decreasing(struct radeon_device *rdev,
1325 					    struct radeon_ps *new_ps,
1326 					    struct radeon_ps *old_ps)
1327 {
1328 	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1329 	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1330 
1331 	if (new_state->low.vddc < old_state->low.vddc)
1332 		return rv6xx_step_sw_voltage(rdev,
1333 					     old_state->low.vddc,
1334 					     new_state->low.vddc);
1335 	else
1336 		return 0;
1337 }
1338 
/*
 * Re-enable the HIGH power level unless the user restricted levels
 * (restricted_levels 1 or 2 keep it off; 3 is treated as unrestricted here).
 */
static void rv6xx_enable_high(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	if ((pi->restricted_levels < 1) ||
	    (pi->restricted_levels == 3))
		r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
}
1347 
/* Re-enable the MEDIUM power level unless restricted to fewer than 2 levels. */
static void rv6xx_enable_medium(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	if (pi->restricted_levels < 2)
		r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
}
1355 
/*
 * rv6xx_set_dpm_event_sources - map the active auto-throttle source mask to
 * a CG_THERMAL_CTRL DPM event source and gate thermal protection.
 *
 * @rdev: radeon device
 * @sources: bitmask of RADEON_DPM_AUTO_THROTTLE_SRC_* bits
 *
 * With no sources (or an unrecognized combination), thermal protection is
 * disabled.  Otherwise the matching event source (digital, external, or
 * either) is selected and, if thermal protection is configured for this
 * board, the THERMAL_PROTECTION_DIS bit is cleared.
 */
static void rv6xx_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	bool want_thermal_protection;
	/* only consumed when want_thermal_protection is set below, but
	 * initialize defensively to avoid maybe-uninitialized warnings */
	enum radeon_dpm_event_src dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;

	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;

	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
		if (pi->thermal_protection)
			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	} else {
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
	}
}
1392 
/*
 * Add or remove one auto-throttle source from the active set, reprogramming
 * the DPM event sources only when the set actually changes.
 */
static void rv6xx_enable_auto_throttle_source(struct radeon_device *rdev,
					      enum radeon_dpm_auto_throttle_src source,
					      bool enable)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	u32 source_bit = 1 << source;
	bool active = (pi->active_auto_throttle_sources & source_bit) != 0;

	if (enable == active)
		return;

	if (enable)
		pi->active_auto_throttle_sources |= source_bit;
	else
		pi->active_auto_throttle_sources &= ~source_bit;

	rv6xx_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
}
1411 
1412 
/*
 * Forward a thermal-protection enable/disable request, but only while at
 * least one auto-throttle source is active.
 */
static void rv6xx_enable_thermal_protection(struct radeon_device *rdev,
					    bool enable)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	if (pi->active_auto_throttle_sources)
		r600_enable_thermal_protection(rdev, enable);
}
1421 
/*
 * Generate engine-clock steps from the old low sclk to the new low sclk,
 * recording the last step index as the medium sclk index.
 */
static void rv6xx_generate_transition_stepping(struct radeon_device *rdev,
					       struct radeon_ps *new_ps,
					       struct radeon_ps *old_ps)
{
	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	rv6xx_generate_steps(rdev,
			     old_state->low.sclk,
			     new_state->low.sclk,
			     0, &pi->hw.medium_sclk_index);
}
1435 
/* Place the new state's low sclk as the single step at table index 0. */
static void rv6xx_generate_low_step(struct radeon_device *rdev,
				    struct radeon_ps *new_ps)
{
	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	pi->hw.low_sclk_index = 0;
	rv6xx_generate_single_step(rdev,
				   new_state->low.sclk,
				   0);
}
1447 
/* Invalidate the step entries between index 0 and the medium sclk index. */
static void rv6xx_invalidate_intermediate_steps(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	rv6xx_invalidate_intermediate_steps_range(rdev, 0,
						  pi->hw.medium_sclk_index);
}
1455 
/*
 * Build the full engine-clock stepping table for a power state: steps from
 * low to medium sclk, then medium to high, recording the boundary indices.
 */
static void rv6xx_generate_stepping_table(struct radeon_device *rdev,
					  struct radeon_ps *new_ps)
{
	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	pi->hw.low_sclk_index = 0;

	rv6xx_generate_steps(rdev,
			     new_state->low.sclk,
			     new_state->medium.sclk,
			     0,
			     &pi->hw.medium_sclk_index);
	rv6xx_generate_steps(rdev,
			     new_state->medium.sclk,
			     new_state->high.sclk,
			     pi->hw.medium_sclk_index,
			     &pi->hw.high_sclk_index);
}
1475 
/*
 * Enable dynamic spread spectrum, or tear it down completely: per-level
 * engine SS first, then dynamic SS, then memory SS.
 */
static void rv6xx_enable_spread_spectrum(struct radeon_device *rdev,
					 bool enable)
{
	if (enable) {
		rv6xx_enable_dynamic_spread_spectrum(rdev, true);
		return;
	}

	rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_LOW, false);
	rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_MEDIUM, false);
	rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_HIGH, false);
	rv6xx_enable_dynamic_spread_spectrum(rdev, false);
	rv6xx_enable_memory_spread_spectrum(rdev, false);
}
1489 
/*
 * Pulse LVTMA_PFREQCHG to resynchronize LVTMA data; the register address
 * differs between DCE3 and pre-DCE3 parts.
 */
static void rv6xx_reset_lvtm_data_sync(struct radeon_device *rdev)
{
	if (ASIC_IS_DCE3(rdev))
		WREG32_P(DCE3_LVTMA_DATA_SYNCHRONIZATION, LVTMA_PFREQCHG, ~LVTMA_PFREQCHG);
	else
		WREG32_P(LVTMA_DATA_SYNCHRONIZATION, LVTMA_PFREQCHG, ~LVTMA_PFREQCHG);
}
1497 
/*
 * Enable or disable dynamic pcie gen2 switching.  When disabling, the link
 * is first forced to gen1 if the new state does not support gen2.
 */
static void rv6xx_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
					   struct radeon_ps *new_ps,
					   bool enable)
{
	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);

	if (enable) {
		rv6xx_enable_bif_dynamic_pcie_gen2(rdev, true);
		rv6xx_enable_pcie_gen2_support(rdev);
		r600_enable_dynamic_pcie_gen2(rdev, true);
	} else {
		if (!(new_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2))
			rv6xx_force_pcie_gen1(rdev);
		rv6xx_enable_bif_dynamic_pcie_gen2(rdev, false);
		r600_enable_dynamic_pcie_gen2(rdev, false);
	}
}
1515 
rv6xx_set_uvd_clock_before_set_eng_clock(struct radeon_device * rdev,struct radeon_ps * new_ps,struct radeon_ps * old_ps)1516 static void rv6xx_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
1517 						     struct radeon_ps *new_ps,
1518 						     struct radeon_ps *old_ps)
1519 {
1520 	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1521 	struct rv6xx_ps *current_state = rv6xx_get_ps(old_ps);
1522 
1523 	if ((new_ps->vclk == old_ps->vclk) &&
1524 	    (new_ps->dclk == old_ps->dclk))
1525 		return;
1526 
1527 	if (new_state->high.sclk >= current_state->high.sclk)
1528 		return;
1529 
1530 	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1531 }
1532 
rv6xx_set_uvd_clock_after_set_eng_clock(struct radeon_device * rdev,struct radeon_ps * new_ps,struct radeon_ps * old_ps)1533 static void rv6xx_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
1534 						    struct radeon_ps *new_ps,
1535 						    struct radeon_ps *old_ps)
1536 {
1537 	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1538 	struct rv6xx_ps *current_state = rv6xx_get_ps(old_ps);
1539 
1540 	if ((new_ps->vclk == old_ps->vclk) &&
1541 	    (new_ps->dclk == old_ps->dclk))
1542 		return;
1543 
1544 	if (new_state->high.sclk < current_state->high.sclk)
1545 		return;
1546 
1547 	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1548 }
1549 
/*
 * rv6xx_dpm_enable - full dynamic power management bring-up for the boot
 * power state.  The programming order below follows the hardware's required
 * sequence; do not reorder casually.
 *
 * Returns 0 on success, -EINVAL if dynamic PM is already enabled.
 */
int rv6xx_dpm_enable(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	if (r600_dynamicpm_enabled(rdev))
		return -EINVAL;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_enable_backbias(rdev, true);

	if (pi->dynamic_ss)
		rv6xx_enable_spread_spectrum(rdev, true);

	/* global timing/threshold defaults */
	rv6xx_program_mpll_timing_parameters(rdev);
	rv6xx_program_bsp(rdev);
	rv6xx_program_git(rdev);
	rv6xx_program_tp(rdev);
	rv6xx_program_tpp(rdev);
	rv6xx_program_sstp(rdev);
	rv6xx_program_fcp(rdev);
	rv6xx_program_vddc3d_parameters(rdev);
	rv6xx_program_voltage_timing_parameters(rdev);
	rv6xx_program_engine_speed_parameters(rdev);

	/* program the gap registers first, then clear the enable bit if the
	 * display gap feature is not wanted */
	rv6xx_enable_display_gap(rdev, true);
	if (pi->display_gap == false)
		rv6xx_enable_display_gap(rdev, false);

	rv6xx_program_power_level_enter_state(rdev);

	rv6xx_calculate_stepping_parameters(rdev, boot_ps);

	if (pi->voltage_control)
		rv6xx_program_voltage_gpio_pins(rdev);

	rv6xx_generate_stepping_table(rdev, boot_ps);

	rv6xx_program_stepping_parameters_except_lowest_entry(rdev);
	rv6xx_program_stepping_parameters_lowest_entry(rdev);

	/* bind the three power levels to the stepping tables */
	rv6xx_program_power_level_low(rdev);
	rv6xx_program_power_level_medium(rdev);
	rv6xx_program_power_level_high(rdev);
	rv6xx_program_vc(rdev);
	rv6xx_program_at(rdev);

	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);

	rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	r600_start_dpm(rdev);

	/* hand voltage control over to the dynamic machinery */
	if (pi->voltage_control)
		rv6xx_enable_static_voltage_control(rdev, boot_ps, false);

	if (pi->dynamic_pcie_gen2)
		rv6xx_enable_dynamic_pcie_gen2(rdev, boot_ps, true);

	if (pi->gfx_clock_gating)
		r600_gfx_clockgating_enable(rdev, true);

	return 0;
}
1616 
/*
 * rv6xx_dpm_disable - tear down dynamic power management: settle on the LOW
 * level, disable the upper levels and dynamic features, restore static
 * voltage control for the boot state, and stop the DPM state machine.
 */
void rv6xx_dpm_disable(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	if (!r600_dynamicpm_enabled(rdev))
		return;

	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
	rv6xx_enable_display_gap(rdev, false);
	rv6xx_clear_vc(rdev);
	/* max out the activity thresholds so the hw stops switching up */
	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);

	if (pi->thermal_protection)
		r600_enable_thermal_protection(rdev, false);

	/* settle on LOW before turning the other levels off */
	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_enable_backbias(rdev, false);

	rv6xx_enable_spread_spectrum(rdev, false);

	if (pi->voltage_control)
		rv6xx_enable_static_voltage_control(rdev, boot_ps, true);

	if (pi->dynamic_pcie_gen2)
		rv6xx_enable_dynamic_pcie_gen2(rdev, boot_ps, false);

	/* stop thermal interrupts if the internal sensor was in use */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	if (pi->gfx_clock_gating)
		r600_gfx_clockgating_enable(rdev, false);

	r600_stop_dpm(rdev);
}
1660 
/*
 * rv6xx_dpm_set_power_state() - transition from the current to the
 * requested power state.
 *
 * The transition is staged: park on the low level, program a transition
 * "medium" level, move voltages/backbias/PCIe to safe values, hop to the
 * transition level, rebuild the lowest entry for the new state, hop back
 * to low, then reprogram and re-enable all three levels for the new
 * state.  The statement order encodes the hardware handshake and must
 * not be rearranged.
 *
 * Returns 0 on success or a negative error code if stepping the voltage
 * down fails.
 */
int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
	int ret;

	pi->restricted_levels = 0;

	/* UVD clocks must be raised before the engine clock goes up. */
	rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);

	rv6xx_clear_vc(rdev);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	/* Fully open activity thresholds so the level switch cannot stall. */
	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);

	if (pi->thermal_protection)
		r600_enable_thermal_protection(rdev, false);

	/* Park on the low level; turn the other two off for reprogramming. */
	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);

	/* Build an intermediate stepping table bridging old -> new state. */
	rv6xx_generate_transition_stepping(rdev, new_ps, old_ps);
	rv6xx_program_power_level_medium_for_transition(rdev);

	if (pi->voltage_control) {
		rv6xx_set_sw_voltage_to_safe(rdev, new_ps, old_ps);
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
			rv6xx_set_sw_voltage_to_low(rdev, old_ps);
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_set_safe_backbias(rdev, new_ps, old_ps);

	if (pi->dynamic_pcie_gen2)
		rv6xx_set_safe_pcie_gen2(rdev, new_ps, old_ps);

	/* Dynamic controls are suspended while we are mid-transition. */
	if (pi->voltage_control)
		rv6xx_enable_dynamic_voltage_control(rdev, false);

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_enable_dynamic_backbias_control(rdev, false);

	if (pi->voltage_control) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
			rv6xx_step_voltage_if_increasing(rdev, new_ps, old_ps);
		/* voltage_response_time is in us; round up to whole ms. */
		msleep((rdev->pm.dpm.voltage_response_time + 999) / 1000);
	}

	/* Hop onto the transition (medium) level. */
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
	r600_wait_for_power_level_unequal(rdev, R600_POWER_LEVEL_LOW);

	/* Rebuild the lowest entry for the new state and hop back to low. */
	rv6xx_generate_low_step(rdev, new_ps);
	rv6xx_invalidate_intermediate_steps(rdev);
	rv6xx_calculate_stepping_parameters(rdev, new_ps);
	rv6xx_program_stepping_parameters_lowest_entry(rdev);
	rv6xx_program_power_level_low_to_lowest_state(rdev);

	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);

	if (pi->voltage_control) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) {
			/* NOTE(review): returning here leaves the levels only
			 * partially reprogrammed for the new state -- confirm
			 * callers treat this as fatal. */
			ret = rv6xx_step_voltage_if_decreasing(rdev, new_ps, old_ps);
			if (ret)
				return ret;
		}
		rv6xx_enable_dynamic_voltage_control(rdev, true);
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_enable_dynamic_backbias_control(rdev, true);

	if (pi->dynamic_pcie_gen2)
		rv6xx_enable_dynamic_pcie_gen2(rdev, new_ps, true);

	rv6xx_reset_lvtm_data_sync(rdev);

	/* Program the full stepping table and all three levels for new_ps. */
	rv6xx_generate_stepping_table(rdev, new_ps);
	rv6xx_program_stepping_parameters_except_lowest_entry(rdev);
	rv6xx_program_power_level_low(rdev);
	rv6xx_program_power_level_medium(rdev);
	rv6xx_program_power_level_high(rdev);
	rv6xx_enable_medium(rdev);
	rv6xx_enable_high(rdev);

	if (pi->thermal_protection)
		rv6xx_enable_thermal_protection(rdev, true);
	rv6xx_program_vc(rdev);
	rv6xx_program_at(rdev);

	/* UVD clocks must be lowered after the engine clock goes down. */
	rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);

	return 0;
}
1758 
rv6xx_setup_asic(struct radeon_device * rdev)1759 void rv6xx_setup_asic(struct radeon_device *rdev)
1760 {
1761 	r600_enable_acpi_pm(rdev);
1762 
1763 	if (radeon_aspm != 0) {
1764 		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
1765 			rv6xx_enable_l0s(rdev);
1766 		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
1767 			rv6xx_enable_l1(rdev);
1768 		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
1769 			rv6xx_enable_pll_sleep_in_l1(rdev);
1770 	}
1771 }
1772 
/* Reprogram the display gap after a display configuration change. */
void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	rv6xx_program_display_gap(rdev);
}
1777 
/* Overlay of every PowerPlay table revision found in the VBIOS. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-ASIC-family layouts of a PPLib clock info entry. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* The two on-disk layouts of a PPLib power state entry. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
1798 
rv6xx_parse_pplib_non_clock_info(struct radeon_device * rdev,struct radeon_ps * rps,struct _ATOM_PPLIB_NONCLOCK_INFO * non_clock_info)1799 static void rv6xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
1800 					     struct radeon_ps *rps,
1801 					     struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
1802 {
1803 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
1804 	rps->class = le16_to_cpu(non_clock_info->usClassification);
1805 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
1806 
1807 	if (r600_is_uvd_state(rps->class, rps->class2)) {
1808 		rps->vclk = RV6XX_DEFAULT_VCLK_FREQ;
1809 		rps->dclk = RV6XX_DEFAULT_DCLK_FREQ;
1810 	} else {
1811 		rps->vclk = 0;
1812 		rps->dclk = 0;
1813 	}
1814 
1815 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
1816 		rdev->pm.dpm.boot_ps = rps;
1817 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
1818 		rdev->pm.dpm.uvd_ps = rps;
1819 }
1820 
rv6xx_parse_pplib_clock_info(struct radeon_device * rdev,struct radeon_ps * rps,int index,union pplib_clock_info * clock_info)1821 static void rv6xx_parse_pplib_clock_info(struct radeon_device *rdev,
1822 					 struct radeon_ps *rps, int index,
1823 					 union pplib_clock_info *clock_info)
1824 {
1825 	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
1826 	u32 sclk, mclk;
1827 	u16 vddc;
1828 	struct rv6xx_pl *pl;
1829 
1830 	switch (index) {
1831 	case 0:
1832 		pl = &ps->low;
1833 		break;
1834 	case 1:
1835 		pl = &ps->medium;
1836 		break;
1837 	case 2:
1838 	default:
1839 		pl = &ps->high;
1840 		break;
1841 	}
1842 
1843 	sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
1844 	sclk |= clock_info->r600.ucEngineClockHigh << 16;
1845 	mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
1846 	mclk |= clock_info->r600.ucMemoryClockHigh << 16;
1847 
1848 	pl->mclk = mclk;
1849 	pl->sclk = sclk;
1850 	pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
1851 	pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
1852 
1853 	/* patch up vddc if necessary */
1854 	if (pl->vddc == 0xff01) {
1855 		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
1856 			pl->vddc = vddc;
1857 	}
1858 
1859 	/* fix up pcie gen2 */
1860 	if (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) {
1861 		if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV630)) {
1862 			if (pl->vddc < 1100)
1863 				pl->flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
1864 		}
1865 	}
1866 
1867 	/* patch up boot state */
1868 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1869 		u16 vddc, vddci, mvdd;
1870 		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
1871 		pl->mclk = rdev->clock.default_mclk;
1872 		pl->sclk = rdev->clock.default_sclk;
1873 		pl->vddc = vddc;
1874 	}
1875 }
1876 
rv6xx_parse_power_table(struct radeon_device * rdev)1877 static int rv6xx_parse_power_table(struct radeon_device *rdev)
1878 {
1879 	struct radeon_mode_info *mode_info = &rdev->mode_info;
1880 	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1881 	union pplib_power_state *power_state;
1882 	int i, j;
1883 	union pplib_clock_info *clock_info;
1884 	union power_info *power_info;
1885 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1886 	u16 data_offset;
1887 	u8 frev, crev;
1888 	struct rv6xx_ps *ps;
1889 
1890 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1891 				   &frev, &crev, &data_offset))
1892 		return -EINVAL;
1893 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1894 
1895 	rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
1896 				  sizeof(struct radeon_ps),
1897 				  GFP_KERNEL);
1898 	if (!rdev->pm.dpm.ps)
1899 		return -ENOMEM;
1900 
1901 	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
1902 		power_state = (union pplib_power_state *)
1903 			(mode_info->atom_context->bios + data_offset +
1904 			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
1905 			 i * power_info->pplib.ucStateEntrySize);
1906 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1907 			(mode_info->atom_context->bios + data_offset +
1908 			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
1909 			 (power_state->v1.ucNonClockStateIndex *
1910 			  power_info->pplib.ucNonClockSize));
1911 		if (power_info->pplib.ucStateEntrySize - 1) {
1912 			u8 *idx;
1913 			ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
1914 			if (ps == NULL) {
1915 				kfree(rdev->pm.dpm.ps);
1916 				return -ENOMEM;
1917 			}
1918 			rdev->pm.dpm.ps[i].ps_priv = ps;
1919 			rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
1920 							 non_clock_info);
1921 			idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
1922 			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
1923 				clock_info = (union pplib_clock_info *)
1924 					(mode_info->atom_context->bios + data_offset +
1925 					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
1926 					 (idx[j] * power_info->pplib.ucClockInfoSize));
1927 				rv6xx_parse_pplib_clock_info(rdev,
1928 							     &rdev->pm.dpm.ps[i], j,
1929 							     clock_info);
1930 			}
1931 		}
1932 	}
1933 	rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
1934 	return 0;
1935 }
1936 
/*
 * rv6xx_dpm_init() - allocate and populate the rv6xx DPM private data.
 *
 * Parses platform caps and the VBIOS power table, then derives PLL
 * reference dividers, voltage-control and spread-spectrum capabilities,
 * and feature defaults (clock gating, PCIe gen2, thermal protection,
 * display gap).
 *
 * Returns 0 on success or a negative error code from the table parsers.
 * On failure, pi stays attached to rdev->pm.dpm.priv and is released by
 * rv6xx_dpm_fini().
 */
int rv6xx_dpm_init(struct radeon_device *rdev)
{
	struct radeon_atom_ss ss;
	struct atom_clock_dividers dividers;
	struct rv6xx_power_info *pi;
	int ret;

	pi = kzalloc(sizeof(struct rv6xx_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = rv6xx_parse_power_table(rdev);
	if (ret)
		return ret;

	/* Fall back to defaults when the VBIOS supplies no response times. */
	if (rdev->pm.dpm.voltage_response_time == 0)
		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
	if (rdev->pm.dpm.backbias_response_time == 0)
		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

	/* NOTE(review): the `if (ret)` checks below look inverted --
	 * radeon_atom_get_clock_dividers() returns 0 on success, so the
	 * computed `dividers` is consulted only on FAILURE, when it is
	 * uninitialized, while success takes the default.  The identical
	 * pattern exists in sibling dpm drivers, so flipping it would
	 * change divider selection on all working hardware -- confirm
	 * intent before touching. */
	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     0, false, &dividers);
	if (ret)
		pi->spll_ref_div = dividers.ref_div + 1;
	else
		pi->spll_ref_div = R600_REFERENCEDIVIDER_DFLT;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     0, false, &dividers);
	if (ret)
		pi->mpll_ref_div = dividers.ref_div + 1;
	else
		pi->mpll_ref_div = R600_REFERENCEDIVIDER_DFLT;

	if (rdev->family >= CHIP_RV670)
		pi->fb_div_scale = 1;
	else
		pi->fb_div_scale = 0;

	/* GPIO-based VDDC control is only usable if the board wires it up. */
	pi->voltage_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);

	pi->gfx_clock_gating = true;

	pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
						       ASIC_INTERNAL_ENGINE_SS, 0);
	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
						       ASIC_INTERNAL_MEMORY_SS, 0);

	/* Disable sclk ss, causes hangs on a lot of systems */
	pi->sclk_ss = false;

	if (pi->sclk_ss || pi->mclk_ss)
		pi->dynamic_ss = true;
	else
		pi->dynamic_ss = false;

	pi->dynamic_pcie_gen2 = true;

	/* Thermal protection requires both clock gating and a sensor. */
	if (pi->gfx_clock_gating &&
	    (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->display_gap = true;

	return 0;
}
2011 
rv6xx_dpm_print_power_state(struct radeon_device * rdev,struct radeon_ps * rps)2012 void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
2013 				 struct radeon_ps *rps)
2014 {
2015 	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
2016 	struct rv6xx_pl *pl;
2017 
2018 	r600_dpm_print_class_info(rps->class, rps->class2);
2019 	r600_dpm_print_cap_info(rps->caps);
2020 	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2021 	pl = &ps->low;
2022 	printk("\t\tpower level 0    sclk: %u mclk: %u vddc: %u\n",
2023 	       pl->sclk, pl->mclk, pl->vddc);
2024 	pl = &ps->medium;
2025 	printk("\t\tpower level 1    sclk: %u mclk: %u vddc: %u\n",
2026 	       pl->sclk, pl->mclk, pl->vddc);
2027 	pl = &ps->high;
2028 	printk("\t\tpower level 2    sclk: %u mclk: %u vddc: %u\n",
2029 	       pl->sclk, pl->mclk, pl->vddc);
2030 	r600_dpm_print_ps_status(rdev, rps);
2031 }
2032 
2033 #ifdef CONFIG_DEBUG_FS
rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device * rdev,struct seq_file * m)2034 void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2035 						       struct seq_file *m)
2036 {
2037 	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
2038 	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
2039 	struct rv6xx_pl *pl;
2040 	u32 current_index =
2041 		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2042 		CURRENT_PROFILE_INDEX_SHIFT;
2043 
2044 	if (current_index > 2) {
2045 		seq_printf(m, "invalid dpm profile %d\n", current_index);
2046 	} else {
2047 		if (current_index == 0)
2048 			pl = &ps->low;
2049 		else if (current_index == 1)
2050 			pl = &ps->medium;
2051 		else /* current_index == 2 */
2052 			pl = &ps->high;
2053 		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2054 		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u\n",
2055 			   current_index, pl->sclk, pl->mclk, pl->vddc);
2056 	}
2057 }
2058 #endif
2059 
2060 /* get the current sclk in 10 khz units */
rv6xx_dpm_get_current_sclk(struct radeon_device * rdev)2061 u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev)
2062 {
2063 	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
2064 	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
2065 	struct rv6xx_pl *pl;
2066 	u32 current_index =
2067 		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2068 		CURRENT_PROFILE_INDEX_SHIFT;
2069 
2070 	if (current_index > 2) {
2071 		return 0;
2072 	} else {
2073 		if (current_index == 0)
2074 			pl = &ps->low;
2075 		else if (current_index == 1)
2076 			pl = &ps->medium;
2077 		else /* current_index == 2 */
2078 			pl = &ps->high;
2079 		return pl->sclk;
2080 	}
2081 }
2082 
2083 /* get the current mclk in 10 khz units */
rv6xx_dpm_get_current_mclk(struct radeon_device * rdev)2084 u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev)
2085 {
2086 	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
2087 	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
2088 	struct rv6xx_pl *pl;
2089 	u32 current_index =
2090 		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2091 		CURRENT_PROFILE_INDEX_SHIFT;
2092 
2093 	if (current_index > 2) {
2094 		return 0;
2095 	} else {
2096 		if (current_index == 0)
2097 			pl = &ps->low;
2098 		else if (current_index == 1)
2099 			pl = &ps->medium;
2100 		else /* current_index == 2 */
2101 			pl = &ps->high;
2102 		return pl->mclk;
2103 	}
2104 }
2105 
rv6xx_dpm_fini(struct radeon_device * rdev)2106 void rv6xx_dpm_fini(struct radeon_device *rdev)
2107 {
2108 	int i;
2109 
2110 	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2111 		kfree(rdev->pm.dpm.ps[i].ps_priv);
2112 	}
2113 	kfree(rdev->pm.dpm.ps);
2114 	kfree(rdev->pm.dpm.priv);
2115 }
2116 
rv6xx_dpm_get_sclk(struct radeon_device * rdev,bool low)2117 u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low)
2118 {
2119 	struct rv6xx_ps *requested_state = rv6xx_get_ps(rdev->pm.dpm.requested_ps);
2120 
2121 	if (low)
2122 		return requested_state->low.sclk;
2123 	else
2124 		return requested_state->high.sclk;
2125 }
2126 
rv6xx_dpm_get_mclk(struct radeon_device * rdev,bool low)2127 u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
2128 {
2129 	struct rv6xx_ps *requested_state = rv6xx_get_ps(rdev->pm.dpm.requested_ps);
2130 
2131 	if (low)
2132 		return requested_state->low.mclk;
2133 	else
2134 		return requested_state->high.mclk;
2135 }
2136 
/*
 * rv6xx_dpm_force_performance_level() - pin DPM to a forced level.
 *
 * restricted_levels encodes the restriction as a mask-like value:
 * 3 = force high (low+medium disabled), 2 = force low (high+medium
 * disabled), 0 = automatic.  The hardware is then walked through the
 * usual park-on-low / re-enable sequence so the restriction takes
 * effect (rv6xx_enable_medium/high consult restricted_levels).
 *
 * Always returns 0.
 */
int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
				      enum radeon_dpm_forced_level level)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		pi->restricted_levels = 3;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		pi->restricted_levels = 2;
	} else {
		pi->restricted_levels = 0;
	}

	/* Park on the low level before changing which levels are enabled. */
	rv6xx_clear_vc(rdev);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
	/* These honor restricted_levels set above. */
	rv6xx_enable_medium(rdev);
	rv6xx_enable_high(rdev);
	/* Forced-high: turn the low level off so only high remains. */
	if (pi->restricted_levels == 3)
		r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
	rv6xx_program_vc(rdev);
	rv6xx_program_at(rdev);

	rdev->pm.dpm.forced_level = level;

	return 0;
}
2167